	ljpegenc: deMpegEncContextize
The encoder uses almost none of the mpegvideo infrastructure; it only needs a few fields from MpegEncContext. The FATE results change because an all-zero quant matrix is now written into the file. Since ljpeg does not use it for anything, this should not be a problem.
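For reference, the handful of fields the encoder actually needed from MpegEncContext (and from MJpegContext, reached through it) now live in a small private context. The sketch below is condensed from the new struct in the diff that follows; the mapping in the comment is an editorial summary of where each old field went, not part of the commit itself:

/* Condensed view of the new LJpegEncContext (full definition in the diff below).
 * Editorial old -> new mapping:
 *   MpegEncContext.intra_scantable               -> scantable
 *   MpegEncContext.intra_matrix                  -> matrix
 *   MpegEncContext.mjpeg_hsample / mjpeg_vsample -> hsample / vsample
 *   MJpegContext.huff_{size,code}_dc_*           -> same names, now held here
 *   MpegEncContext.rd_scratchpad                 -> scratch (explicitly allocated in init)
 *   MpegEncContext.pb / header_bits              -> a local PutBitContext / local int in encode
 */
typedef struct LJpegEncContext {
    DSPContext dsp;
    ScanTable  scantable;
    uint16_t   matrix[64];

    int vsample[3];
    int hsample[3];

    uint16_t huff_code_dc_luminance[12];
    uint16_t huff_code_dc_chrominance[12];
    uint8_t  huff_size_dc_luminance[12];
    uint8_t  huff_size_dc_chrominance[12];

    uint16_t (*scratch)[4];
} LJpegEncContext;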
@@ -30,55 +30,68 @@
  * lossless JPEG encoder.
  */
 
+#include "libavutil/frame.h"
+#include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
+
 #include "avcodec.h"
 #include "dsputil.h"
 #include "internal.h"
-#include "mpegvideo.h"
 #include "mjpeg.h"
 #include "mjpegenc.h"
 
+typedef struct LJpegEncContext {
+    DSPContext dsp;
+    ScanTable scantable;
+    uint16_t matrix[64];
+
+    int vsample[3];
+    int hsample[3];
+
+    uint16_t huff_code_dc_luminance[12];
+    uint16_t huff_code_dc_chrominance[12];
+    uint8_t  huff_size_dc_luminance[12];
+    uint8_t  huff_size_dc_chrominance[12];
+
+    uint16_t (*scratch)[4];
+} LJpegEncContext;
+
 static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                                    const AVFrame *pict, int *got_packet)
 {
-    MpegEncContext * const s = avctx->priv_data;
-    MJpegContext * const m = s->mjpeg_ctx;
-    const int width= s->width;
-    const int height= s->height;
-    AVFrame * const p = &s->current_picture.f;
+    LJpegEncContext *s = avctx->priv_data;
+    PutBitContext pb;
+    const int width  = avctx->width;
+    const int height = avctx->height;
     const int predictor= avctx->prediction_method+1;
-    const int mb_width  = (width  + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0];
-    const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];
-    int ret, max_pkt_size = FF_MIN_BUFFER_SIZE;
+    const int mb_width  = (width  + s->hsample[0] - 1) / s->hsample[0];
+    const int mb_height = (height + s->vsample[0] - 1) / s->vsample[0];
+    int max_pkt_size = FF_MIN_BUFFER_SIZE;
+    int ret, header_bits;
 
     if (avctx->pix_fmt == AV_PIX_FMT_BGRA)
         max_pkt_size += width * height * 3 * 4;
     else {
         max_pkt_size += mb_width * mb_height * 3 * 4
-                        * s->mjpeg_hsample[0] * s->mjpeg_vsample[0];
+                        * s->hsample[0] * s->vsample[0];
     }
     if ((ret = ff_alloc_packet(pkt, max_pkt_size)) < 0) {
         av_log(avctx, AV_LOG_ERROR, "Error getting output packet of size %d.\n", max_pkt_size);
         return ret;
     }
 
-    init_put_bits(&s->pb, pkt->data, pkt->size);
+    init_put_bits(&pb, pkt->data, pkt->size);
 
-    av_frame_unref(p);
-    ret = av_frame_ref(p, pict);
-    if (ret < 0)
-        return ret;
-    p->pict_type= AV_PICTURE_TYPE_I;
-    p->key_frame= 1;
+    ff_mjpeg_encode_picture_header(avctx, &pb, &s->scantable,
+                                   s->matrix);
 
-    ff_mjpeg_encode_picture_header(avctx, &s->pb, &s->intra_scantable,
-                                   s->intra_matrix);
-
-    s->header_bits= put_bits_count(&s->pb);
+    header_bits = put_bits_count(&pb);
 
     if(avctx->pix_fmt == AV_PIX_FMT_BGRA){
         int x, y, i;
-        const int linesize= p->linesize[0];
-        uint16_t (*buffer)[4]= (void *) s->rd_scratchpad;
+        const int linesize = pict->linesize[0];
+        uint16_t (*buffer)[4] = s->scratch;
         int left[3], top[3], topleft[3];
 
         for(i=0; i<3; i++){
@@ -87,10 +100,10 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
 
         for(y = 0; y < height; y++) {
             const int modified_predictor= y ? predictor : 1;
-            uint8_t *ptr = p->data[0] + (linesize * y);
+            uint8_t *ptr = pict->data[0] + (linesize * y);
 
-            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < width*3*4){
-                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+            if(pb.buf_end - pb.buf - (put_bits_count(&pb) >> 3) < width * 3 * 4) {
+                av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
                 return -1;
             }
 
@@ -115,9 +128,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                     diff= ((left[i] - pred + 0x100)&0x1FF) - 0x100;
 
                     if(i==0)
-                        ff_mjpeg_encode_dc(&s->pb, diff, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+                        ff_mjpeg_encode_dc(&pb, diff, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                     else
-                        ff_mjpeg_encode_dc(&s->pb, diff, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+                        ff_mjpeg_encode_dc(&pb, diff, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                 }
             }
         }
@@ -125,8 +138,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
         int mb_x, mb_y, i;
 
         for(mb_y = 0; mb_y < mb_height; mb_y++) {
-            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){
-                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+            if (pb.buf_end - pb.buf - (put_bits_count(&pb) >> 3) <
+                mb_width * 4 * 3 * s->hsample[0] * s->vsample[0]) {
+                av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
                 return -1;
             }
             for(mb_x = 0; mb_x < mb_width; mb_x++) {
@@ -134,15 +148,15 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                     for(i=0;i<3;i++) {
                         uint8_t *ptr;
                         int x, y, h, v, linesize;
-                        h = s->mjpeg_hsample[i];
-                        v = s->mjpeg_vsample[i];
-                        linesize= p->linesize[i];
+                        h = s->hsample[i];
+                        v = s->vsample[i];
+                        linesize = pict->linesize[i];
 
                         for(y=0; y<v; y++){
                             for(x=0; x<h; x++){
                                 int pred;
 
-                                ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+                                ptr = pict->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
                                 if(y==0 && mb_y==0){
                                     if(x==0 && mb_x==0){
                                         pred= 128;
@@ -158,9 +172,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                                 }
 
                                 if(i==0)
-                                    ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+                                    ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                                 else
-                                    ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+                                    ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                             }
                         }
                     }
@@ -168,8 +182,8 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                     for(i=0;i<3;i++) {
                         uint8_t *ptr;
                         int x, y, h, v, linesize;
-                        h = s->mjpeg_hsample[i];
-                        v = s->mjpeg_vsample[i];
+                        h = s->hsample[i];
+                        v = s->vsample[i];
                         linesize = pict->linesize[i];
 
                         for(y=0; y<v; y++){
@@ -180,9 +194,9 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
                                 PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
 
                                 if(i==0)
-                                    ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+                                    ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                                 else
-                                    ff_mjpeg_encode_dc(&s->pb, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+                                    ff_mjpeg_encode_dc(&pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                             }
                         }
                     }
@@ -193,11 +207,10 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
 
     emms_c();
 
-    ff_mjpeg_encode_picture_trailer(&s->pb, s->header_bits);
-    s->picture_number++;
+    ff_mjpeg_encode_picture_trailer(&pb, header_bits);
 
-    flush_put_bits(&s->pb);
-    pkt->size   = put_bits_ptr(&s->pb) - s->pb.buf;
+    flush_put_bits(&pb);
+    pkt->size   = put_bits_ptr(&pb) - pb.buf;
     pkt->flags |= AV_PKT_FLAG_KEY;
     *got_packet = 1;
 
@@ -205,14 +218,86 @@ static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
 //    return (put_bits_count(&f->pb)+7)/8;
 }
 
-AVCodec ff_ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need them
+static av_cold int ljpeg_encode_close(AVCodecContext *avctx)
+{
+    LJpegEncContext *s = avctx->priv_data;
+
+    av_frame_free(&avctx->coded_frame);
+    av_freep(&s->scratch);
+
+    return 0;
+}
+
+static av_cold int ljpeg_encode_init(AVCodecContext *avctx)
+{
+    LJpegEncContext *s = avctx->priv_data;
+    int chroma_v_shift, chroma_h_shift;
+
+    if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
+         avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
+         avctx->pix_fmt == AV_PIX_FMT_YUV444P) &&
+        avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Limited range YUV is non-standard, set strict_std_compliance to "
+               "at least unofficial to use it.\n");
+        return AVERROR(EINVAL);
+    }
+
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
+
+    s->scratch = av_malloc_array(avctx->width + 1, sizeof(*s->scratch));
+
+    ff_dsputil_init(&s->dsp, avctx);
+    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
+
+    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
+                                     &chroma_v_shift);
+
+    if (avctx->pix_fmt   == AV_PIX_FMT_BGRA) {
+        s->vsample[0] = s->hsample[0] =
+        s->vsample[1] = s->hsample[1] =
+        s->vsample[2] = s->hsample[2] = 1;
+    } else {
+        s->vsample[0] = 2;
+        s->vsample[1] = 2 >> chroma_v_shift;
+        s->vsample[2] = 2 >> chroma_v_shift;
+        s->hsample[0] = 2;
+        s->hsample[1] = 2 >> chroma_h_shift;
+        s->hsample[2] = 2 >> chroma_h_shift;
+    }
+
+    ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance,
+                                 s->huff_code_dc_luminance,
+                                 avpriv_mjpeg_bits_dc_luminance,
+                                 avpriv_mjpeg_val_dc);
+    ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance,
+                                 s->huff_code_dc_chrominance,
+                                 avpriv_mjpeg_bits_dc_chrominance,
+                                 avpriv_mjpeg_val_dc);
+
+    return 0;
+}
+
+AVCodec ff_ljpeg_encoder = {
     .name           = "ljpeg",
     .long_name      = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_LJPEG,
-    .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
+    .priv_data_size = sizeof(LJpegEncContext),
+    .init           = ljpeg_encode_init,
     .encode2        = encode_picture_lossless,
-    .close          = ff_MPV_encode_end,
+    .close          = ljpeg_encode_close,
+    .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUVJ420P,
+                                                    AV_PIX_FMT_YUVJ422P,
+                                                    AV_PIX_FMT_YUVJ444P,
+                                                    AV_PIX_FMT_BGRA,
+                                                    AV_PIX_FMT_YUV420P,
+                                                    AV_PIX_FMT_YUV422P,
+                                                    AV_PIX_FMT_YUV444P,
+                                                    AV_PIX_FMT_NONE },
 };
 
@@ -609,8 +609,6 @@ typedef struct MpegEncContext {
 
     /* MJPEG specific */
     struct MJpegContext *mjpeg_ctx;
-    int mjpeg_vsample[3];       ///< vertical sampling factors, default = {2, 1, 1}
-    int mjpeg_hsample[3];       ///< horizontal sampling factors, default = {2, 1, 1}
 
     /* MSMPEG4 specific */
     int mv_table_index;
 
@@ -227,7 +227,6 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
     int i, ret;
-    int chroma_h_shift, chroma_v_shift;
 
     MPV_encode_defaults(s);
 
@@ -240,19 +239,6 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
             return -1;
         }
         break;
-    case AV_CODEC_ID_LJPEG:
-        if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
-            avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
-            avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
-            avctx->pix_fmt != AV_PIX_FMT_BGRA     &&
-            ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
-              avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
-              avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
-             avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
-            av_log(avctx, AV_LOG_ERROR, "colorspace not supported in LJPEG\n");
-            return -1;
-        }
-        break;
     case AV_CODEC_ID_MJPEG:
         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
@@ -538,9 +524,6 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
     if (avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS)
         s->inter_quant_bias = avctx->inter_quant_bias;
 
-    av_pix_fmt_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift,
-                                     &chroma_v_shift);
-
     if (avctx->codec_id == AV_CODEC_ID_MPEG4 &&
         s->avctx->time_base.den > (1 << 16) - 1) {
         av_log(avctx, AV_LOG_ERROR,
@@ -564,24 +547,10 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
         avctx->delay  = s->low_delay ? 0 : (s->max_b_frames + 1);
         s->rtp_mode   = 1;
         break;
-    case AV_CODEC_ID_LJPEG:
     case AV_CODEC_ID_MJPEG:
         s->out_format = FMT_MJPEG;
         s->intra_only = 1; /* force intra only for jpeg */
-        if (avctx->codec->id == AV_CODEC_ID_LJPEG &&
-            avctx->pix_fmt   == AV_PIX_FMT_BGRA) {
-            s->mjpeg_vsample[0] = s->mjpeg_hsample[0] =
-            s->mjpeg_vsample[1] = s->mjpeg_hsample[1] =
-            s->mjpeg_vsample[2] = s->mjpeg_hsample[2] = 1;
-        } else {
-            s->mjpeg_vsample[0] = 2;
-            s->mjpeg_vsample[1] = 2 >> chroma_v_shift;
-            s->mjpeg_vsample[2] = 2 >> chroma_v_shift;
-            s->mjpeg_hsample[0] = 2;
-            s->mjpeg_hsample[1] = 2 >> chroma_h_shift;
-            s->mjpeg_hsample[2] = 2 >> chroma_h_shift;
-        }
-        if (!(CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) ||
+        if (!CONFIG_MJPEG_ENCODER ||
             ff_mjpeg_encode_init(s) < 0)
             return -1;
         avctx->delay = 0;
@@ -838,7 +807,7 @@ av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
     ff_rate_control_uninit(s);
 
     ff_MPV_common_end(s);
-    if ((CONFIG_MJPEG_ENCODER || CONFIG_LJPEG_ENCODER) &&
+    if (CONFIG_MJPEG_ENCODER &&
         s->out_format == FMT_MJPEG)
         ff_mjpeg_encode_close(s);
 
@@ -1,4 +1,4 @@
-2b1a2d5ad7f357df955d8548320d13f7 *tests/data/fate/vsynth1-ljpeg.avi
+aed2be6710c0dddacfa410dff7ce7e79 *tests/data/fate/vsynth1-ljpeg.avi
 6312924 tests/data/fate/vsynth1-ljpeg.avi
 c5ccac874dbf808e9088bc3107860042 *tests/data/fate/vsynth1-ljpeg.out.rawvideo
 stddev:    0.00 PSNR:999.99 MAXDIFF:    0 bytes:  7603200/  7603200
 
@@ -1,4 +1,4 @@
-db9fd56e154b1056c9443a82a96db6f5 *tests/data/fate/vsynth2-ljpeg.avi
+5d603cecd59db0f255a53bda837a6bae *tests/data/fate/vsynth2-ljpeg.avi
 4766902 tests/data/fate/vsynth2-ljpeg.avi
 dde5895817ad9d219f79a52d0bdfb001 *tests/data/fate/vsynth2-ljpeg.out.rawvideo
 stddev:    0.00 PSNR:999.99 MAXDIFF:    0 bytes:  7603200/  7603200