commit 9f02d4ed0f
parent 158763312f
Author: Paul B Mahol <onemda@gmail.com>
Date:   2012-11-07 21:44:27 +00:00

    mjpegenc: yuvj444p support

    Signed-off-by: Paul B Mahol <onemda@gmail.com>

3 changed files with 55 additions and 30 deletions

diff --git a/libavcodec/mjpegenc.c b/libavcodec/mjpegenc.c

@@ -463,10 +463,18 @@ void ff_mjpeg_encode_mb(MpegEncContext *s, DCTELEM block[6][64])
     }
     if (s->chroma_format == CHROMA_420) {
         encode_block(s, block[5], 5);
-    } else {
+    } else if (s->chroma_format == CHROMA_422) {
         encode_block(s, block[6], 6);
         encode_block(s, block[5], 5);
         encode_block(s, block[7], 7);
+    } else {
+        encode_block(s, block[6], 6);
+        encode_block(s, block[8], 8);
+        encode_block(s, block[10], 10);
+        encode_block(s, block[5], 5);
+        encode_block(s, block[7], 7);
+        encode_block(s, block[9], 9);
+        encode_block(s, block[11], 11);
     }
 
     s->i_tex_bits += get_bits_diff(s);
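[Annotation, not part of the patch] In mpegvideo's block layout, a 4:4:4 macroblock carries the four luma blocks (0-3) plus four 8x8 blocks per chroma plane, with Cb at the even indices 4, 6, 8, 10 and Cr at the odd indices 5, 7, 9, 11. Blocks 0-4 (luma plus the first Cb block) are emitted before this if/else, so the new else branch only writes the remaining Cb blocks followed by all Cr blocks. A minimal sketch of that index mapping, assuming the top-left, top-right, bottom-left, bottom-right block ordering used by the get_pixels() calls in mpegvideo_enc.c below; chroma_block_index() is a hypothetical helper, not an FFmpeg function:

#include <stdio.h>

/* Hypothetical helper: maps one 8x8 chroma block of a 4:4:4 macroblock to
 * its mpegvideo block index. is_cr selects the plane (0 = Cb, 1 = Cr);
 * block_y/block_x pick one of the 2x2 blocks covering the 16x16 chroma area. */
static int chroma_block_index(int is_cr, int block_y, int block_x)
{
    return 4 + is_cr + 2 * (2 * block_y + block_x);
}

int main(void)
{
    for (int is_cr = 0; is_cr < 2; is_cr++)
        for (int y = 0; y < 2; y++)
            for (int x = 0; x < 2; x++)
                printf("%s block (%d,%d) -> block[%d]\n",
                       is_cr ? "Cr" : "Cb", y, x,
                       chroma_block_index(is_cr, y, x));
    return 0;
}

Running it lists Cb as blocks 4, 6, 8, 10 and Cr as 5, 7, 9, 11, which matches the encode_block() order in the new branch above once block 4 has already been written.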
@@ -505,7 +513,7 @@ AVCodec ff_mjpeg_encoder = {
     .close          = ff_MPV_encode_end,
     .capabilities   = CODEC_CAP_SLICE_THREADS | CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
     .pix_fmts       = (const enum AVPixelFormat[]){
-        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_NONE
+        AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_NONE
     },
     .long_name      = NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
 };

diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h

@@ -451,7 +451,7 @@ typedef struct MpegEncContext {
     uint8_t *luma_dc_vlc_length;
 #define UNI_AC_ENC_INDEX(run,level) ((run)*128 + (level))
 
-    int coded_score[8];
+    int coded_score[12];
 
     /** precomputed matrix (combine qscale and DCT renorm) */
     int (*q_intra_matrix)[64];
@@ -676,7 +676,7 @@ typedef struct MpegEncContext {
     DCTELEM (*pblocks[12])[64];
 
     DCTELEM (*block)[64]; ///< points to one of the following blocks
-    DCTELEM (*blocks)[8][64]; // for HQ mode we need to keep the best block
+    DCTELEM (*blocks)[12][64]; // for HQ mode we need to keep the best block
     int (*decode_mb)(struct MpegEncContext *s, DCTELEM block[6][64]); // used by some codecs to avoid a switch()
 #define SLICE_OK 0
 #define SLICE_ERROR -1

diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c

@@ -321,8 +321,10 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
     case AV_CODEC_ID_AMV:
         if (avctx->pix_fmt != AV_PIX_FMT_YUVJ420P &&
             avctx->pix_fmt != AV_PIX_FMT_YUVJ422P &&
+            avctx->pix_fmt != AV_PIX_FMT_YUVJ444P &&
             ((avctx->pix_fmt != AV_PIX_FMT_YUV420P &&
-              avctx->pix_fmt != AV_PIX_FMT_YUV422P) ||
+              avctx->pix_fmt != AV_PIX_FMT_YUV422P &&
+              avctx->pix_fmt != AV_PIX_FMT_YUV444P) ||
              avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL)) {
             av_log(avctx, AV_LOG_ERROR, "colorspace not supported in jpeg\n");
             return -1;
@@ -336,6 +338,10 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
     }
 
     switch (avctx->pix_fmt) {
+    case AV_PIX_FMT_YUVJ444P:
+    case AV_PIX_FMT_YUV444P:
+        s->chroma_format = CHROMA_444;
+        break;
     case AV_PIX_FMT_YUVJ422P:
     case AV_PIX_FMT_YUV422P:
         s->chroma_format = CHROMA_422;
@@ -1808,15 +1814,17 @@ static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
 static av_always_inline void encode_mb_internal(MpegEncContext *s,
                                                 int motion_x, int motion_y,
                                                 int mb_block_height,
+                                                int mb_block_width,
                                                 int mb_block_count)
 {
-    int16_t weight[8][64];
-    DCTELEM orig[8][64];
+    int16_t weight[12][64];
+    DCTELEM orig[12][64];
     const int mb_x = s->mb_x;
     const int mb_y = s->mb_y;
     int i;
-    int skip_dct[8];
+    int skip_dct[12];
     int dct_offset = s->linesize * 8; // default for progressive frames
+    int uv_dct_offset = s->uvlinesize * 8;
     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
     int wrap_y, wrap_c;
@@ -1858,20 +1866,20 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
     ptr_y = s->new_picture.f.data[0] +
             (mb_y * 16 * wrap_y) + mb_x * 16;
     ptr_cb = s->new_picture.f.data[1] +
-             (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
     ptr_cr = s->new_picture.f.data[2] +
-             (mb_y * mb_block_height * wrap_c) + mb_x * 8;
+             (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
 
     if((mb_x*16+16 > s->width || mb_y*16+16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
         uint8_t *ebuf = s->edge_emu_buffer + 32;
         s->dsp.emulated_edge_mc(ebuf, ptr_y, wrap_y, 16, 16, mb_x * 16,
                                 mb_y * 16, s->width, s->height);
         ptr_y = ebuf;
-        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, 8,
+        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y, ptr_cb, wrap_c, mb_block_width,
                                 mb_block_height, mb_x * 8, mb_y * 8,
                                 (s->width+1) >> 1, (s->height+1) >> 1);
         ptr_cb = ebuf + 18 * wrap_y;
-        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, 8,
+        s->dsp.emulated_edge_mc(ebuf + 18 * wrap_y + 8, ptr_cr, wrap_c, mb_block_width,
                                 mb_block_height, mb_x * 8, mb_y * 8,
                                 (s->width+1) >> 1, (s->height+1) >> 1);
         ptr_cr = ebuf + 18 * wrap_y + 8;
@@ -1896,8 +1904,10 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
                 s->interlaced_dct = 1;
 
                 dct_offset = wrap_y;
+                uv_dct_offset = wrap_c;
                 wrap_y <<= 1;
-                if (s->chroma_format == CHROMA_422)
+                if (s->chroma_format == CHROMA_422 ||
+                    s->chroma_format == CHROMA_444)
                     wrap_c <<= 1;
             }
         }
@@ -1914,11 +1924,16 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
         } else {
             s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c);
             s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c);
-            if (!s->chroma_y_shift) { /* 422 */
-                s->dsp.get_pixels(s->block[6],
-                                  ptr_cb + (dct_offset >> 1), wrap_c);
-                s->dsp.get_pixels(s->block[7],
-                                  ptr_cr + (dct_offset >> 1), wrap_c);
+            if (!s->chroma_y_shift && s->chroma_x_shift) { /* 422 */
+                s->dsp.get_pixels(s->block[6], ptr_cb + uv_dct_offset, wrap_c);
+                s->dsp.get_pixels(s->block[7], ptr_cr + uv_dct_offset, wrap_c);
+            } else if (!s->chroma_y_shift && !s->chroma_x_shift) { /* 444 */
+                s->dsp.get_pixels(s->block[6], ptr_cb + 8, wrap_c);
+                s->dsp.get_pixels(s->block[7], ptr_cr + 8, wrap_c);
+                s->dsp.get_pixels(s->block[8], ptr_cb + uv_dct_offset, wrap_c);
+                s->dsp.get_pixels(s->block[9], ptr_cr + uv_dct_offset, wrap_c);
+                s->dsp.get_pixels(s->block[10], ptr_cb + uv_dct_offset + 8, wrap_c);
+                s->dsp.get_pixels(s->block[11], ptr_cr + uv_dct_offset + 8, wrap_c);
             }
         }
     } else {
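[Annotation, not part of the patch] chroma_x_shift and chroma_y_shift are the log2 horizontal and vertical chroma subsampling factors, so 4:2:0 is (1,1), 4:2:2 is (1,0) and 4:4:4 is (0,0); the new branch above is the !chroma_y_shift && !chroma_x_shift case. uv_dct_offset (uvlinesize * 8) steps down to the second row of 8x8 chroma blocks, which exists only when chroma_y_shift is 0. A rough sketch of the offsets relative to ptr_cb/ptr_cr under those assumptions; the helper and the uvlinesize values are illustrative only:

#include <stdio.h>

/* Illustration: which offsets from ptr_cb/ptr_cr are sampled for the extra
 * chroma blocks, mirroring the get_pixels() calls above. */
static void print_chroma_block_offsets(int chroma_x_shift, int chroma_y_shift,
                                       int uvlinesize)
{
    int uv_dct_offset = uvlinesize * 8;

    if (!chroma_y_shift && chroma_x_shift)            /* 4:2:2 */
        printf("4:2:2: extra block at +%d (one block row down)\n",
               uv_dct_offset);
    else if (!chroma_y_shift && !chroma_x_shift)      /* 4:4:4 */
        printf("4:4:4: extra blocks at +8, +%d, +%d (right, down, down-right)\n",
               uv_dct_offset, uv_dct_offset + 8);
    else                                              /* 4:2:0 */
        printf("4:2:0: single 8x8 block per plane, no extra offsets\n");
}

int main(void)
{
    print_chroma_block_offsets(1, 1, 16); /* 4:2:0 */
    print_chroma_block_offsets(1, 0, 16); /* 4:2:2 */
    print_chroma_block_offsets(0, 0, 32); /* 4:4:4 */
    return 0;
}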
@@ -1977,6 +1992,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
                 s->interlaced_dct = 1;
 
                 dct_offset = wrap_y;
+                uv_dct_offset = wrap_c;
                 wrap_y <<= 1;
                 if (s->chroma_format == CHROMA_422)
                     wrap_c <<= 1;
@@ -1998,10 +2014,10 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
             s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c);
             s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c);
             if (!s->chroma_y_shift) { /* 422 */
-                s->dsp.diff_pixels(s->block[6], ptr_cb + (dct_offset >> 1),
-                                   dest_cb + (dct_offset >> 1), wrap_c);
-                s->dsp.diff_pixels(s->block[7], ptr_cr + (dct_offset >> 1),
-                                   dest_cr + (dct_offset >> 1), wrap_c);
+                s->dsp.diff_pixels(s->block[6], ptr_cb + uv_dct_offset,
+                                   dest_cb + uv_dct_offset, wrap_c);
+                s->dsp.diff_pixels(s->block[7], ptr_cr + uv_dct_offset,
+                                   dest_cr + uv_dct_offset, wrap_c);
             }
         }
         /* pre quantization */
@@ -2028,12 +2044,12 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
                               wrap_c, 8) < 20 * s->qscale)
                 skip_dct[5] = 1;
             if (!s->chroma_y_shift) { /* 422 */
-                if (s->dsp.sad[1](NULL, ptr_cb + (dct_offset >> 1),
-                                  dest_cb + (dct_offset >> 1),
+                if (s->dsp.sad[1](NULL, ptr_cb + uv_dct_offset,
+                                  dest_cb + uv_dct_offset,
                                   wrap_c, 8) < 20 * s->qscale)
                     skip_dct[6] = 1;
-                if (s->dsp.sad[1](NULL, ptr_cr + (dct_offset >> 1),
-                                  dest_cr + (dct_offset >> 1),
+                if (s->dsp.sad[1](NULL, ptr_cr + uv_dct_offset,
+                                  dest_cr + uv_dct_offset,
                                   wrap_c, 8) < 20 * s->qscale)
                     skip_dct[7] = 1;
             }
@@ -2055,10 +2071,10 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
             get_visual_weight(weight[5], ptr_cr , wrap_c);
             if (!s->chroma_y_shift) { /* 422 */
                 if (!skip_dct[6])
-                    get_visual_weight(weight[6], ptr_cb + (dct_offset >> 1),
+                    get_visual_weight(weight[6], ptr_cb + uv_dct_offset,
                                       wrap_c);
                 if (!skip_dct[7])
-                    get_visual_weight(weight[7], ptr_cr + (dct_offset >> 1),
+                    get_visual_weight(weight[7], ptr_cr + uv_dct_offset,
                                       wrap_c);
             }
         memcpy(orig[0], s->block[0], sizeof(DCTELEM) * 64 * mb_block_count);
@@ -2172,8 +2188,9 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
 
 static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
 {
-    if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 6);
-    else encode_mb_internal(s, motion_x, motion_y, 16, 8);
+    if (s->chroma_format == CHROMA_420) encode_mb_internal(s, motion_x, motion_y, 8, 8, 6);
+    else if (s->chroma_format == CHROMA_422) encode_mb_internal(s, motion_x, motion_y, 16, 8, 8);
+    else encode_mb_internal(s, motion_x, motion_y, 16, 16, 12);
 }
 
 static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
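[Annotation, not part of the patch] The three encode_mb_internal() calls above fix the per-macroblock chroma geometry: mb_block_height and the new mb_block_width are the chroma extent covered by one 16x16 macroblock in pixels, and mb_block_count is the total number of 8x8 blocks (4 luma plus the Cb and Cr blocks). A small summary under those assumptions, as an illustration only:

#include <stdio.h>

/* Illustration: chroma geometry per macroblock as selected by encode_mb(). */
struct mb_geometry {
    const char *chroma_format;
    int mb_block_width;   /* chroma width covered by one MB, in pixels  */
    int mb_block_height;  /* chroma height covered by one MB, in pixels */
    int mb_block_count;   /* total 8x8 blocks: 4 luma + Cb + Cr blocks  */
};

int main(void)
{
    const struct mb_geometry tab[] = {
        { "4:2:0",  8,  8,  6 },  /* 4 luma + 1 Cb + 1 Cr */
        { "4:2:2",  8, 16,  8 },  /* 4 luma + 2 Cb + 2 Cr */
        { "4:4:4", 16, 16, 12 },  /* 4 luma + 4 Cb + 4 Cr */
    };
    for (int i = 0; i < 3; i++)
        printf("%s: chroma %dx%d per MB, %d blocks\n",
               tab[i].chroma_format, tab[i].mb_block_width,
               tab[i].mb_block_height, tab[i].mb_block_count);
    return 0;
}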