Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2025-03-17 20:17:55 +02:00)
Revert "removing lowres support"

There have been multiple user complaints about losing this feature, while it is not clear whether the claimed 3% speed loss was real or fabricated. My own testing indicates no statistically significant speed difference with either MPEG-2 or MPEG-4; if anything, the code with lowres support is a tiny bit faster than the code without.

This reverts commit 92ef4be4ab9fbb7d901b22e0036a4ca90b00a476, reversing changes made to 2e07f42957666df6d7c63a62263b8447e97b1442.

Conflicts:
    cmdutils.c
    libavcodec/arm/vp8dsp_init_arm.c
    libavcodec/mpegvideo.c
    libavcodec/mpegvideo.h
    libavutil/arm/Makefile

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
This commit is contained in:
parent f2c5383620
commit 5e50a5724b
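For context on what is being restored: a decoder advertises how far it can downscale through AVCodec.max_lowres (the ".max_lowres = 3" entries re-added below), and the caller requests a factor through AVCodecContext.lowres before opening the codec. With lowres = n the decoder operates on (16 >> n)-pixel macroblocks, so lowres = 1 halves width and height and lowres = 2 quarters them (a 1920x1080 stream comes out 480x270 at lowres = 2); this is what, for example, ffplay's -lowres option exposes. The following is only a minimal, hypothetical sketch against the libavcodec API of this period (the helper name and the choice of MPEG-2 are illustrative, not part of the commit):

    #include <libavcodec/avcodec.h>

    /* Open an MPEG-2 decoder with the requested lowres factor, clamped to
     * the per-decoder limit restored by this commit (codec->max_lowres). */
    static AVCodecContext *open_lowres_decoder(int requested_lowres)
    {
        AVCodec *codec = avcodec_find_decoder(CODEC_ID_MPEG2VIDEO);
        AVCodecContext *avctx;

        if (!codec)
            return NULL;
        avctx = avcodec_alloc_context3(codec);
        if (!avctx)
            return NULL;

        /* With lowres = n, output width and height shrink by 2^n. */
        avctx->lowres = FFMIN(requested_lowres, codec->max_lowres);

        if (avcodec_open2(avctx, codec, NULL) < 0) {
            av_free(avctx);
            return NULL;
        }
        return avctx;
    }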
@@ -127,6 +127,7 @@ AVCodec ff_flv_decoder = {
     .close = ff_h263_decode_end,
     .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
     .pix_fmts = ff_pixfmt_list_420,
 };
@@ -658,5 +658,6 @@ AVCodec ff_h261_decoder = {
     .close = h261_decode_end,
     .decode = h261_decode_frame,
     .capabilities = CODEC_CAP_DR1,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("H.261"),
 };
@@ -151,7 +151,7 @@ static int get_consumed_bytes(MpegEncContext *s, int buf_size){
 
 static int decode_slice(MpegEncContext *s){
     const int part_mask= s->partitioned_frame ? (ER_AC_END|ER_AC_ERROR) : 0x7F;
-    const int mb_size = 16;
+    const int mb_size= 16>>s->avctx->lowres;
     s->last_resync_gb= s->gb;
     s->first_slice_line= 1;
 
@@ -766,6 +766,7 @@ AVCodec ff_h263_decoder = {
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1 |
                     CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY,
     .flush = ff_mpeg_flush,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
     .pix_fmts = ff_hwaccel_pixfmt_list_420,
 };
@@ -716,6 +716,7 @@ av_cold void ff_intrax8_common_end(IntraX8Context * w)
  * The parent codec must call MPV_frame_start(), ff_er_frame_start() before calling this function.
  * The parent codec must call ff_er_frame_end(), MPV_frame_end() after calling this function.
  * This function does not use MPV_decode_mb().
+ * lowres decoding is theoretically impossible.
  * @param w pointer to IntraX8Context
  * @param dquant doubled quantizer, it would be odd in case of VC-1 halfpq==1.
  * @param quant_offset offset away from zero
@@ -940,6 +940,21 @@ static int ljpeg_decode_yuv_scan(MJpegDecodeContext *s, int predictor,
     return 0;
 }
 
+static av_always_inline void mjpeg_copy_block(uint8_t *dst, const uint8_t *src,
+                                              int linesize, int lowres)
+{
+    switch (lowres) {
+    case 0: copy_block8(dst, src, linesize, linesize, 8);
+        break;
+    case 1: copy_block4(dst, src, linesize, linesize, 4);
+        break;
+    case 2: copy_block2(dst, src, linesize, linesize, 2);
+        break;
+    case 3: *dst = *src;
+        break;
+    }
+}
+
 static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
                              int Al, const uint8_t *mb_bitmask,
                              const AVFrame *reference)
@@ -1010,8 +1025,8 @@ static int mjpeg_decode_scan(MJpegDecodeContext *s, int nb_components, int Ah,
                 ptr = data[c] + block_offset;
                 if (!s->progressive) {
                     if (copy_mb)
-                        copy_block8(ptr, reference_data[c] + block_offset,
-                                    linesize[c], linesize[c], 8);
+                        mjpeg_copy_block(ptr, reference_data[c] + block_offset,
+                                         linesize[c], s->avctx->lowres);
                     else {
                         s->dsp.clear_block(s->block);
                         if (decode_block(s, s->block, i,
@@ -1676,6 +1676,7 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
                              const uint8_t **buf, int buf_size)
 {
     AVCodecContext *avctx = s->avctx;
+    const int lowres = s->avctx->lowres;
     const int field_pic = s->picture_structure != PICT_FRAME;
 
     s->resync_mb_x =
@@ -1798,14 +1799,14 @@ static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
             }
         }
 
-        s->dest[0] += 16;
-        s->dest[1] += 16 >> s->chroma_x_shift;
-        s->dest[2] += 16 >> s->chroma_x_shift;
+        s->dest[0] += 16 >> lowres;
+        s->dest[1] +=(16 >> lowres) >> s->chroma_x_shift;
+        s->dest[2] +=(16 >> lowres) >> s->chroma_x_shift;
 
         ff_MPV_decode_mb(s, s->block);
 
         if (++s->mb_x >= s->mb_width) {
-            const int mb_size = 16;
+            const int mb_size = 16 >> s->avctx->lowres;
 
             ff_draw_horiz_band(s, mb_size*(s->mb_y >> field_pic), mb_size);
             ff_MPV_report_decode_progress(s);
@@ -2590,6 +2591,7 @@ AVCodec ff_mpeg1video_decoder = {
                       CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
                       CODEC_CAP_SLICE_THREADS,
     .flush = flush,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-1 video"),
     .update_thread_context = ONLY_IF_THREADS_ENABLED(mpeg_decode_update_thread_context)
 };
@@ -2606,6 +2608,7 @@ AVCodec ff_mpeg2video_decoder = {
                       CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
                       CODEC_CAP_SLICE_THREADS,
     .flush = flush,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-2 video"),
     .profiles = NULL_IF_CONFIG_SMALL(mpeg2_video_profiles),
 };
@@ -2343,6 +2343,7 @@ AVCodec ff_mpeg4_decoder = {
                       CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
                       CODEC_CAP_FRAME_THREADS,
     .flush = ff_mpeg_flush,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
     .pix_fmts = ff_hwaccel_pixfmt_list_420,
     .profiles = NULL_IF_CONFIG_SMALL(mpeg4_video_profiles),
@@ -1784,6 +1784,381 @@ void ff_print_debug_info(MpegEncContext *s, AVFrame *pict)
     }
 }
 
+static inline int hpel_motion_lowres(MpegEncContext *s,
+                                     uint8_t *dest, uint8_t *src,
+                                     int field_based, int field_select,
+                                     int src_x, int src_y,
+                                     int width, int height, int stride,
+                                     int h_edge_pos, int v_edge_pos,
+                                     int w, int h, h264_chroma_mc_func *pix_op,
+                                     int motion_x, int motion_y)
+{
+    const int lowres = s->avctx->lowres;
+    const int op_index = FFMIN(lowres, 2);
+    const int s_mask = (2 << lowres) - 1;
+    int emu = 0;
+    int sx, sy;
+
+    if (s->quarter_sample) {
+        motion_x /= 2;
+        motion_y /= 2;
+    }
+
+    sx = motion_x & s_mask;
+    sy = motion_y & s_mask;
+    src_x += motion_x >> lowres + 1;
+    src_y += motion_y >> lowres + 1;
+
+    src += src_y * stride + src_x;
+
+    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w, 0) ||
+        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
+        s->dsp.emulated_edge_mc(s->edge_emu_buffer, src, s->linesize, w + 1,
+                                (h + 1) << field_based, src_x,
+                                src_y << field_based,
+                                h_edge_pos,
+                                v_edge_pos);
+        src = s->edge_emu_buffer;
+        emu = 1;
+    }
+
+    sx = (sx << 2) >> lowres;
+    sy = (sy << 2) >> lowres;
+    if (field_select)
+        src += s->linesize;
+    pix_op[op_index](dest, src, stride, h, sx, sy);
+    return emu;
+}
+
+/* apply one mpeg motion vector to the three components */
+static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
+                                                uint8_t *dest_y,
+                                                uint8_t *dest_cb,
+                                                uint8_t *dest_cr,
+                                                int field_based,
+                                                int bottom_field,
+                                                int field_select,
+                                                uint8_t **ref_picture,
+                                                h264_chroma_mc_func *pix_op,
+                                                int motion_x, int motion_y,
+                                                int h, int mb_y)
+{
+    uint8_t *ptr_y, *ptr_cb, *ptr_cr;
+    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, uvlinesize, linesize, sx, sy,
+        uvsx, uvsy;
+    const int lowres = s->avctx->lowres;
+    const int op_index = FFMIN(lowres-1+s->chroma_x_shift, 2);
+    const int block_s = 8>>lowres;
+    const int s_mask = (2 << lowres) - 1;
+    const int h_edge_pos = s->h_edge_pos >> lowres;
+    const int v_edge_pos = s->v_edge_pos >> lowres;
+    linesize   = s->current_picture.f.linesize[0] << field_based;
+    uvlinesize = s->current_picture.f.linesize[1] << field_based;
+
+    // FIXME obviously not perfect but qpel will not work in lowres anyway
+    if (s->quarter_sample) {
+        motion_x /= 2;
+        motion_y /= 2;
+    }
+
+    if(field_based){
+        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
+    }
+
+    sx = motion_x & s_mask;
+    sy = motion_y & s_mask;
+    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
+    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
+
+    if (s->out_format == FMT_H263) {
+        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
+        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
+        uvsrc_x = src_x >> 1;
+        uvsrc_y = src_y >> 1;
+    } else if (s->out_format == FMT_H261) {
+        // even chroma mv's are full pel in H261
+        mx      = motion_x / 4;
+        my      = motion_y / 4;
+        uvsx    = (2 * mx) & s_mask;
+        uvsy    = (2 * my) & s_mask;
+        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
+        uvsrc_y = mb_y * block_s + (my >> lowres);
+    } else {
+        if(s->chroma_y_shift){
+            mx      = motion_x / 2;
+            my      = motion_y / 2;
+            uvsx    = mx & s_mask;
+            uvsy    = my & s_mask;
+            uvsrc_x = s->mb_x * block_s + (mx >> lowres + 1);
+            uvsrc_y = (mb_y * block_s >> field_based) + (my >> lowres + 1);
+        } else {
+            if(s->chroma_x_shift){
+            //Chroma422
+                mx = motion_x / 2;
+                uvsx = mx & s_mask;
+                uvsy = motion_y & s_mask;
+                uvsrc_y = src_y;
+                uvsrc_x = s->mb_x*block_s + (mx >> (lowres+1));
+            } else {
+            //Chroma444
+                uvsx = motion_x & s_mask;
+                uvsy = motion_y & s_mask;
+                uvsrc_x = src_x;
+                uvsrc_y = src_y;
+            }
+        }
+    }
+
+    ptr_y  = ref_picture[0] + src_y * linesize + src_x;
+    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
+    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
+
+    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s, 0) ||
+        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
+        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr_y,
+                                s->linesize, 17, 17 + field_based,
+                                src_x, src_y << field_based, h_edge_pos,
+                                v_edge_pos);
+        ptr_y = s->edge_emu_buffer;
+        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
+            uint8_t *uvbuf = s->edge_emu_buffer + 18 * s->linesize;
+            s->dsp.emulated_edge_mc(uvbuf , ptr_cb, s->uvlinesize, 9,
+                                    9 + field_based,
+                                    uvsrc_x, uvsrc_y << field_based,
+                                    h_edge_pos >> 1, v_edge_pos >> 1);
+            s->dsp.emulated_edge_mc(uvbuf + 16, ptr_cr, s->uvlinesize, 9,
+                                    9 + field_based,
+                                    uvsrc_x, uvsrc_y << field_based,
+                                    h_edge_pos >> 1, v_edge_pos >> 1);
+            ptr_cb = uvbuf;
+            ptr_cr = uvbuf + 16;
+        }
+    }
+
+    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f.data
+    if (bottom_field) {
+        dest_y  += s->linesize;
+        dest_cb += s->uvlinesize;
+        dest_cr += s->uvlinesize;
+    }
+
+    if (field_select) {
+        ptr_y  += s->linesize;
+        ptr_cb += s->uvlinesize;
+        ptr_cr += s->uvlinesize;
+    }
+
+    sx = (sx << 2) >> lowres;
+    sy = (sy << 2) >> lowres;
+    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
+
+    if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY)) {
+        uvsx = (uvsx << 2) >> lowres;
+        uvsy = (uvsy << 2) >> lowres;
+        if (h >> s->chroma_y_shift) {
+            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
+            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, h >> s->chroma_y_shift, uvsx, uvsy);
+        }
+    }
+    // FIXME h261 lowres loop filter
+}
+
+static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
+                                            uint8_t *dest_cb, uint8_t *dest_cr,
+                                            uint8_t **ref_picture,
+                                            h264_chroma_mc_func * pix_op,
+                                            int mx, int my)
+{
+    const int lowres = s->avctx->lowres;
+    const int op_index = FFMIN(lowres, 2);
+    const int block_s = 8 >> lowres;
+    const int s_mask = (2 << lowres) - 1;
+    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
+    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
+    int emu = 0, src_x, src_y, offset, sx, sy;
+    uint8_t *ptr;
+
+    if (s->quarter_sample) {
+        mx /= 2;
+        my /= 2;
+    }
+
+    /* In case of 8X8, we construct a single chroma motion vector
+       with a special rounding */
+    mx = ff_h263_round_chroma(mx);
+    my = ff_h263_round_chroma(my);
+
+    sx = mx & s_mask;
+    sy = my & s_mask;
+    src_x = s->mb_x * block_s + (mx >> lowres + 1);
+    src_y = s->mb_y * block_s + (my >> lowres + 1);
+
+    offset = src_y * s->uvlinesize + src_x;
+    ptr = ref_picture[1] + offset;
+    if (s->flags & CODEC_FLAG_EMU_EDGE) {
+        if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
+            (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
+            s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize,
+                                    9, 9, src_x, src_y, h_edge_pos, v_edge_pos);
+            ptr = s->edge_emu_buffer;
+            emu = 1;
+        }
+    }
+    sx = (sx << 2) >> lowres;
+    sy = (sy << 2) >> lowres;
+    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
+
+    ptr = ref_picture[2] + offset;
+    if (emu) {
+        s->dsp.emulated_edge_mc(s->edge_emu_buffer, ptr, s->uvlinesize, 9, 9,
+                                src_x, src_y, h_edge_pos, v_edge_pos);
+        ptr = s->edge_emu_buffer;
+    }
+    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
+}
+
+/**
+ * motion compensation of a single macroblock
+ * @param s context
+ * @param dest_y luma destination pointer
+ * @param dest_cb chroma cb/u destination pointer
+ * @param dest_cr chroma cr/v destination pointer
+ * @param dir direction (0->forward, 1->backward)
+ * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
+ * @param pix_op halfpel motion compensation function (average or put normally)
+ * the motion vectors are taken from s->mv and the MV type from s->mv_type
+ */
+static inline void MPV_motion_lowres(MpegEncContext *s,
+                                     uint8_t *dest_y, uint8_t *dest_cb,
+                                     uint8_t *dest_cr,
+                                     int dir, uint8_t **ref_picture,
+                                     h264_chroma_mc_func *pix_op)
+{
+    int mx, my;
+    int mb_x, mb_y, i;
+    const int lowres = s->avctx->lowres;
+    const int block_s = 8 >>lowres;
+
+    mb_x = s->mb_x;
+    mb_y = s->mb_y;
+
+    switch (s->mv_type) {
+    case MV_TYPE_16X16:
+        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                           0, 0, 0,
+                           ref_picture, pix_op,
+                           s->mv[dir][0][0], s->mv[dir][0][1],
+                           2 * block_s, mb_y);
+        break;
+    case MV_TYPE_8X8:
+        mx = 0;
+        my = 0;
+        for (i = 0; i < 4; i++) {
+            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
+                               s->linesize) * block_s,
+                               ref_picture[0], 0, 0,
+                               (2 * mb_x + (i & 1)) * block_s,
+                               (2 * mb_y + (i >> 1)) * block_s,
+                               s->width, s->height, s->linesize,
+                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
+                               block_s, block_s, pix_op,
+                               s->mv[dir][i][0], s->mv[dir][i][1]);
+
+            mx += s->mv[dir][i][0];
+            my += s->mv[dir][i][1];
+        }
+
+        if (!CONFIG_GRAY || !(s->flags & CODEC_FLAG_GRAY))
+            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
+                                     pix_op, mx, my);
+        break;
+    case MV_TYPE_FIELD:
+        if (s->picture_structure == PICT_FRAME) {
+            /* top field */
+            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                               1, 0, s->field_select[dir][0],
+                               ref_picture, pix_op,
+                               s->mv[dir][0][0], s->mv[dir][0][1],
+                               block_s, mb_y);
+            /* bottom field */
+            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                               1, 1, s->field_select[dir][1],
+                               ref_picture, pix_op,
+                               s->mv[dir][1][0], s->mv[dir][1][1],
+                               block_s, mb_y);
+        } else {
+            if (s->picture_structure != s->field_select[dir][0] + 1 &&
+                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
+                ref_picture = s->current_picture_ptr->f.data;
+
+            }
+            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                               0, 0, s->field_select[dir][0],
+                               ref_picture, pix_op,
+                               s->mv[dir][0][0],
+                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
+        }
+        break;
+    case MV_TYPE_16X8:
+        for (i = 0; i < 2; i++) {
+            uint8_t **ref2picture;
+
+            if (s->picture_structure == s->field_select[dir][i] + 1 ||
+                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
+                ref2picture = ref_picture;
+            } else {
+                ref2picture = s->current_picture_ptr->f.data;
+            }
+
+            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                               0, 0, s->field_select[dir][i],
+                               ref2picture, pix_op,
+                               s->mv[dir][i][0], s->mv[dir][i][1] +
+                               2 * block_s * i, block_s, mb_y >> 1);
+
+            dest_y  += 2 * block_s * s->linesize;
+            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
+            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
+        }
+        break;
+    case MV_TYPE_DMV:
+        if (s->picture_structure == PICT_FRAME) {
+            for (i = 0; i < 2; i++) {
+                int j;
+                for (j = 0; j < 2; j++) {
+                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                                       1, j, j ^ i,
+                                       ref_picture, pix_op,
+                                       s->mv[dir][2 * i + j][0],
+                                       s->mv[dir][2 * i + j][1],
+                                       block_s, mb_y);
+                }
+                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
+            }
+        } else {
+            for (i = 0; i < 2; i++) {
+                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                                   0, 0, s->picture_structure != i + 1,
+                                   ref_picture, pix_op,
+                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
+                                   2 * block_s, mb_y >> 1);
+
+                // after put we make avg of the same block
+                pix_op = s->dsp.avg_h264_chroma_pixels_tab;
+
+                // opposite parity is always in the same
+                // frame if this is second field
+                if (!s->first_field) {
+                    ref_picture = s->current_picture_ptr->f.data;
+                }
+            }
+        }
+        break;
+    default:
+        assert(0);
+    }
+}
 /**
  * find the lowest MB row referenced in the MVs
  */
@@ -1893,7 +2268,7 @@ void ff_clean_intra_table_entries(MpegEncContext *s)
  */
 static av_always_inline
 void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
-                            int is_mpeg12)
+                            int lowres_flag, int is_mpeg12)
 {
     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
     if(CONFIG_MPEG_XVMC_DECODER && s->avctx->xvmc_acceleration){
@@ -1938,8 +2313,8 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
         qpel_mc_func (*op_qpix)[16];
         const int linesize = s->current_picture.f.linesize[0]; //not s->linesize as this would be wrong for field pics
         const int uvlinesize = s->current_picture.f.linesize[1];
-        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || s->avctx->lowres;
-        const int block_size= 8 >> s->avctx->lowres;
+        const int readable= s->pict_type != AV_PICTURE_TYPE_B || s->encoding || s->avctx->draw_horiz_band || lowres_flag;
+        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
 
         /* avoid copy if macroblock skipped in last frame too */
         /* skip only during decoding as we might trash the buffers during encoding a bit */
@@ -1988,19 +2363,31 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
             }
         }
 
-            op_qpix= s->me.qpel_put;
-            if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
-                op_pix = s->dsp.put_pixels_tab;
+            if(lowres_flag){
+                h264_chroma_mc_func *op_pix = s->dsp.put_h264_chroma_pixels_tab;
+
+                if (s->mv_dir & MV_DIR_FORWARD) {
+                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix);
+                    op_pix = s->dsp.avg_h264_chroma_pixels_tab;
+                }
+                if (s->mv_dir & MV_DIR_BACKWARD) {
+                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix);
+                }
             }else{
-                op_pix = s->dsp.put_no_rnd_pixels_tab;
-            }
-            if (s->mv_dir & MV_DIR_FORWARD) {
-                MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
-                op_pix = s->dsp.avg_pixels_tab;
-                op_qpix= s->me.qpel_avg;
-            }
-            if (s->mv_dir & MV_DIR_BACKWARD) {
-                MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
+                op_qpix= s->me.qpel_put;
+                if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
+                    op_pix = s->dsp.put_pixels_tab;
+                }else{
+                    op_pix = s->dsp.put_no_rnd_pixels_tab;
+                }
+                if (s->mv_dir & MV_DIR_FORWARD) {
+                    MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
+                    op_pix = s->dsp.avg_pixels_tab;
+                    op_qpix= s->me.qpel_avg;
+                }
+                if (s->mv_dir & MV_DIR_BACKWARD) {
+                    MPV_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f.data, op_pix, op_qpix);
+                }
             }
         }
 
@@ -2016,9 +2403,9 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                 if(s->encoding || !( s->msmpeg4_version || s->codec_id==CODEC_ID_MPEG1VIDEO || s->codec_id==CODEC_ID_MPEG2VIDEO
                    || (s->codec_id==CODEC_ID_MPEG4 && !s->mpeg_quant))){
                     add_dequant_dct(s, block[0], 0, dest_y , dct_linesize, s->qscale);
-                    add_dequant_dct(s, block[1], 1, dest_y + 8 , dct_linesize, s->qscale);
+                    add_dequant_dct(s, block[1], 1, dest_y + block_size, dct_linesize, s->qscale);
                     add_dequant_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize, s->qscale);
-                    add_dequant_dct(s, block[3], 3, dest_y + dct_offset + 8 , dct_linesize, s->qscale);
+                    add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
 
                     if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                         if (s->chroma_y_shift){
@@ -2035,9 +2422,9 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                     }
                 } else if(is_mpeg12 || (s->codec_id != CODEC_ID_WMV2)){
                     add_dct(s, block[0], 0, dest_y , dct_linesize);
-                    add_dct(s, block[1], 1, dest_y + 8 , dct_linesize);
+                    add_dct(s, block[1], 1, dest_y + block_size, dct_linesize);
                     add_dct(s, block[2], 2, dest_y + dct_offset , dct_linesize);
-                    add_dct(s, block[3], 3, dest_y + dct_offset + 8 , dct_linesize);
+                    add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
 
                     if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                         if(s->chroma_y_shift){//Chroma420
@@ -2046,17 +2433,17 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                         }else{
                             //chroma422
                             dct_linesize = uvlinesize << s->interlaced_dct;
-                            dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*8;
+                            dct_offset = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
 
                             add_dct(s, block[4], 4, dest_cb, dct_linesize);
                             add_dct(s, block[5], 5, dest_cr, dct_linesize);
                             add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
                             add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
                             if(!s->chroma_x_shift){//Chroma444
-                                add_dct(s, block[8], 8, dest_cb+8, dct_linesize);
-                                add_dct(s, block[9], 9, dest_cr+8, dct_linesize);
-                                add_dct(s, block[10], 10, dest_cb+8+dct_offset, dct_linesize);
-                                add_dct(s, block[11], 11, dest_cr+8+dct_offset, dct_linesize);
+                                add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
+                                add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
+                                add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
+                                add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
                             }
                         }
                     }//fi gray
@@ -2087,9 +2474,9 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                     }
                 }else{
                     s->dsp.idct_put(dest_y , dct_linesize, block[0]);
-                    s->dsp.idct_put(dest_y + 8 , dct_linesize, block[1]);
+                    s->dsp.idct_put(dest_y + block_size, dct_linesize, block[1]);
                     s->dsp.idct_put(dest_y + dct_offset , dct_linesize, block[2]);
-                    s->dsp.idct_put(dest_y + dct_offset + 8 , dct_linesize, block[3]);
+                    s->dsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
 
                     if(!CONFIG_GRAY || !(s->flags&CODEC_FLAG_GRAY)){
                         if(s->chroma_y_shift){
@@ -2098,17 +2485,17 @@ void MPV_decode_mb_internal(MpegEncContext *s, DCTELEM block[12][64],
                         }else{
 
                             dct_linesize = uvlinesize << s->interlaced_dct;
-                            dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*8;
+                            dct_offset = s->interlaced_dct? uvlinesize : uvlinesize*block_size;
 
                             s->dsp.idct_put(dest_cb, dct_linesize, block[4]);
                             s->dsp.idct_put(dest_cr, dct_linesize, block[5]);
                             s->dsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
                             s->dsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
                             if(!s->chroma_x_shift){//Chroma444
-                                s->dsp.idct_put(dest_cb + 8, dct_linesize, block[8]);
-                                s->dsp.idct_put(dest_cr + 8, dct_linesize, block[9]);
-                                s->dsp.idct_put(dest_cb + 8 + dct_offset, dct_linesize, block[10]);
-                                s->dsp.idct_put(dest_cr + 8 + dct_offset, dct_linesize, block[11]);
+                                s->dsp.idct_put(dest_cb + block_size, dct_linesize, block[8]);
+                                s->dsp.idct_put(dest_cr + block_size, dct_linesize, block[9]);
+                                s->dsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
+                                s->dsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
                             }
                         }
                     }//gray
@@ -2126,10 +2513,12 @@ skip_idct:
 void ff_MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]){
 #if !CONFIG_SMALL
     if(s->out_format == FMT_MPEG1) {
-        MPV_decode_mb_internal(s, block, 1);
+        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 1);
+        else                 MPV_decode_mb_internal(s, block, 0, 1);
     } else
 #endif
-        MPV_decode_mb_internal(s, block, 0);
+        if(s->avctx->lowres) MPV_decode_mb_internal(s, block, 1, 0);
+        else                 MPV_decode_mb_internal(s, block, 0, 0);
 }
 
 /**
@@ -1216,6 +1216,7 @@ AVCodec ff_msmpeg4v1_decoder = {
     .close = ff_h263_decode_end,
     .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
     .pix_fmts = ff_pixfmt_list_420,
 };
@@ -1229,6 +1230,7 @@ AVCodec ff_msmpeg4v2_decoder = {
     .close = ff_h263_decode_end,
     .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
     .pix_fmts = ff_pixfmt_list_420,
 };
@@ -1242,6 +1244,7 @@ AVCodec ff_msmpeg4v3_decoder = {
     .close = ff_h263_decode_end,
    .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
     .pix_fmts = ff_pixfmt_list_420,
 };
@@ -1255,6 +1258,7 @@ AVCodec ff_wmv1_decoder = {
     .close = ff_h263_decode_end,
     .decode = ff_h263_decode_frame,
     .capabilities = CODEC_CAP_DRAW_HORIZ_BAND | CODEC_CAP_DR1,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
     .pix_fmts = ff_pixfmt_list_420,
 };
@@ -339,4 +339,5 @@ AVCodec ff_mxpeg_decoder = {
     .close = mxpeg_decode_end,
     .decode = mxpeg_decode_frame,
     .capabilities = CODEC_CAP_DR1,
+    .max_lowres = 3,
 };
@@ -435,7 +435,8 @@ av_log(s->avctx, AV_LOG_DEBUG, "\n");*/
 //    s->obmc=1;
 //    s->umvplus=1;
     s->modified_quant=1;
-    s->loop_filter=1;
+    if(!s->avctx->lowres)
+        s->loop_filter=1;
 
     if(s->avctx->debug & FF_DEBUG_PICT_INFO){
         av_log(s->avctx, AV_LOG_INFO, "num:%5d x:%2d y:%2d type:%d qscale:%2d rnd:%d\n",
@@ -754,6 +755,7 @@ AVCodec ff_rv10_decoder = {
     .close = rv10_decode_end,
     .decode = rv10_decode_frame,
     .capabilities = CODEC_CAP_DR1,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
     .pix_fmts = ff_pixfmt_list_420,
 };
@@ -768,6 +770,7 @@ AVCodec ff_rv20_decoder = {
     .decode = rv10_decode_frame,
     .capabilities = CODEC_CAP_DR1 | CODEC_CAP_DELAY,
     .flush = ff_mpeg_flush,
+    .max_lowres = 3,
     .long_name = NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
     .pix_fmts = ff_pixfmt_list_420,
 };