avcodec/mpegvideo: Move [fb]_code to Mpeg4Dec and MPVEncContext
These fields are only used by the MPEG-4 decoder and the encoders. Notice that they are per-frame properties and therefore do not need to be synced in mpeg4_update_thread_context().

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
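For orientation before the diff, here is a minimal C sketch of the layout after this commit. It is not part of the commit itself: struct and member names are taken from the hunks below, all other members are elided, and the placeholder member exists only to keep the sketch compilable.

/* Sketch only: f_code/b_code leave the shared MpegEncContext and become
 * per-codec fields, since only the MPEG-4 decoder and the encoders use them. */

typedef struct MpegEncContext {
    int pict_type;   /* stands in for the many remaining shared fields;
                        f_code/b_code no longer live here */
} MpegEncContext;

typedef struct Mpeg4DecContext {
    MpegEncContext m;
    int f_code;      ///< forward MV resolution
    int b_code;      ///< backward MV resolution for B-frames
    /* ... */
} Mpeg4DecContext;

typedef struct MPVEncContext {
    int f_code;      ///< forward MV resolution
    int b_code;      ///< backward MV resolution for B-frames
    /* ... */
} MPVEncContext;

With each codec carrying its own copy, the MPEG-4 thread-update path can skip these per-frame values entirely, which is the point the commit message makes.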
@@ -910,7 +910,7 @@ void ff_estimate_p_frame_motion(MPVEncContext *const s,
 c->penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
 c->sub_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
 c->mb_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->mb_cmp);
-c->current_mv_penalty = c->mv_penalty[s->c.f_code] + MAX_DMV;
+c->current_mv_penalty = c->mv_penalty[s->f_code] + MAX_DMV;

 get_limits(s, 16*mb_x, 16*mb_y, 0);
 c->skip=0;
@@ -1077,7 +1077,7 @@ int ff_pre_estimate_p_frame_motion(MPVEncContext *const s,
 av_assert0(s->c.quarter_sample == 0 || s->c.quarter_sample == 1);

 c->pre_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_pre_cmp);
-c->current_mv_penalty = c->mv_penalty[s->c.f_code] + MAX_DMV;
+c->current_mv_penalty = c->mv_penalty[s->f_code] + MAX_DMV;

 get_limits(s, 16*mb_x, 16*mb_y, 0);
 c->skip=0;
@@ -1187,8 +1187,8 @@ static inline int check_bidir_mv(MPVEncContext *const s,
 //FIXME better f_code prediction (max mv & distance)
 //FIXME pointers
 MotionEstContext *const c = &s->me;
-const uint8_t * const mv_penalty_f = c->mv_penalty[s->c.f_code] + MAX_DMV; // f_code of the prev frame
-const uint8_t * const mv_penalty_b = c->mv_penalty[s->c.b_code] + MAX_DMV; // f_code of the prev frame
+const uint8_t * const mv_penalty_f = c->mv_penalty[s->f_code] + MAX_DMV; // f_code of the prev frame
+const uint8_t * const mv_penalty_b = c->mv_penalty[s->b_code] + MAX_DMV; // f_code of the prev frame
 int stride= c->stride;
 uint8_t *dest_y = c->scratchpad;
 const uint8_t *ptr;
@@ -1526,11 +1526,11 @@ void ff_estimate_b_frame_motion(MPVEncContext *const s,

 // FIXME penalty stuff for non-MPEG-4
 c->skip=0;
-fmin = estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, 0, s->c.f_code) +
+fmin = estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, 0, s->f_code) +
 3 * c->mb_penalty_factor;

 c->skip=0;
-bmin = estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, 2, s->c.b_code) +
+bmin = estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, 2, s->b_code) +
 2 * c->mb_penalty_factor;
 ff_dlog(c->avctx, " %d %d ", s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1]);

@@ -1541,11 +1541,11 @@ void ff_estimate_b_frame_motion(MPVEncContext *const s,
 if (c->avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
 //FIXME mb type penalty
 c->skip=0;
-c->current_mv_penalty= c->mv_penalty[s->c.f_code] + MAX_DMV;
+c->current_mv_penalty = c->mv_penalty[s->f_code] + MAX_DMV;
 fimin= interlaced_search(s, 0,
 s->b_field_mv_table[0], s->b_field_select_table[0],
 s->b_forw_mv_table[xy][0], s->b_forw_mv_table[xy][1], 0);
-c->current_mv_penalty= c->mv_penalty[s->c.b_code] + MAX_DMV;
+c->current_mv_penalty = c->mv_penalty[s->b_code] + MAX_DMV;
 bimin= interlaced_search(s, 2,
 s->b_field_mv_table[1], s->b_field_select_table[1],
 s->b_back_mv_table[xy][0], s->b_back_mv_table[xy][1], 0);
@@ -1661,7 +1661,7 @@ int ff_get_best_fcode(MPVMainEncContext *const m, const int16_t (*mv_table)[2],
 void ff_fix_long_p_mvs(MPVEncContext *const s, int type)
 {
 MotionEstContext *const c = &s->me;
-const int f_code= s->c.f_code;
+const int f_code= s->f_code;
 int y, range;
 av_assert0(s->c.pict_type == AV_PICTURE_TYPE_P);

@@ -358,7 +358,7 @@ static int mpeg1_encode_picture_header(MPVMainEncContext *const m)
 s->c.pict_type == AV_PICTURE_TYPE_B) {
 put_bits(&s->pb, 1, 0); /* half pel coordinates */
 if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO)
-put_bits(&s->pb, 3, s->c.f_code); /* forward_f_code */
+put_bits(&s->pb, 3, s->f_code); /* forward_f_code */
 else
 put_bits(&s->pb, 3, 7); /* forward_f_code */
 }
@@ -367,7 +367,7 @@ static int mpeg1_encode_picture_header(MPVMainEncContext *const m)
 if (s->c.pict_type == AV_PICTURE_TYPE_B) {
 put_bits(&s->pb, 1, 0); /* half pel coordinates */
 if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO)
-put_bits(&s->pb, 3, s->c.b_code); /* backward_f_code */
+put_bits(&s->pb, 3, s->b_code); /* backward_f_code */
 else
 put_bits(&s->pb, 3, 7); /* backward_f_code */
 }
@@ -380,14 +380,14 @@ static int mpeg1_encode_picture_header(MPVMainEncContext *const m)
 put_bits(&s->pb, 4, 8); /* pic ext */
 if (s->c.pict_type == AV_PICTURE_TYPE_P ||
 s->c.pict_type == AV_PICTURE_TYPE_B) {
-put_bits(&s->pb, 4, s->c.f_code);
-put_bits(&s->pb, 4, s->c.f_code);
+put_bits(&s->pb, 4, s->f_code);
+put_bits(&s->pb, 4, s->f_code);
 } else {
 put_bits(&s->pb, 8, 255);
 }
 if (s->c.pict_type == AV_PICTURE_TYPE_B) {
-put_bits(&s->pb, 4, s->c.b_code);
-put_bits(&s->pb, 4, s->c.b_code);
+put_bits(&s->pb, 4, s->b_code);
+put_bits(&s->pb, 4, s->b_code);
 } else {
 put_bits(&s->pb, 8, 255);
 }
@@ -748,11 +748,11 @@ static av_always_inline void mpeg1_encode_mb_internal(MPVEncContext *const s,
 // RAL: f_code parameter added
 mpeg1_encode_motion(s,
 motion_x - s->c.last_mv[0][0][0],
-s->c.f_code);
+s->f_code);
 // RAL: f_code parameter added
 mpeg1_encode_motion(s,
 motion_y - s->c.last_mv[0][0][1],
-s->c.f_code);
+s->f_code);
 s->mv_bits += get_bits_diff(s);
 }
 } else {
@@ -763,11 +763,11 @@ static av_always_inline void mpeg1_encode_mb_internal(MPVEncContext *const s,
 // RAL: f_code parameter added
 mpeg1_encode_motion(s,
 motion_x - s->c.last_mv[0][0][0],
-s->c.f_code);
+s->f_code);
 // RAL: f_code parameter added
 mpeg1_encode_motion(s,
 motion_y - s->c.last_mv[0][0][1],
-s->c.f_code);
+s->f_code);
 s->c.qscale -= s->dquant;
 s->mv_bits += get_bits_diff(s);
 }
@@ -793,10 +793,10 @@ static av_always_inline void mpeg1_encode_mb_internal(MPVEncContext *const s,
 put_bits(&s->pb, 1, s->c.field_select[0][i]);
 mpeg1_encode_motion(s,
 s->c.mv[0][i][0] - s->c.last_mv[0][i][0],
-s->c.f_code);
+s->f_code);
 mpeg1_encode_motion(s,
 s->c.mv[0][i][1] - (s->c.last_mv[0][i][1] >> 1),
-s->c.f_code);
+s->f_code);
 s->c.last_mv[0][i][0] = s->c.mv[0][i][0];
 s->c.last_mv[0][i][1] = 2 * s->c.mv[0][i][1];
 }
@@ -836,10 +836,10 @@ static av_always_inline void mpeg1_encode_mb_internal(MPVEncContext *const s,
 if (s->c.mv_dir & MV_DIR_FORWARD) {
 mpeg1_encode_motion(s,
 s->c.mv[0][0][0] - s->c.last_mv[0][0][0],
-s->c.f_code);
+s->f_code);
 mpeg1_encode_motion(s,
 s->c.mv[0][0][1] - s->c.last_mv[0][0][1],
-s->c.f_code);
+s->f_code);
 s->c.last_mv[0][0][0] =
 s->c.last_mv[0][1][0] = s->c.mv[0][0][0];
 s->c.last_mv[0][0][1] =
@@ -848,10 +848,10 @@ static av_always_inline void mpeg1_encode_mb_internal(MPVEncContext *const s,
 if (s->c.mv_dir & MV_DIR_BACKWARD) {
 mpeg1_encode_motion(s,
 s->c.mv[1][0][0] - s->c.last_mv[1][0][0],
-s->c.b_code);
+s->b_code);
 mpeg1_encode_motion(s,
 s->c.mv[1][0][1] - s->c.last_mv[1][0][1],
-s->c.b_code);
+s->b_code);
 s->c.last_mv[1][0][0] =
 s->c.last_mv[1][1][0] = s->c.mv[1][0][0];
 s->c.last_mv[1][0][1] =
@@ -881,10 +881,10 @@ static av_always_inline void mpeg1_encode_mb_internal(MPVEncContext *const s,
 put_bits(&s->pb, 1, s->c.field_select[0][i]);
 mpeg1_encode_motion(s,
 s->c.mv[0][i][0] - s->c.last_mv[0][i][0],
-s->c.f_code);
+s->f_code);
 mpeg1_encode_motion(s,
 s->c.mv[0][i][1] - (s->c.last_mv[0][i][1] >> 1),
-s->c.f_code);
+s->f_code);
 s->c.last_mv[0][i][0] = s->c.mv[0][i][0];
 s->c.last_mv[0][i][1] = s->c.mv[0][i][1] * 2;
 }
@@ -894,10 +894,10 @@ static av_always_inline void mpeg1_encode_mb_internal(MPVEncContext *const s,
 put_bits(&s->pb, 1, s->c.field_select[1][i]);
 mpeg1_encode_motion(s,
 s->c.mv[1][i][0] - s->c.last_mv[1][i][0],
-s->c.b_code);
+s->b_code);
 mpeg1_encode_motion(s,
 s->c.mv[1][i][1] - (s->c.last_mv[1][i][1] >> 1),
-s->c.b_code);
+s->b_code);
 s->c.last_mv[1][i][0] = s->c.mv[1][i][0];
 s->c.last_mv[1][i][1] = s->c.mv[1][i][1] * 2;
 }
@@ -421,7 +421,7 @@ static inline int mpeg4_is_resync(Mpeg4DecContext *ctx)

 s->gb = gb;

-if (len >= ff_mpeg4_get_video_packet_prefix_length(s->pict_type, s->f_code, s->b_code))
+if (len >= ff_mpeg4_get_video_packet_prefix_length(s->pict_type, ctx->f_code, ctx->b_code))
 return mb_num;
 }
 }
@@ -714,7 +714,7 @@ int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx)
 if (get_bits1(&s->gb))
 break;

-if (len != ff_mpeg4_get_video_packet_prefix_length(s->pict_type, s->f_code, s->b_code)) {
+if (len != ff_mpeg4_get_video_packet_prefix_length(s->pict_type, ctx->f_code, ctx->b_code)) {
 av_log(s->avctx, AV_LOG_ERROR, "marker does not match f_code\n");
 return AVERROR_INVALIDDATA;
 }
@@ -844,7 +844,7 @@ static inline int get_amv(Mpeg4DecContext *ctx, int n)
 {
 MpegEncContext *s = &ctx->m;
 int x, y, mb_v, sum, dx, dy, shift;
-int len = 1 << (s->f_code + 4);
+int len = 1 << (ctx->f_code + 4);
 const int a = ctx->sprite_warping_accuracy;

 if (s->workaround_bugs & FF_BUG_AMV)
@@ -1117,11 +1117,11 @@ try_again:

 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
 if (!s->mcsel) {
-mx = ff_h263_decode_motion(s, pred_x, s->f_code);
+mx = ff_h263_decode_motion(s, pred_x, ctx->f_code);
 if (mx >= 0xffff)
 return AVERROR_INVALIDDATA;

-my = ff_h263_decode_motion(s, pred_y, s->f_code);
+my = ff_h263_decode_motion(s, pred_y, ctx->f_code);
 if (my >= 0xffff)
 return AVERROR_INVALIDDATA;
 s->cur_pic.mb_type[xy] = MB_TYPE_16x16 |
@@ -1148,11 +1148,11 @@ try_again:
 MB_TYPE_FORWARD_MV;
 for (i = 0; i < 4; i++) {
 int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
-mx = ff_h263_decode_motion(s, pred_x, s->f_code);
+mx = ff_h263_decode_motion(s, pred_x, ctx->f_code);
 if (mx >= 0xffff)
 return AVERROR_INVALIDDATA;

-my = ff_h263_decode_motion(s, pred_y, s->f_code);
+my = ff_h263_decode_motion(s, pred_y, ctx->f_code);
 if (my >= 0xffff)
 return AVERROR_INVALIDDATA;
 mot_val[0] = mx;
@@ -1792,11 +1792,11 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);

 for (i = 0; i < 2; i++) {
-mx = ff_h263_decode_motion(s, pred_x, s->f_code);
+mx = ff_h263_decode_motion(s, pred_x, ctx->f_code);
 if (mx >= 0xffff)
 return AVERROR_INVALIDDATA;

-my = ff_h263_decode_motion(s, pred_y / 2, s->f_code);
+my = ff_h263_decode_motion(s, pred_y / 2, ctx->f_code);
 if (my >= 0xffff)
 return AVERROR_INVALIDDATA;

@@ -1808,12 +1808,12 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
 /* 16x16 motion prediction */
 s->mv_type = MV_TYPE_16X16;
 ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y);
-mx = ff_h263_decode_motion(s, pred_x, s->f_code);
+mx = ff_h263_decode_motion(s, pred_x, ctx->f_code);

 if (mx >= 0xffff)
 return AVERROR_INVALIDDATA;

-my = ff_h263_decode_motion(s, pred_y, s->f_code);
+my = ff_h263_decode_motion(s, pred_y, ctx->f_code);

 if (my >= 0xffff)
 return AVERROR_INVALIDDATA;
@@ -1825,11 +1825,11 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
 s->mv_type = MV_TYPE_8X8;
 for (i = 0; i < 4; i++) {
 int16_t *mot_val = ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y);
-mx = ff_h263_decode_motion(s, pred_x, s->f_code);
+mx = ff_h263_decode_motion(s, pred_x, ctx->f_code);
 if (mx >= 0xffff)
 return AVERROR_INVALIDDATA;

-my = ff_h263_decode_motion(s, pred_y, s->f_code);
+my = ff_h263_decode_motion(s, pred_y, ctx->f_code);
 if (my >= 0xffff)
 return AVERROR_INVALIDDATA;
 s->mv[0][i][0] = mx;
@@ -1927,8 +1927,8 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
 if (HAS_FORWARD_MV(mb_type)) {
 s->mv_dir = MV_DIR_FORWARD;

-mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], s->f_code);
-my = ff_h263_decode_motion(s, s->last_mv[0][0][1], s->f_code);
+mx = ff_h263_decode_motion(s, s->last_mv[0][0][0], ctx->f_code);
+my = ff_h263_decode_motion(s, s->last_mv[0][0][1], ctx->f_code);
 s->last_mv[0][1][0] =
 s->last_mv[0][0][0] =
 s->mv[0][0][0] = mx;
@@ -1940,8 +1940,8 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
 if (HAS_BACKWARD_MV(mb_type)) {
 s->mv_dir |= MV_DIR_BACKWARD;

-mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], s->b_code);
-my = ff_h263_decode_motion(s, s->last_mv[1][0][1], s->b_code);
+mx = ff_h263_decode_motion(s, s->last_mv[1][0][0], ctx->b_code);
+my = ff_h263_decode_motion(s, s->last_mv[1][0][1], ctx->b_code);
 s->last_mv[1][1][0] =
 s->last_mv[1][0][0] =
 s->mv[1][0][0] = mx;
@@ -1956,8 +1956,8 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
 s->mv_dir = MV_DIR_FORWARD;

 for (i = 0; i < 2; i++) {
-mx = ff_h263_decode_motion(s, s->last_mv[0][i][0], s->f_code);
-my = ff_h263_decode_motion(s, s->last_mv[0][i][1] / 2, s->f_code);
+mx = ff_h263_decode_motion(s, s->last_mv[0][i][0], ctx->f_code);
+my = ff_h263_decode_motion(s, s->last_mv[0][i][1] / 2, ctx->f_code);
 s->last_mv[0][i][0] =
 s->mv[0][i][0] = mx;
 s->last_mv[0][i][1] = (s->mv[0][i][1] = my) * 2;
@@ -1968,8 +1968,8 @@ static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
 s->mv_dir |= MV_DIR_BACKWARD;

 for (i = 0; i < 2; i++) {
-mx = ff_h263_decode_motion(s, s->last_mv[1][i][0], s->b_code);
-my = ff_h263_decode_motion(s, s->last_mv[1][i][1] / 2, s->b_code);
+mx = ff_h263_decode_motion(s, s->last_mv[1][i][0], ctx->b_code);
+my = ff_h263_decode_motion(s, s->last_mv[1][i][1] / 2, ctx->b_code);
 s->last_mv[1][i][0] =
 s->mv[1][i][0] = mx;
 s->last_mv[1][i][1] = (s->mv[1][i][1] = my) * 2;
@@ -3355,8 +3355,8 @@ static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb,
 }
 }

-s->f_code = 1;
-s->b_code = 1;
+ctx->f_code = 1;
+ctx->b_code = 1;
 if (ctx->shape != BIN_ONLY_SHAPE) {
 s->chroma_qscale = s->qscale = get_bits(gb, ctx->quant_precision);
 if (s->qscale == 0) {
@@ -3366,21 +3366,21 @@ static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb,
 }

 if (s->pict_type != AV_PICTURE_TYPE_I) {
-s->f_code = get_bits(gb, 3); /* fcode_for */
-if (s->f_code == 0) {
+ctx->f_code = get_bits(gb, 3); /* fcode_for */
+if (ctx->f_code == 0) {
 av_log(s->avctx, AV_LOG_ERROR,
 "Error, header damaged or not MPEG-4 header (f_code=0)\n");
-s->f_code = 1;
+ctx->f_code = 1;
 return AVERROR_INVALIDDATA; // makes no sense to continue, as there is nothing left from the image then
 }
 }

 if (s->pict_type == AV_PICTURE_TYPE_B) {
-s->b_code = get_bits(gb, 3);
-if (s->b_code == 0) {
+ctx->b_code = get_bits(gb, 3);
+if (ctx->b_code == 0) {
 av_log(s->avctx, AV_LOG_ERROR,
 "Error, header damaged or not MPEG4 header (b_code=0)\n");
-s->b_code=1;
+ctx->b_code=1;
 return AVERROR_INVALIDDATA; // makes no sense to continue, as the MV decoding will break very quickly
 }
 }
@@ -3388,7 +3388,7 @@ static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb,
 if (s->avctx->debug & FF_DEBUG_PICT_INFO) {
 av_log(s->avctx, AV_LOG_DEBUG,
 "qp:%d fc:%d,%d %c size:%d pro:%d alt:%d top:%d %cpel part:%d resync:%d w:%d a:%d rnd:%d vot:%d%s dc:%d ce:%d/%d/%d time:%"PRId64" tincr:%d\n",
-s->qscale, s->f_code, s->b_code,
+s->qscale, ctx->f_code, ctx->b_code,
 s->pict_type == AV_PICTURE_TYPE_I ? 'I' : (s->pict_type == AV_PICTURE_TYPE_P ? 'P' : (s->pict_type == AV_PICTURE_TYPE_B ? 'B' : 'S')),
 gb->size_in_bits,s->progressive_sequence, s->alternate_scan,
 s->top_field_first, s->quarter_sample ? 'q' : 'h',
@@ -34,6 +34,9 @@
 typedef struct Mpeg4DecContext {
 MpegEncContext m;

+int f_code; ///< forward MV resolution
+int b_code; ///< backward MV resolution for B-frames
+
 /// number of bits to represent the fractional part of time
 int time_increment_bits;
 int shape;
@@ -589,7 +589,7 @@ static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
 ff_h263_encode_motion_vector(s,
 s->c.mv[0][0][0] - s->c.last_mv[0][0][0],
 s->c.mv[0][0][1] - s->c.last_mv[0][0][1],
-s->c.f_code);
+s->f_code);
 s->c.last_mv[0][0][0] =
 s->c.last_mv[0][1][0] = s->c.mv[0][0][0];
 s->c.last_mv[0][0][1] =
@@ -599,7 +599,7 @@ static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
 ff_h263_encode_motion_vector(s,
 s->c.mv[1][0][0] - s->c.last_mv[1][0][0],
 s->c.mv[1][0][1] - s->c.last_mv[1][0][1],
-s->c.b_code);
+s->b_code);
 s->c.last_mv[1][0][0] =
 s->c.last_mv[1][1][0] = s->c.mv[1][0][0];
 s->c.last_mv[1][0][1] =
@@ -619,7 +619,7 @@ static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
 ff_h263_encode_motion_vector(s,
 s->c.mv[0][i][0] - s->c.last_mv[0][i][0],
 s->c.mv[0][i][1] - s->c.last_mv[0][i][1] / 2,
-s->c.f_code);
+s->f_code);
 s->c.last_mv[0][i][0] = s->c.mv[0][i][0];
 s->c.last_mv[0][i][1] = s->c.mv[0][i][1] * 2;
 }
@@ -629,7 +629,7 @@ static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
 ff_h263_encode_motion_vector(s,
 s->c.mv[1][i][0] - s->c.last_mv[1][i][0],
 s->c.mv[1][i][1] - s->c.last_mv[1][i][1] / 2,
-s->c.b_code);
+s->b_code);
 s->c.last_mv[1][i][0] = s->c.mv[1][i][0];
 s->c.last_mv[1][i][1] = s->c.mv[1][i][1] * 2;
 }
@@ -741,7 +741,7 @@ static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
 ff_h263_encode_motion_vector(s,
 motion_x - pred_x,
 motion_y - pred_y,
-s->c.f_code);
+s->f_code);
 } else if (s->c.mv_type == MV_TYPE_FIELD) {
 if (s->dquant)
 cbpc += 8;
@@ -771,11 +771,11 @@ static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
 ff_h263_encode_motion_vector(s,
 s->c.mv[0][0][0] - pred_x,
 s->c.mv[0][0][1] - pred_y,
-s->c.f_code);
+s->f_code);
 ff_h263_encode_motion_vector(s,
 s->c.mv[0][1][0] - pred_x,
 s->c.mv[0][1][1] - pred_y,
-s->c.f_code);
+s->f_code);
 } else {
 av_assert2(s->c.mv_type == MV_TYPE_8X8);
 put_bits(&s->pb,
@@ -796,7 +796,7 @@ static void mpeg4_encode_mb(MPVEncContext *const s, int16_t block[][64],
 ff_h263_encode_motion_vector(s,
 s->c.cur_pic.motion_val[0][s->c.block_index[i]][0] - pred_x,
 s->c.cur_pic.motion_val[0][s->c.block_index[i]][1] - pred_y,
-s->c.f_code);
+s->f_code);
 }
 }

@@ -1116,9 +1116,9 @@ static int mpeg4_encode_picture_header(MPVMainEncContext *const m)
 put_bits(&s->pb, 5, s->c.qscale);

 if (s->c.pict_type != AV_PICTURE_TYPE_I)
-put_bits(&s->pb, 3, s->c.f_code); /* fcode_for */
+put_bits(&s->pb, 3, s->f_code); /* fcode_for */
 if (s->c.pict_type == AV_PICTURE_TYPE_B)
-put_bits(&s->pb, 3, s->c.b_code); /* fcode_back */
+put_bits(&s->pb, 3, s->b_code); /* fcode_back */

 return 0;
 }
@@ -1393,7 +1393,7 @@ void ff_mpeg4_encode_video_packet_header(MPVEncContext *const s)
 {
 int mb_num_bits = av_log2(s->c.mb_num - 1) + 1;

-put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s->c.pict_type, s->c.f_code, s->c.b_code), 0);
+put_bits(&s->pb, ff_mpeg4_get_video_packet_prefix_length(s->c.pict_type, s->f_code, s->b_code), 0);
 put_bits(&s->pb, 1, 1);

 put_bits(&s->pb, mb_num_bits, s->c.mb_x + s->c.mb_y * s->c.mb_width);
@@ -179,8 +179,6 @@ typedef struct MpegEncContext {
 QpelDSPContext qdsp;
 VideoDSPContext vdsp;
 H263DSPContext h263dsp;
-int f_code; ///< forward MV resolution
-int b_code; ///< backward MV resolution for B-frames (MPEG-4)
 int16_t (*p_field_mv_table_base)[2];
 int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced P-frame encoding

@@ -258,8 +258,8 @@ static void update_duplicate_context_after_me(MPVEncContext *const dst,
 {
 #define COPY(a) dst->a = src->a
 COPY(c.pict_type);
-COPY(c.f_code);
-COPY(c.b_code);
+COPY(f_code);
+COPY(b_code);
 COPY(c.qscale);
 COPY(lambda);
 COPY(lambda2);
@@ -285,8 +285,8 @@ static av_cold void mpv_encode_defaults(MPVMainEncContext *const m)

 ff_mpv_common_defaults(&s->c);

-s->c.f_code = 1;
-s->c.b_code = 1;
+s->f_code = 1;
+s->b_code = 1;

 if (!m->fcode_tab) {
 m->fcode_tab = default_fcode_tab + MAX_MV;
@@ -3794,23 +3794,23 @@ static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)

 if (!s->c.umvplus) {
 if (s->c.pict_type == AV_PICTURE_TYPE_P || s->c.pict_type == AV_PICTURE_TYPE_S) {
-s->c.f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);
+s->f_code = ff_get_best_fcode(m, s->p_mv_table, CANDIDATE_MB_TYPE_INTER);

 if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
 int a,b;
 a = ff_get_best_fcode(m, s->c.p_field_mv_table[0][0], CANDIDATE_MB_TYPE_INTER_I); //FIXME field_select
 b = ff_get_best_fcode(m, s->c.p_field_mv_table[1][1], CANDIDATE_MB_TYPE_INTER_I);
-s->c.f_code = FFMAX3(s->c.f_code, a, b);
+s->f_code = FFMAX3(s->f_code, a, b);
 }

 ff_fix_long_p_mvs(s, s->intra_penalty ? CANDIDATE_MB_TYPE_INTER : CANDIDATE_MB_TYPE_INTRA);
-ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->c.f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
+ff_fix_long_mvs(s, NULL, 0, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, !!s->intra_penalty);
 if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
 int j;
 for(i=0; i<2; i++){
 for(j=0; j<2; j++)
 ff_fix_long_mvs(s, s->p_field_select_table[i], j,
-s->c.p_field_mv_table[i][j], s->c.f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
+s->c.p_field_mv_table[i][j], s->f_code, CANDIDATE_MB_TYPE_INTER_I, !!s->intra_penalty);
 }
 }
 } else if (s->c.pict_type == AV_PICTURE_TYPE_B) {
@@ -3818,16 +3818,16 @@ static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)

 a = ff_get_best_fcode(m, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD);
 b = ff_get_best_fcode(m, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR);
-s->c.f_code = FFMAX(a, b);
+s->f_code = FFMAX(a, b);

 a = ff_get_best_fcode(m, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD);
 b = ff_get_best_fcode(m, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR);
-s->c.b_code = FFMAX(a, b);
+s->b_code = FFMAX(a, b);

-ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->c.f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
-ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->c.b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
-ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->c.f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
-ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->c.b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
+ff_fix_long_mvs(s, NULL, 0, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1);
+ff_fix_long_mvs(s, NULL, 0, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1);
+ff_fix_long_mvs(s, NULL, 0, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1);
+ff_fix_long_mvs(s, NULL, 0, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1);
 if (s->c.avctx->flags & AV_CODEC_FLAG_INTERLACED_ME) {
 int dir, j;
 for(dir=0; dir<2; dir++){
@@ -3836,7 +3836,7 @@ static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
 int type= dir ? (CANDIDATE_MB_TYPE_BACKWARD_I|CANDIDATE_MB_TYPE_BIDIR_I)
 : (CANDIDATE_MB_TYPE_FORWARD_I |CANDIDATE_MB_TYPE_BIDIR_I);
 ff_fix_long_mvs(s, s->b_field_select_table[dir][i], j,
-s->b_field_mv_table[dir][i][j], dir ? s->c.b_code : s->c.f_code, type, 1);
+s->b_field_mv_table[dir][i][j], dir ? s->b_code : s->f_code, type, 1);
 }
 }
 }
@@ -74,6 +74,9 @@ typedef struct MPVEncContext {
 PixblockDSPContext pdsp;
 MotionEstContext me;

+int f_code; ///< forward MV resolution
+int b_code; ///< backward MV resolution for B-frames
+
 int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) P-frame
 int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode B-frame
 int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode B-frame
@@ -349,7 +349,7 @@ static void msmpeg4v2_encode_motion(MPVEncContext *const s, int val)
 /* zero vector; corresponds to ff_mvtab[0] */
 put_bits(&s->pb, 1, 0x1);
 } else {
-bit_size = s->c.f_code - 1;
+bit_size = s->f_code - 1;
 range = 1 << bit_size;
 if (val <= -64)
 val += 64;
@@ -80,8 +80,8 @@ static int nvdec_mpeg4_start_frame(AVCodecContext *avctx,
 .vop_rounding_type = s->no_rounding,
 .alternate_vertical_scan_flag = s->alternate_scan,
 .interlaced = !s->progressive_sequence,
-.vop_fcode_forward = s->f_code,
-.vop_fcode_backward = s->b_code,
+.vop_fcode_forward = m->f_code,
+.vop_fcode_backward = m->b_code,
 .trd = { s->pp_time, s->pp_field_time >> 1 },
 .trb = { s->pb_time, s->pb_field_time >> 1 },

@@ -49,8 +49,8 @@ void ff_write_pass1_stats(MPVMainEncContext *const m)
 s->p_tex_bits,
 s->mv_bits,
 s->misc_bits,
-s->c.f_code,
-s->c.b_code,
+s->f_code,
+s->b_code,
 m->mc_mb_var_sum,
 m->mb_var_sum,
 s->i_count,
@@ -903,8 +903,8 @@ void ff_get_2pass_fcode(MPVMainEncContext *const m)
 const RateControlContext *rcc = &m->rc_context;
 const RateControlEntry *rce = &rcc->entry[s->c.picture_number];

-s->c.f_code = rce->f_code;
-s->c.b_code = rce->b_code;
+s->f_code = rce->f_code;
+s->b_code = rce->b_code;
 }

 // FIXME rd or at least approx for dquant
@@ -997,8 +997,8 @@ float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
 rce->mc_mb_var_sum = m->mc_mb_var_sum;
 rce->mb_var_sum = m->mb_var_sum;
 rce->qscale = FF_QP2LAMBDA * 2;
-rce->f_code = s->c.f_code;
-rce->b_code = s->c.b_code;
+rce->f_code = s->f_code;
+rce->b_code = s->b_code;
 rce->misc_bits = 1;

 bits = predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
@@ -48,7 +48,7 @@ int ff_rv20_encode_picture_header(MPVMainEncContext *const m)

 put_bits(&s->pb, 1, s->c.no_rounding);

-av_assert0(s->c.f_code == 1);
+av_assert0(s->f_code == 1);
 av_assert0(!s->c.unrestricted_mv);
 av_assert0(!s->c.alt_inter_vlc);
 av_assert0(!s->c.umvplus);
@@ -445,7 +445,7 @@ static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
 c->penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp);
 c->sub_penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp);
 c->mb_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp);
-c->current_mv_penalty = c->mv_penalty[enc->m.s.c.f_code=1] + MAX_DMV;
+c->current_mv_penalty = c->mv_penalty[enc->m.s.f_code=1] + MAX_DMV;

 c->xmin = - x*block_w - 16+3;
 c->ymin = - y*block_w - 16+3;
@@ -1863,7 +1863,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 mpv->c.mb_height = block_height;
 mpv->c.mb_stride = mpv->c.mb_width + 1;
 mpv->c.b8_stride = 2 * mpv->c.mb_width + 1;
-mpv->c.f_code = 1;
+mpv->f_code = 1;
 mpv->c.pict_type = pic->pict_type;
 mpv->me.motion_est = enc->motion_est;
 mpv->me.dia_size = avctx->dia_size;
@@ -337,7 +337,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
 s2->mb_height = block_height;
 s2->mb_stride = s2->mb_width + 1;
 s2->b8_stride = 2 * s2->mb_width + 1;
-s2->f_code = 1;
+s->m.f_code = 1;
 s2->pict_type = s->pict_type;
 s->m.me.scene_change_score = 0;
 // s2->out_format = FMT_H263;
@@ -400,7 +400,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
 }

 ff_fix_long_p_mvs(&s->m, CANDIDATE_MB_TYPE_INTRA);
-ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s2->f_code,
+ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code,
 CANDIDATE_MB_TYPE_INTER, 0);
 }

@@ -87,8 +87,8 @@ static int vaapi_mpeg4_start_frame(AVCodecContext *avctx,
 .top_field_first = s->top_field_first,
 .alternate_vertical_scan_flag = s->alternate_scan,
 },
-.vop_fcode_forward = s->f_code,
-.vop_fcode_backward = s->b_code,
+.vop_fcode_forward = ctx->f_code,
+.vop_fcode_backward = ctx->b_code,
 .vop_time_increment_resolution = avctx->framerate.num,
 .num_macroblocks_in_gob = s->mb_width * H263_GOB_HEIGHT(s->height),
 .num_gobs_in_vop =
@@ -64,8 +64,8 @@ static int vdpau_mpeg4_start_frame(AVCodecContext *avctx,
 info->trd[1] = s->pp_field_time >> 1;
 info->trb[1] = s->pb_field_time >> 1;
 info->vop_time_increment_resolution = s->avctx->framerate.num;
-info->vop_fcode_forward = s->f_code;
-info->vop_fcode_backward = s->b_code;
+info->vop_fcode_forward = ctx->f_code;
+info->vop_fcode_backward = ctx->b_code;
 info->resync_marker_disable = !ctx->resync_marker;
 info->interlaced = !s->progressive_sequence;
 info->quant_type = s->mpeg_quant;