
avcodec/mpegvideo_enc: Move lambda, lambda2 to MPVEncContext

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
Author: Andreas Rheinhardt
Date:   2025-03-19 12:50:06 +01:00
commit  d66e9cb0d2
parent  6ebc810e6f

8 changed files with 44 additions and 42 deletions
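
Note (not part of the patch, a minimal sketch for context): lambda is the Lagrange multiplier in FF_LAMBDA_SCALE units, lambda2 caches (lambda*lambda) >> FF_LAMBDA_SHIFT, and update_qscale() below keeps qscale and lambda2 in sync with lambda. That is why every site that used to read s->c.lambda / s->c.lambda2 now reads s->lambda / s->lambda2. The standalone helpers below mirror the expressions visible in the diff; their names are illustrative only, and the constants are copied from libavutil/avutil.h.

    #include <stdio.h>

    /* Constants as defined in libavutil/avutil.h. */
    #define FF_LAMBDA_SHIFT 7
    #define FF_LAMBDA_SCALE (1 << FF_LAMBDA_SHIFT)

    /* Illustrative helper: lambda2 = round(lambda^2 / FF_LAMBDA_SCALE),
     * as computed at the end of update_qscale(). */
    static unsigned int lambda_to_lambda2(unsigned int lambda)
    {
        return (lambda * lambda + FF_LAMBDA_SCALE / 2) >> FF_LAMBDA_SHIFT;
    }

    /* Illustrative helper: linear branch of update_qscale(),
     * qscale ~= lambda * 139 / 2^14, so qscale roughly tracks the QP. */
    static int lambda_to_qscale(unsigned int lambda)
    {
        return (lambda * 139 + FF_LAMBDA_SCALE * 64) >> (FF_LAMBDA_SHIFT + 7);
    }

    int main(void)
    {
        unsigned int lambda = 5 * 118; /* roughly QP 5 times FF_QP2LAMBDA (118) */
        printf("qscale %d, lambda2 %u\n",
               lambda_to_qscale(lambda), lambda_to_lambda2(lambda));
        return 0;
    }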

View File

@@ -52,7 +52,7 @@ static inline int get_p_cbp(MPVEncContext *const s,
     int best_cbpc_score = INT_MAX;
     int cbpc = (-1), cbpy = (-1);
     const int offset = (s->c.mv_type == MV_TYPE_16X16 ? 0 : 16) + (s->dquant ? 8 : 0);
-    const int lambda = s->c.lambda2 >> (FF_LAMBDA_SHIFT - 6);
+    const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
     for (int i = 0; i < 4; i++) {
         int score = ff_h263_inter_MCBPC_bits[i + offset] * lambda;

View File

@@ -902,9 +902,9 @@ void ff_estimate_p_frame_motion(MPVEncContext *const s,
     av_assert0(s->c.linesize == c->stride);
     av_assert0(s->c.uvlinesize == c->uvstride);
-    c->penalty_factor     = get_penalty_factor(s->c.lambda, s->c.lambda2, c->avctx->me_cmp);
-    c->sub_penalty_factor = get_penalty_factor(s->c.lambda, s->c.lambda2, c->avctx->me_sub_cmp);
-    c->mb_penalty_factor  = get_penalty_factor(s->c.lambda, s->c.lambda2, c->avctx->mb_cmp);
+    c->penalty_factor     = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
+    c->sub_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
+    c->mb_penalty_factor  = get_penalty_factor(s->lambda, s->lambda2, c->avctx->mb_cmp);
     c->current_mv_penalty = c->mv_penalty[s->c.f_code] + MAX_DMV;
     get_limits(s, 16*mb_x, 16*mb_y, 0);
@@ -968,14 +968,14 @@ void ff_estimate_p_frame_motion(MPVEncContext *const s,
     c->mc_mb_var_sum_temp += (vard+128)>>8;
     if (c->avctx->mb_decision > FF_MB_DECISION_SIMPLE) {
-        int p_score = FFMIN(vard, varc - 500 + (s->c.lambda2 >> FF_LAMBDA_SHIFT)*100);
-        int i_score = varc - 500 + (s->c.lambda2 >> FF_LAMBDA_SHIFT)*20;
+        int p_score = FFMIN(vard, varc - 500 + (s->lambda2 >> FF_LAMBDA_SHIFT)*100);
+        int i_score = varc - 500 + (s->lambda2 >> FF_LAMBDA_SHIFT)*20;
         c->scene_change_score+= ff_sqrt(p_score) - ff_sqrt(i_score);
     if (vard*2 + 200*256 > varc && !s->intra_penalty)
         mb_type|= CANDIDATE_MB_TYPE_INTRA;
     if (varc*2 + 200*256 > vard || s->c.qscale > 24){
-//      if (varc*2 + 200*256 + 50*(s->c.lambda2>>FF_LAMBDA_SHIFT) > vard){
+//      if (varc*2 + 200*256 + 50*(s->lambda2>>FF_LAMBDA_SHIFT) > vard){
         mb_type|= CANDIDATE_MB_TYPE_INTER;
         c->sub_motion_search(s, &mx, &my, dmin, 0, 0, 0, 16);
         if (s->mpv_flags & FF_MPV_FLAG_MV0)
@@ -1050,8 +1050,8 @@ void ff_estimate_p_frame_motion(MPVEncContext *const s,
     s->c.cur_pic.mb_type[mb_y*s->c.mb_stride + mb_x] = 0;
     {
-        int p_score = FFMIN(vard, varc-500+(s->c.lambda2>>FF_LAMBDA_SHIFT)*100);
-        int i_score = varc-500+(s->c.lambda2>>FF_LAMBDA_SHIFT)*20;
+        int p_score = FFMIN(vard, varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*100);
+        int i_score = varc-500+(s->lambda2>>FF_LAMBDA_SHIFT)*20;
         c->scene_change_score+= ff_sqrt(p_score) - ff_sqrt(i_score);
     }
 }
@@ -1071,7 +1071,7 @@ int ff_pre_estimate_p_frame_motion(MPVEncContext *const s,
     av_assert0(s->c.quarter_sample == 0 || s->c.quarter_sample == 1);
-    c->pre_penalty_factor = get_penalty_factor(s->c.lambda, s->c.lambda2, c->avctx->me_pre_cmp);
+    c->pre_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_pre_cmp);
     c->current_mv_penalty = c->mv_penalty[s->c.f_code] + MAX_DMV;
     get_limits(s, 16*mb_x, 16*mb_y, 0);
@@ -1510,9 +1510,9 @@ void ff_estimate_b_frame_motion(MPVEncContext *const s,
         return;
     }
-    c->penalty_factor    = get_penalty_factor(s->c.lambda, s->c.lambda2, c->avctx->me_cmp);
-    c->sub_penalty_factor= get_penalty_factor(s->c.lambda, s->c.lambda2, c->avctx->me_sub_cmp);
-    c->mb_penalty_factor = get_penalty_factor(s->c.lambda, s->c.lambda2, c->avctx->mb_cmp);
+    c->penalty_factor    = get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_cmp);
+    c->sub_penalty_factor= get_penalty_factor(s->lambda, s->lambda2, c->avctx->me_sub_cmp);
+    c->mb_penalty_factor = get_penalty_factor(s->lambda, s->lambda2, c->avctx->mb_cmp);
     if (s->c.codec_id == AV_CODEC_ID_MPEG4)
         dmin= direct_search(s, mb_x, mb_y);

View File

@@ -461,7 +461,7 @@ static inline int get_b_cbp(MPVEncContext *const s, int16_t block[6][64],
     if (s->mpv_flags & FF_MPV_FLAG_CBP_RD) {
         int score = 0;
-        const int lambda = s->c.lambda2 >> (FF_LAMBDA_SHIFT - 6);
+        const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
         for (i = 0; i < 6; i++) {
             if (s->coded_score[i] < 0) {

View File

@@ -166,8 +166,6 @@ typedef struct MpegEncContext {
     int qscale;                 ///< QP
     int chroma_qscale;          ///< chroma QP
-    unsigned int lambda;        ///< Lagrange multiplier used in rate distortion
-    unsigned int lambda2;       ///< (lambda*lambda) >> FF_LAMBDA_SHIFT
     int pict_type;              ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
     int droppable;

View File

@@ -204,7 +204,7 @@ static inline void update_qscale(MPVMainEncContext *const m)
         int best = 1;
         for (i = 0 ; i<FF_ARRAY_ELEMS(ff_mpeg2_non_linear_qscale); i++) {
-            int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->c.lambda * 139);
+            int diff = FFABS((ff_mpeg2_non_linear_qscale[i]<<(FF_LAMBDA_SHIFT + 6)) - (int)s->lambda * 139);
             if (ff_mpeg2_non_linear_qscale[i] < s->c.avctx->qmin ||
                 (ff_mpeg2_non_linear_qscale[i] > s->c.avctx->qmax && !m->vbv_ignore_qmax))
                 continue;
@@ -215,12 +215,12 @@ static inline void update_qscale(MPVMainEncContext *const m)
         }
         s->c.qscale = best;
     } else {
-        s->c.qscale = (s->c.lambda * 139 + FF_LAMBDA_SCALE * 64) >>
+        s->c.qscale = (s->lambda * 139 + FF_LAMBDA_SCALE * 64) >>
                       (FF_LAMBDA_SHIFT + 7);
         s->c.qscale = av_clip(s->c.qscale, s->c.avctx->qmin, m->vbv_ignore_qmax ? 31 : s->c.avctx->qmax);
     }
-    s->c.lambda2 = (s->c.lambda * s->c.lambda + FF_LAMBDA_SCALE / 2) >>
+    s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
                    FF_LAMBDA_SHIFT;
 }
@@ -260,8 +260,8 @@ static void update_duplicate_context_after_me(MPVEncContext *const dst,
     COPY(c.f_code);
     COPY(c.b_code);
     COPY(c.qscale);
-    COPY(c.lambda);
-    COPY(c.lambda2);
+    COPY(lambda);
+    COPY(lambda2);
     COPY(c.frame_pred_frame_dct); // FIXME don't set in encode_header
     COPY(c.progressive_frame);    // FIXME don't set in encode_header
     COPY(c.partitioned_frame);    // FIXME don't set in encode_header
@@ -1474,7 +1474,7 @@ static int skip_check(MPVMainEncContext *const m,
     if (score64 < m->frame_skip_threshold)
         return 1;
-    if (score64 < ((m->frame_skip_factor * (int64_t) s->c.lambda) >> 8))
+    if (score64 < ((m->frame_skip_factor * (int64_t) s->lambda) >> 8))
         return 1;
     return 0;
 }
@@ -1989,8 +1989,8 @@ vbv_retry:
         int min_step = hq ? 1 : (1<<(FF_LAMBDA_SHIFT + 7))/139;
         if (put_bits_count(&s->pb) > max_size &&
-            s->c.lambda < m->lmax) {
-            m->next_lambda = FFMAX(s->c.lambda + min_step, s->c.lambda *
+            s->lambda < m->lmax) {
+            m->next_lambda = FFMAX(s->lambda + min_step, s->lambda *
                                    (s->c.qscale + 1) / s->c.qscale);
             if (s->adaptive_quant) {
                 for (int i = 0; i < s->c.mb_height * s->c.mb_stride; i++)
@@ -2295,8 +2295,8 @@ static av_always_inline void encode_mb_internal(MPVEncContext *const s,
         const int last_qp = s->c.qscale;
         const int mb_xy = mb_x + mb_y * s->c.mb_stride;
-        s->c.lambda = s->lambda_table[mb_xy];
-        s->c.lambda2 = (s->c.lambda * s->c.lambda + FF_LAMBDA_SCALE / 2) >>
+        s->lambda = s->lambda_table[mb_xy];
+        s->lambda2 = (s->lambda * s->lambda + FF_LAMBDA_SCALE / 2) >>
                        FF_LAMBDA_SHIFT;
         if (!(s->mpv_flags & FF_MPV_FLAG_QP_RD)) {
@@ -2721,7 +2721,7 @@ static void encode_mb_hq(MPVEncContext *const s, MPVEncContext *const backup, MP
     if (s->c.avctx->mb_decision == FF_MB_DECISION_RD) {
         mpv_reconstruct_mb(s, s->c.block);
-        score *= s->c.lambda2;
+        score *= s->lambda2;
         score += sse_mb(s) << FF_LAMBDA_SHIFT;
     }
@@ -3648,10 +3648,10 @@ static int estimate_qp(MPVMainEncContext *const m, int dry_run)
             break;
         }
-        s->c.lambda= s->lambda_table[0];
+        s->lambda = s->lambda_table[0];
         //FIXME broken
     }else
-        s->c.lambda = s->c.cur_pic.ptr->f->quality;
+        s->lambda = s->c.cur_pic.ptr->f->quality;
     update_qscale(m);
     return 0;
 }
@@ -3692,7 +3692,7 @@ static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
     s->c.me.scene_change_score=0;
-//  s->c.lambda= s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
+//  s->lambda = s->c.cur_pic.ptr->quality; //FIXME qscale / ... stuff for ME rate distortion
     if (s->c.pict_type == AV_PICTURE_TYPE_I) {
         s->c.no_rounding = s->c.msmpeg4_version >= MSMP4_V3;
@@ -3707,9 +3707,9 @@ static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
         ff_get_2pass_fcode(m);
     } else if (!(s->c.avctx->flags & AV_CODEC_FLAG_QSCALE)) {
         if (s->c.pict_type == AV_PICTURE_TYPE_B)
-            s->c.lambda = m->last_lambda_for[s->c.pict_type];
+            s->lambda = m->last_lambda_for[s->c.pict_type];
         else
-            s->c.lambda = m->last_lambda_for[m->last_non_b_pict_type];
+            s->lambda = m->last_lambda_for[m->last_non_b_pict_type];
         update_qscale(m);
     }
@@ -3725,6 +3725,8 @@ static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
             ret = ff_update_duplicate_context(&slice->c, &s->c);
             if (ret < 0)
                 return ret;
+            slice->lambda  = s->lambda;
+            slice->lambda2 = s->lambda2;
         }
         slice->c.me.temp = slice->c.me.scratchpad = slice->c.sc.scratchpad_buf;
@@ -3737,8 +3739,8 @@ static int encode_picture(MPVMainEncContext *const m, const AVPacket *pkt)
     /* Estimate motion for every MB */
     if (s->c.pict_type != AV_PICTURE_TYPE_I) {
-        s->c.lambda = (s->c.lambda * m->me_penalty_compensation + 128) >> 8;
-        s->c.lambda2 = (s->c.lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
+        s->lambda = (s->lambda * m->me_penalty_compensation + 128) >> 8;
+        s->lambda2 = (s->lambda2 * (int64_t) m->me_penalty_compensation + 128) >> 8;
         if (s->c.pict_type != AV_PICTURE_TYPE_B) {
             if ((m->me_pre && m->last_non_b_pict_type == AV_PICTURE_TYPE_I) ||
                 m->me_pre == 2) {
@@ -3970,7 +3972,7 @@ static int dct_quantize_trellis_c(MPVEncContext *const s,
     int qmul, qadd, start_i, last_non_zero, i, dc;
    const int esc_length= s->ac_esc_length;
     const uint8_t *length, *last_length;
-    const int lambda = s->c.lambda2 >> (FF_LAMBDA_SHIFT - 6);
+    const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6);
     int mpeg2_qscale;
     s->fdsp.fdct(block);
@@ -4360,7 +4362,7 @@ static int dct_quantize_refine(MPVEncContext *const s, //FIXME breaks denoise?
         av_assert2(w<(1<<6));
         sum += w*w;
     }
-    lambda = sum*(uint64_t)s->c.lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
+    lambda = sum*(uint64_t)s->lambda2 >> (FF_LAMBDA_SHIFT - 6 + 6 + 6 + 6);
     run=0;
     rle_index=0;

View File

@@ -47,6 +47,8 @@ typedef struct MPVEncContext {
     /** bit output */
     PutBitContext pb;
+    unsigned int lambda;        ///< Lagrange multiplier used in rate distortion
+    unsigned int lambda2;       ///< (lambda*lambda) >> FF_LAMBDA_SHIFT
     int *lambda_table;
     int adaptive_quant;         ///< use adaptive quantization
     int dquant;                 ///< qscale difference to prev qscale

View File

@@ -1873,9 +1873,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     mpv->c.out_format = FMT_H263;
     mpv->c.unrestricted_mv = 1;
-    mpv->c.lambda = enc->lambda;
-    mpv->c.qscale = (mpv->c.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
-    enc->lambda2 = mpv->c.lambda2 = (mpv->c.lambda*mpv->c.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
+    mpv->lambda = enc->lambda;
+    mpv->c.qscale = (mpv->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
+    enc->lambda2 = mpv->lambda2 = (mpv->lambda*mpv->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
     mpv->c.qdsp = enc->qdsp; //move
     mpv->c.hdsp = s->hdsp;

View File

@@ -342,11 +342,11 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
         s2->me.scene_change_score = 0;
 //      s2->out_format            = FMT_H263;
 //      s2->unrestricted_mv       = 1;
-        s2->lambda  = s->quality;
-        s2->qscale  = s2->lambda * 139 +
+        s->m.lambda = s->quality;
+        s2->qscale  = s->m.lambda * 139 +
                       FF_LAMBDA_SCALE * 64 >>
                       FF_LAMBDA_SHIFT + 7;
-        s2->lambda2 = s2->lambda * s2->lambda +
+        s->m.lambda2 = s->m.lambda * s->m.lambda +
                       FF_LAMBDA_SCALE / 2 >>
                       FF_LAMBDA_SHIFT;
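
Net effect, sketched below with only the fields actually shown in the diff (all other members are omitted and the ordering is illustrative): lambda and lambda2 leave the shared MpegEncContext and become encoder-only state in MPVEncContext, which embeds the former as c. Code inside the encoder therefore drops the c. indirection (s->c.lambda becomes s->lambda), while code holding a bare MpegEncContext pointer, such as the s2 alias in svq1_encode_plane() above, now reaches the fields through the enclosing encoder context (s->m.lambda).

    /* Layout sketch after this commit; most members omitted, ordering illustrative. */
    typedef struct MpegEncContext {
        int qscale;             /* QP; lambda/lambda2 no longer live here      */
        int chroma_qscale;      /* chroma QP                                   */
    } MpegEncContext;

    typedef struct MPVEncContext {
        MpegEncContext c;       /* shared codec state, still reached via s->c. */
        unsigned int lambda;    /* Lagrange multiplier used in rate distortion */
        unsigned int lambda2;   /* (lambda*lambda) >> FF_LAMBDA_SHIFT          */
    } MPVEncContext;

This is also why encode_picture() now copies slice->lambda and slice->lambda2 explicitly after ff_update_duplicate_context(), which only duplicates the embedded MpegEncContext.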