Mirror of https://github.com/FFmpeg/FFmpeg.git
avcodec/mpegvideo: Move max_b_frames to MPVMainEncContext
Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
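The pattern behind every hunk below is the same: max_b_frames is encoder-wide state, so it leaves the per-slice MpegEncContext and moves into MPVMainEncContext, which embeds an MpegEncContext as its member s (the diff reads it through &m->s and &m4->m.s). Code that already holds the main context reads m->max_b_frames directly; slice-level code first recovers the main context via slice_to_mainenc(). A minimal standalone sketch of that layout, with simplified struct bodies and a hypothetical helper rather than the real definitions:

    /* Sketch only: the real structs carry far more state. */
    typedef struct MpegEncContext {
        int encoding;          /* shared per-slice codec state, ... */
    } MpegEncContext;

    typedef struct MPVMainEncContext {
        MpegEncContext s;      /* embedded slice-level context */
        int max_b_frames;      /* moved here by this commit */
    } MPVMainEncContext;

    /* Hypothetical helper mirroring the avctx->delay computation in
     * ff_mpv_encode_init(): with the main context at hand, the field
     * is read directly from it. */
    static int encoding_delay(const MPVMainEncContext *m, int low_delay)
    {
        return low_delay ? 0 : m->max_b_frames + 1;
    }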
@@ -650,11 +650,11 @@ static void mpeg4_encode_mb(MpegEncContext *const s, int16_t block[][64],
 
             if ((cbp | motion_x | motion_y | s->dquant) == 0 &&
                 s->mv_type == MV_TYPE_16X16) {
+                const MPVMainEncContext *const m = slice_to_mainenc(s);
                 /* Check if the B-frames can skip it too, as we must skip it
                  * if we skip here why didn't they just compress
                  * the skip-mb bits instead of reusing them ?! */
-                if (s->max_b_frames > 0) {
-                    int i;
+                if (m->max_b_frames > 0) {
                     int x, y, offset;
                     const uint8_t *p_pic;
 
@@ -665,7 +665,7 @@ static void mpeg4_encode_mb(MpegEncContext *const s, int16_t block[][64],
                     p_pic = s->new_pic->data[0] + offset;
 
                     s->mb_skipped = 1;
-                    for (i = 0; i < s->max_b_frames; i++) {
+                    for (int i = 0; i < m->max_b_frames; i++) {
                         const uint8_t *b_pic;
                         int diff;
                         const MPVPicture *pic = s->reordered_input_picture[i + 1];
@@ -929,14 +929,15 @@ static void mpeg4_encode_gop_header(MpegEncContext *s)
     ff_mpeg4_stuffing(&s->pb);
 }
 
-static void mpeg4_encode_visual_object_header(MpegEncContext *s)
+static void mpeg4_encode_visual_object_header(MPVMainEncContext *const m)
 {
+    MpegEncContext *const s = &m->s;
     int profile_and_level_indication;
     int vo_ver_id;
 
     if (s->avctx->profile != AV_PROFILE_UNKNOWN) {
         profile_and_level_indication = s->avctx->profile << 4;
-    } else if (s->max_b_frames || s->quarter_sample) {
+    } else if (m->max_b_frames || s->quarter_sample) {
         profile_and_level_indication = 0xF0; // adv simple
     } else {
         profile_and_level_indication = 0x00; // simple
@@ -978,7 +979,7 @@ static void mpeg4_encode_vol_header(Mpeg4EncContext *const m4,
     MpegEncContext *const s = &m4->m.s;
     int vo_ver_id, vo_type, aspect_ratio_info;
 
-    if (s->max_b_frames || s->quarter_sample) {
+    if (m4->m.max_b_frames || s->quarter_sample) {
         vo_ver_id = 5;
         vo_type = ADV_SIMPLE_VO_TYPE;
     } else {
@@ -1072,7 +1073,7 @@ static int mpeg4_encode_picture_header(MPVMainEncContext *const m)
     if (s->pict_type == AV_PICTURE_TYPE_I) {
         if (!(s->avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER)) {
             if (s->avctx->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT) // HACK, the reference sw is buggy
-                mpeg4_encode_visual_object_header(s);
+                mpeg4_encode_visual_object_header(m);
             if (s->avctx->strict_std_compliance < FF_COMPLIANCE_VERY_STRICT || s->picture_number == 0) // HACK, the reference sw is buggy
                 mpeg4_encode_vol_header(m4, 0, 0);
         }
@@ -1339,7 +1340,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
             return AVERROR(ENOMEM);
        init_put_bits(&s->pb, s->avctx->extradata, 1024);
 
-       mpeg4_encode_visual_object_header(s);
+       mpeg4_encode_visual_object_header(m);
        mpeg4_encode_vol_header(m4, 0, 0);
 
        // ff_mpeg4_stuffing(&s->pb); ?
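mpeg4_encode_mb() receives only the slice-level MpegEncContext, so before it can test max_b_frames it has to get back to the enclosing main context; the first hunk does that with slice_to_mainenc(s). The helper itself is not shown in this diff. A common way to recover a container from an embedded member, assuming the MpegEncContext passed in is the one embedded as MPVMainEncContext.s, is an offsetof() cast like this sketch (hypothetical, not FFmpeg's actual definition):

    #include <stddef.h>

    /* Sketch of a container lookup: only valid for the MpegEncContext that is
     * actually embedded in an MPVMainEncContext as 's'; FFmpeg's real
     * slice_to_mainenc() may be implemented differently. */
    static MPVMainEncContext *slice_to_mainenc_sketch(MpegEncContext *s)
    {
        return (MPVMainEncContext *)((char *)s - offsetof(MPVMainEncContext, s));
    }

mpeg4_encode_visual_object_header() goes the other way: it now takes the main context and derives the slice context with MpegEncContext *const s = &m->s, so its single max_b_frames read comes from m while the rest of the function keeps using s.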
@@ -106,7 +106,6 @@ typedef struct MpegEncContext {
 
     enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
     int encoding; ///< true if we are encoding (vs decoding)
-    int max_b_frames; ///< max number of B-frames for encoding
     int luma_elim_threshold;
     int chroma_elim_threshold;
     int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically
@@ -470,9 +470,9 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
               "max b frames must be 0 or positive for mpegvideo based encoders\n");
        return AVERROR(EINVAL);
    }
-   s->max_b_frames = avctx->max_b_frames;
+   m->max_b_frames = avctx->max_b_frames;
    s->codec_id = avctx->codec->id;
-   if (s->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
+   if (m->max_b_frames && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
        av_log(avctx, AV_LOG_ERROR, "B-frames not supported by codec\n");
        return AVERROR(EINVAL);
    }
@@ -692,7 +692,7 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
                   "set strict_std_compliance to 'unofficial' or lower in order to allow it\n");
            return AVERROR(EINVAL);
        }
-       if (s->max_b_frames != 0) {
+       if (m->max_b_frames != 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "B-frames cannot be used with low delay\n");
            return AVERROR(EINVAL);
@@ -744,7 +744,7 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
    case AV_CODEC_ID_MPEG1VIDEO:
        s->out_format = FMT_MPEG1;
        s->low_delay = !!(avctx->flags & AV_CODEC_FLAG_LOW_DELAY);
-       avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
+       avctx->delay = s->low_delay ? 0 : (m->max_b_frames + 1);
        ff_mpeg1_encode_init(s);
        break;
 #endif
@@ -834,8 +834,8 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
        s->h263_pred = 1;
        s->unrestricted_mv = 1;
        s->flipflop_rounding = 1;
-       s->low_delay = s->max_b_frames ? 0 : 1;
-       avctx->delay = s->low_delay ? 0 : (s->max_b_frames + 1);
+       s->low_delay = m->max_b_frames ? 0 : 1;
+       avctx->delay = s->low_delay ? 0 : (m->max_b_frames + 1);
        break;
    case AV_CODEC_ID_MSMPEG4V2:
        s->out_format = FMT_H263;
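A quick sanity check on the two delay hunks above: with low_delay off and, say, two B-frames configured, avctx->delay becomes m->max_b_frames + 1 = 3, i.e. the reported codec delay covers the buffered B-frames plus the following reference frame.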
@@ -1013,7 +1013,7 @@ av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
        return ret;
 
    if (m->b_frame_strategy == 2) {
-       for (i = 0; i < s->max_b_frames + 2; i++) {
+       for (int i = 0; i < m->max_b_frames + 2; i++) {
            m->tmp_frames[i] = av_frame_alloc();
            if (!m->tmp_frames[i])
                return AVERROR(ENOMEM);
@@ -1255,7 +1255,7 @@ static int load_input_picture(MPVMainEncContext *const m, const AVFrame *pic_arg
    MPVPicture *pic = NULL;
    int64_t pts;
    int display_picture_number = 0, ret;
-   int encoding_delay = s->max_b_frames ? s->max_b_frames
+   int encoding_delay = m->max_b_frames ? m->max_b_frames
                                         : (s->low_delay ? 0 : 1);
    int flush_offset = 1;
    int direct = 1;
@@ -1461,7 +1461,7 @@ static int estimate_best_b_count(MPVMainEncContext *const m)
    const int scale = m->brd_scale;
    int width = s->width >> scale;
    int height = s->height >> scale;
-   int i, j, out_size, p_lambda, b_lambda, lambda2;
+   int out_size, p_lambda, b_lambda, lambda2;
    int64_t best_rd = INT64_MAX;
    int best_b_count = -1;
    int ret = 0;
@@ -1481,7 +1481,7 @@ static int estimate_best_b_count(MPVMainEncContext *const m)
    lambda2 = (b_lambda * b_lambda + (1 << FF_LAMBDA_SHIFT) / 2) >>
              FF_LAMBDA_SHIFT;
 
-   for (i = 0; i < s->max_b_frames + 2; i++) {
+   for (int i = 0; i < m->max_b_frames + 2; i++) {
        const MPVPicture *pre_input_ptr = i ? s->input_picture[i - 1] :
                                              s->next_pic.ptr;
 
@@ -1513,7 +1513,7 @@ static int estimate_best_b_count(MPVMainEncContext *const m)
        }
    }
 
-   for (j = 0; j < s->max_b_frames + 1; j++) {
+   for (int j = 0; j < m->max_b_frames + 1; j++) {
        AVCodecContext *c;
        int64_t rd = 0;
 
@@ -1536,7 +1536,7 @@ static int estimate_best_b_count(MPVMainEncContext *const m)
        c->me_sub_cmp = s->avctx->me_sub_cmp;
        c->pix_fmt = AV_PIX_FMT_YUV420P;
        c->time_base = s->avctx->time_base;
-       c->max_b_frames = s->max_b_frames;
+       c->max_b_frames = m->max_b_frames;
 
        ret = avcodec_open2(c, s->avctx->codec, NULL);
        if (ret < 0)
@@ -1554,8 +1554,8 @@ static int estimate_best_b_count(MPVMainEncContext *const m)
 
        //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
 
-       for (i = 0; i < s->max_b_frames + 1; i++) {
-           int is_p = i % (j + 1) == j || i == s->max_b_frames;
+       for (int i = 0; i < m->max_b_frames + 1; i++) {
+           int is_p = i % (j + 1) == j || i == m->max_b_frames;
 
            m->tmp_frames[i + 1]->pict_type = is_p ?
                                              AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
@@ -1640,7 +1640,7 @@ static int set_bframe_chain_length(MPVMainEncContext *const m)
    int b_frames = 0;
 
    if (s->avctx->flags & AV_CODEC_FLAG_PASS2) {
-       for (int i = 0; i < s->max_b_frames + 1; i++) {
+       for (int i = 0; i < m->max_b_frames + 1; i++) {
            int pict_num = s->input_picture[0]->display_picture_number + i;
 
            if (pict_num >= m->rc_context.num_entries)
@@ -1656,12 +1656,11 @@ static int set_bframe_chain_length(MPVMainEncContext *const m)
    }
 
    if (m->b_frame_strategy == 0) {
-       b_frames = s->max_b_frames;
+       b_frames = m->max_b_frames;
        while (b_frames && !s->input_picture[b_frames])
            b_frames--;
    } else if (m->b_frame_strategy == 1) {
-       int i;
-       for (i = 1; i < s->max_b_frames + 1; i++) {
+       for (int i = 1; i < m->max_b_frames + 1; i++) {
            if (s->input_picture[i] &&
                s->input_picture[i]->b_frame_score == 0) {
                s->input_picture[i]->b_frame_score =
@@ -1671,19 +1670,18 @@ static int set_bframe_chain_length(MPVMainEncContext *const m)
                                    s->linesize) + 1;
            }
        }
-       for (i = 0; i < s->max_b_frames + 1; i++) {
+       for (int i = 0; i < m->max_b_frames + 1; i++) {
           if (!s->input_picture[i] ||
               s->input_picture[i]->b_frame_score - 1 >
-                  s->mb_num / m->b_sensitivity)
+                  s->mb_num / m->b_sensitivity) {
+              b_frames = FFMAX(0, i - 1);
               break;
           }
-
-      b_frames = FFMAX(0, i - 1);
+       }
 
       /* reset scores */
-      for (i = 0; i < b_frames + 1; i++) {
+      for (int i = 0; i < b_frames + 1; i++)
          s->input_picture[i]->b_frame_score = 0;
-      }
    } else if (m->b_frame_strategy == 2) {
       b_frames = estimate_best_b_count(m);
       if (b_frames < 0) {
@@ -1700,7 +1698,7 @@ static int set_bframe_chain_length(MPVMainEncContext *const m)
           b_frames = i;
       }
       if (s->input_picture[b_frames]->f->pict_type == AV_PICTURE_TYPE_B &&
-          b_frames == s->max_b_frames) {
+          b_frames == m->max_b_frames) {
           av_log(s->avctx, AV_LOG_ERROR,
                  "warning, too many B-frames in a row\n");
       }
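The set_bframe_chain_length() hunks do slightly more than rename the field: the loop counters become block-scoped (for (int i = ...)), so i no longer exists after the loop. That is why b_frames = FFMAX(0, i - 1) moves from after the search loop into the break branch inside it, and why the separate int i; declaration and the braces around the now single-statement reset loop go away. The general shape of that transformation, with hypothetical names (should_stop, last_good, n):

    /* With a block-scoped index, anything derived from it must be captured
     * before leaving the loop. */
    for (int i = 0; i < n; i++) {
        if (should_stop(i)) {
            last_good = i - 1; /* capture here: 'i' is out of scope below */
            break;
        }
    }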
@@ -42,6 +42,7 @@ typedef struct MPVMainEncContext {
 
    int intra_only; ///< if true, only intra pictures are generated
    int gop_size;
+   int max_b_frames; ///< max number of B-frames
    int picture_in_gop_number; ///< 0-> first pic in gop, ...
    int input_picture_number; ///< used to set pic->display_picture_number
    int coded_picture_number; ///< used to set pic->coded_picture_number
@@ -581,7 +581,7 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
        p = s->avctx->stats_in;
        for (i = -1; p; i++)
            p = strchr(p + 1, ';');
-       i += s->max_b_frames;
+       i += m->max_b_frames;
        if (i <= 0 || i >= INT_MAX / sizeof(RateControlEntry))
            return -1;
        rcc->entry = av_mallocz(i * sizeof(RateControlEntry));
@@ -602,7 +602,7 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
 
        /* read stats */
        p = s->avctx->stats_in;
-       for (i = 0; i < rcc->num_entries - s->max_b_frames; i++) {
+       for (i = 0; i < rcc->num_entries - m->max_b_frames; i++) {
            RateControlEntry *rce;
            int picture_number;
            int e;
@@ -663,7 +663,7 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
 
            if (i % ((m->gop_size + 3) / 4) == 0)
                rce.pict_type = AV_PICTURE_TYPE_I;
-           else if (i % (s->max_b_frames + 1))
+           else if (i % (m->max_b_frames + 1))
                rce.pict_type = AV_PICTURE_TYPE_B;
            else
                rce.pict_type = AV_PICTURE_TYPE_P;
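On the stats-file sizing in the first ff_rate_control_init() hunk: the strchr() loop counts the ';'-separated records in stats_in, and i += m->max_b_frames then pads that count before the allocation, presumably to leave headroom at the tail of the array; the parse loop in the next hunk correspondingly stops max_b_frames short of num_entries. For example, a two-pass stats file whose 300 records each end in ';' combined with max_b_frames = 2 yields 302 RateControlEntry slots.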