Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2024-11-21 10:55:51 +02:00)
h264: support frame size changes with multi threading
Based on code by Janne Grunau

Signed-off-by: Michael Niedermayer <michaelni@gmx.at>
parent 7e6c68394f
commit c2dfb1e37c
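The diff below makes the h264 decoder survive a mid-stream resolution change when frame threading is enabled: decode_update_thread_context() now detects that the source thread's dimensions, bit depth, chroma format or colorspace changed and re-runs h264_slice_header_init() on the destination context before the rest of the state is copied. As a rough illustration of that general pattern only (a minimal editor's sketch; MyDecContext, ctx_realloc_size_dependent() and update_thread_context() are hypothetical names, not FFmpeg API), the idea looks like this:

/* Minimal sketch (editor's illustration, not FFmpeg code): how a frame-
 * threaded decoder can propagate a frame size change from the source
 * thread's context to a destination thread's context. */
#include <stdint.h>
#include <stdlib.h>

typedef struct MyDecContext {
    int      width, height;   /* current coded dimensions                    */
    int      initialized;     /* nonzero once size-dependent state exists    */
    uint8_t *scratchpad;      /* example buffer whose size depends on w * h  */
} MyDecContext;

/* Drop and re-create everything whose size depends on the frame size. */
static int ctx_realloc_size_dependent(MyDecContext *c)
{
    free(c->scratchpad);
    c->scratchpad = malloc((size_t)c->width * (size_t)c->height);
    return c->scratchpad ? 0 : -1;
}

/* Called for a worker context before it decodes the next frame, playing
 * the same role as decode_update_thread_context() in the diff below. */
static int update_thread_context(MyDecContext *dst, const MyDecContext *src)
{
    int size_changed;

    if (dst == src)
        return 0;

    /* A size change (or first use) forces the destination context to
     * rebuild its size-dependent state before any other state is copied. */
    size_changed = dst->width != src->width || dst->height != src->height;
    if (!dst->initialized || size_changed) {
        dst->width  = src->width;
        dst->height = src->height;
        if (ctx_realloc_size_dependent(dst) < 0)
            return -1;
        dst->initialized = 1;
    }

    /* ... copy the size-independent parts of *src into *dst here ... */
    return 0;
}

In the actual commit the same decision is made on width, height, macroblock dimensions, bit depth and chroma format, and the reinitialization is done by h264_slice_header_init(h, 1), followed by copying linesize and block_offset because ff_MPV_frame_start() may not run in the new thread.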
@@ -1170,6 +1170,10 @@ static int decode_init_thread_copy(AVCodecContext *avctx)
    memcpy(&to->start_field, &from->start_field, \
           (char *)&to->end_field - (char *)&to->start_field)

static int h264_slice_header_init(H264Context *, int);

static int h264_set_parameter_from_sps(H264Context *h);

static int decode_update_thread_context(AVCodecContext *dst,
                                        const AVCodecContext *src)
{
@@ -1181,11 +1185,42 @@ static int decode_update_thread_context(AVCodecContext *dst,
    if (dst == src)
        return 0;

    if (inited &&
        (s->width                 != s1->width                 ||
         s->height                != s1->height                ||
         s->mb_width              != s1->mb_width              ||
         s->mb_height             != s1->mb_height             ||
         h->sps.bit_depth_luma    != h1->sps.bit_depth_luma    ||
         h->sps.chroma_format_idc != h1->sps.chroma_format_idc ||
         h->sps.colorspace        != h1->sps.colorspace)) {

        av_freep(&h->bipred_scratchpad);

        s->width     = s1->width;
        s->height    = s1->height;
        s->mb_height = s1->mb_height;
        h->b_stride  = h1->b_stride;

        if ((err = h264_slice_header_init(h, 1)) < 0) {
            av_log(h->s.avctx, AV_LOG_ERROR, "h264_slice_header_init() failed");
            return err;
        }
        h->context_reinitialized = 1;

        /* update linesize on resize for h264. The h264 decoder doesn't
         * necessarily call ff_MPV_frame_start in the new thread */
        s->linesize   = s1->linesize;
        s->uvlinesize = s1->uvlinesize;

        /* copy block_offset since frame_start may not be called */
        memcpy(h->block_offset, h1->block_offset, sizeof(h->block_offset));
        h264_set_parameter_from_sps(h);
    }

    err = ff_mpeg_update_thread_context(dst, src);
    if (err)
        return err;

    // FIXME handle width/height changing
    if (!inited) {
        for (i = 0; i < MAX_SPS_COUNT; i++)
            av_freep(h->sps_buffers + i);
@@ -2501,6 +2536,89 @@ static enum PixelFormat get_pixel_format(H264Context *h)
    }
}

static int h264_slice_header_init(H264Context *h, int reinit)
{
    MpegEncContext *const s = &h->s;
    int i, ret;

    if (   FFALIGN(s->avctx->width,  16) == s->width
        && FFALIGN(s->avctx->height, 16 * (2 - h->sps.frame_mbs_only_flag)) == s->height
        && !h->sps.crop_right && !h->sps.crop_bottom
        && (s->avctx->width != s->width || s->avctx->height != s->height)
       ) {
        av_log(h->s.avctx, AV_LOG_DEBUG, "Using externally provided dimensions\n");
        s->avctx->coded_width  = s->width;
        s->avctx->coded_height = s->height;
    } else {
        avcodec_set_dimensions(s->avctx, s->width, s->height);
        s->avctx->width  -= (2 >> CHROMA444) * FFMIN(h->sps.crop_right, (8 << CHROMA444) - 1);
        s->avctx->height -= (1 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1) * (2 - h->sps.frame_mbs_only_flag);
    }

    s->avctx->sample_aspect_ratio = h->sps.sar;
    av_assert0(s->avctx->sample_aspect_ratio.den);

    if (h->sps.timing_info_present_flag) {
        int64_t den = h->sps.time_scale;
        if (h->x264_build < 44U)
            den *= 2;
        av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den,
                  h->sps.num_units_in_tick, den, 1 << 30);
    }

    s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id, s->avctx->pix_fmt);

    if (reinit) {
        free_tables(h, 0);
        if ((ret = ff_MPV_common_frame_size_change(s)) < 0) {
            av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_frame_size_change() failed.\n");
            return ret;
        }
    } else {
        if ((ret = ff_MPV_common_init(s)) < 0) {
av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
s->first_field = 0;
|
||||
h->prev_interlaced_frame = 1;
|
||||
|
||||
init_scan_tables(h);
|
||||
if (ff_h264_alloc_tables(h) < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR,
|
||||
"Could not allocate memory for h264\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_SLICE)) {
|
||||
if (context_init(h) < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n");
|
||||
return -1;
|
||||
}
|
||||
} else {
|
||||
for (i = 1; i < s->slice_context_count; i++) {
|
||||
H264Context *c;
|
||||
c = h->thread_context[i] = av_malloc(sizeof(H264Context));
|
||||
memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
|
||||
memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext));
|
||||
c->h264dsp = h->h264dsp;
|
||||
c->sps = h->sps;
|
||||
c->pps = h->pps;
|
||||
c->pixel_shift = h->pixel_shift;
|
||||
c->cur_chroma_format_idc = h->cur_chroma_format_idc;
|
||||
init_scan_tables(c);
|
||||
clone_tables(c, h, i);
|
||||
}
|
||||
|
||||
for (i = 0; i < s->slice_context_count; i++)
|
||||
if (context_init(h->thread_context[i]) < 0) {
|
||||
av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n");
|
||||
return -1;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode a slice header.
|
||||
@@ -2523,6 +2641,8 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
    int default_ref_list_done = 0;
    int last_pic_structure, last_pic_droppable;
    int must_reinit;
    int needs_reinit = 0;
    enum AVPixelFormat pix_fmt;

    /* FIXME: 2tap qpel isn't implemented for high bit depth. */
    if ((s->avctx->flags2 & CODEC_FLAG2_FAST) &&
@@ -2597,6 +2717,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
    }

    if (h->pps.sps_id != h->current_sps_id ||
        h->context_reinitialized           ||
        h0->sps_buffers[h->pps.sps_id]->new) {
        h0->sps_buffers[h->pps.sps_id]->new = 0;
@@ -2611,6 +2732,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
        s->avctx->level = h->sps.level_idc;
        s->avctx->refs  = h->sps.ref_frame_count;

        if (s->mb_width  != h->sps.mb_width ||
            s->mb_height != h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag))
            needs_reinit = 1;

    must_reinit = (s->context_initialized &&
                  (   16*h->sps.mb_width != s->avctx->coded_width
                   || 16*h->sps.mb_height * (2 - h->sps.frame_mbs_only_flag) != s->avctx->coded_height
@@ -2634,10 +2759,51 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
    s->width  = 16 * s->mb_width;
    s->height = 16 * s->mb_height;

    if (must_reinit) {
        free_tables(h, 0);
        flush_dpb(s->avctx);
        ff_MPV_common_end(s);
    if (h->sps.video_signal_type_present_flag) {
        s->avctx->color_range = h->sps.full_range > 0 ? AVCOL_RANGE_JPEG
                                                      : AVCOL_RANGE_MPEG;
        if (h->sps.colour_description_present_flag) {
            s->avctx->color_primaries = h->sps.color_primaries;
            s->avctx->color_trc       = h->sps.color_trc;
            s->avctx->colorspace      = h->sps.colorspace;
        }
    }

    ret = get_pixel_format(h);
    if (ret < 0)
        return ret;
    else
        pix_fmt = ret;
    if (s->avctx->pix_fmt == PIX_FMT_NONE)
        s->avctx->pix_fmt = pix_fmt;

    if (s->context_initialized &&
        (s->width  != s->avctx->coded_width  ||
         s->height != s->avctx->coded_height ||
         pix_fmt   != s->avctx->pix_fmt      ||
         needs_reinit                        ||
         av_cmp_q(h->sps.sar, s->avctx->sample_aspect_ratio))) {

        if (h != h0) {
            av_log(s->avctx, AV_LOG_ERROR, "changing width/height on "
                   "slice %d\n", h0->current_slice + 1);
            return AVERROR_INVALIDDATA;
        }

        av_log(h->s.avctx, AV_LOG_INFO, "Reinit context to %dx%d, "
               "pix_fmt: %d\n", s->width, s->height, pix_fmt);

        flush_change(h);

        s->avctx->pix_fmt = pix_fmt;

        if ((ret = h264_slice_header_init(h, 1)) < 0) {
            av_log(h->s.avctx, AV_LOG_ERROR,
                   "h264_slice_header_init() failed\n");
            return ret;
        }
        h->context_reinitialized = 1;
    }
    if (!s->context_initialized) {
        if (h != h0) {
@@ -2645,90 +2811,10 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
                   "Cannot (re-)initialize context during parallel decoding.\n");
            return -1;
        }
        if (   FFALIGN(s->avctx->width,  16) == s->width
            && FFALIGN(s->avctx->height, 16 * (2 - h->sps.frame_mbs_only_flag)) == s->height
            && !h->sps.crop_right && !h->sps.crop_bottom
            && (s->avctx->width != s->width || s->avctx->height != s->height)
           ) {
            av_log(h->s.avctx, AV_LOG_DEBUG, "Using externally provided dimensions\n");
            s->avctx->coded_width  = s->width;
            s->avctx->coded_height = s->height;
        } else {
            avcodec_set_dimensions(s->avctx, s->width, s->height);
            s->avctx->width  -= (2 >> CHROMA444) * FFMIN(h->sps.crop_right, (8 << CHROMA444) - 1);
            s->avctx->height -= (1 << s->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> s->chroma_y_shift) - 1) * (2 - h->sps.frame_mbs_only_flag);
        }
        s->avctx->sample_aspect_ratio = h->sps.sar;
        av_assert0(s->avctx->sample_aspect_ratio.den);

        if (h->sps.video_signal_type_present_flag) {
            s->avctx->color_range = h->sps.full_range > 0 ? AVCOL_RANGE_JPEG
                                                          : AVCOL_RANGE_MPEG;
            if (h->sps.colour_description_present_flag) {
                s->avctx->color_primaries = h->sps.color_primaries;
                s->avctx->color_trc       = h->sps.color_trc;
                s->avctx->colorspace      = h->sps.colorspace;
            }
        }

        if (h->sps.timing_info_present_flag) {
            int64_t den = h->sps.time_scale;
            if (h->x264_build < 44U)
                den *= 2;
            av_reduce(&s->avctx->time_base.num, &s->avctx->time_base.den,
                      h->sps.num_units_in_tick, den, 1 << 30);
        }

        ret = get_pixel_format(h);
        if (ret < 0)
            return ret;
        else
            s->avctx->pix_fmt = ret;

        s->avctx->hwaccel = ff_find_hwaccel(s->avctx->codec->id,
                                            s->avctx->pix_fmt);

        if (ff_MPV_common_init(s) < 0) {
            av_log(h->s.avctx, AV_LOG_ERROR, "ff_MPV_common_init() failed.\n");
            return -1;
        }
        s->first_field = 0;
        h->prev_interlaced_frame = 1;

        init_scan_tables(h);
        if (ff_h264_alloc_tables(h) < 0) {
        if ((ret = h264_slice_header_init(h, 0)) < 0) {
            av_log(h->s.avctx, AV_LOG_ERROR,
                   "Could not allocate memory for h264\n");
            return AVERROR(ENOMEM);
        }
        h->bipred_scratchpad = NULL;

        if (!HAVE_THREADS || !(s->avctx->active_thread_type & FF_THREAD_SLICE)) {
            if (context_init(h) < 0) {
                av_log(h->s.avctx, AV_LOG_ERROR, "context_init() failed.\n");
                return -1;
            }
        } else {
            for (i = 1; i < s->slice_context_count; i++) {
                H264Context *c;
                c = h->thread_context[i] = av_malloc(sizeof(H264Context));
                memcpy(c, h->s.thread_context[i], sizeof(MpegEncContext));
                memset(&c->s + 1, 0, sizeof(H264Context) - sizeof(MpegEncContext));
                c->h264dsp     = h->h264dsp;
                c->sps         = h->sps;
                c->pps         = h->pps;
                c->pixel_shift = h->pixel_shift;
                c->cur_chroma_format_idc = h->cur_chroma_format_idc;
                init_scan_tables(c);
                clone_tables(c, h, i);
            }

            for (i = 0; i < s->slice_context_count; i++)
                if (context_init(h->thread_context[i]) < 0) {
                    av_log(h->s.avctx, AV_LOG_ERROR,
                           "context_init() failed.\n");
                    return -1;
                }
                   "h264_slice_header_init() failed\n");
            return ret;
        }
    }
@@ -4256,6 +4342,7 @@ not_extra:
            decode_postinit(h, 1);

        field_end(h, 0);
        h->context_reinitialized = 0;

        /* Wait for second field. */
        *got_frame = 0;
@@ -452,6 +452,8 @@ typedef struct H264Context {
    int nal_length_size;    ///< Number of bytes used for nal length (1, 2 or 4)
    int got_first;          ///< this flag is != 0 if we've parsed a frame

    int context_reinitialized;

    SPS *sps_buffers[MAX_SPS_COUNT];
    PPS *pps_buffers[MAX_PPS_COUNT];