avcodec/mpegvideo: Make new_picture an ordinary AVFrame
It is currently a "Picture", an mpegvideo-specific type that has a lot
of baggage, all of which is unnecessary for new_picture, because only
its embedded AVFrame is ever used. So just use an ordinary AVFrame.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
parent 88720f3d81
commit 37250f7f64
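The change in a nutshell: mpegvideo's Picture wrapper and its ref helpers
(ff_mpeg_ref_picture()/ff_mpeg_unref_picture()/ff_mpv_picture_free()) give
way to the plain libavutil frame API, and every access drops the ".f"
indirection. A minimal standalone sketch of that lifecycle, using only the
libavutil calls the diff switches to (not FFmpeg-internal code; error
handling kept terse):

    #include <stdint.h>
    #include <libavutil/frame.h>
    #include <libavutil/pixfmt.h>

    int main(void)
    {
        /* Stand-in for the source picture handed to the encoder. */
        AVFrame *src = av_frame_alloc();
        if (!src)
            return 1;
        src->format = AV_PIX_FMT_YUV420P;
        src->width  = 16;
        src->height = 16;
        if (av_frame_get_buffer(src, 0) < 0)
            return 1;

        /* ff_mpv_common_init() now only needs a bare AVFrame. */
        AVFrame *new_picture = av_frame_alloc();
        if (!new_picture)
            return 1;

        /* select_input_picture(): drop any old reference, then take a new
         * one (refcounted, no pixel copy). av_frame_unref()/av_frame_ref()
         * replace ff_mpeg_unref_picture()/ff_mpeg_ref_picture(). */
        av_frame_unref(new_picture);
        if (av_frame_ref(new_picture, src) < 0)
            return 1;

        /* Accesses lose the ".f" indirection: new_picture->data[0]
         * instead of new_picture.f->data[0]. */
        uint8_t *luma = new_picture->data[0];
        (void)luma;

        /* Teardown, as in ff_mpv_common_end(): one av_frame_free(). */
        av_frame_free(&new_picture);
        av_frame_free(&src);
        return 0;
    }

Something like cc demo.c $(pkg-config --cflags --libs libavutil) should
build it, assuming the FFmpeg development headers are installed.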
libavcodec/motion_est.c
@@ -680,7 +680,7 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
 
     if (s->mecc.me_sub_cmp[0] != s->mecc.mb_cmp[0]) {
         dmin_sum += s->mecc.mb_cmp[0](s,
-                                      s->new_picture.f->data[0] +
+                                      s->new_picture->data[0] +
                                       s->mb_x * 16 + s->mb_y * 16 * stride,
                                       c->scratchpad, stride, 16);
     }
@@ -704,8 +704,8 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
         s->hdsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_picture.f->data[2] + offset, s->uvlinesize, 8);
     }
 
-    dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad, s->uvlinesize, 8);
-    dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad + 8, s->uvlinesize, 8);
+    dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture->data[1] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad, s->uvlinesize, 8);
+    dmin_sum += s->mecc.mb_cmp[1](s, s->new_picture->data[2] + s->mb_x * 8 + s->mb_y * 8 * s->uvlinesize, c->scratchpad + 8, s->uvlinesize, 8);
 
     c->pred_x= mx;
@@ -894,7 +894,7 @@ void ff_estimate_p_frame_motion(MpegEncContext * s,
     int mb_type=0;
     Picture * const pic= &s->current_picture;
 
-    init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
+    init_ref(c, s->new_picture->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
 
     av_assert0(s->quarter_sample==0 || s->quarter_sample==1);
     av_assert0(s->linesize == c->stride);
@@ -1065,7 +1065,7 @@ int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
     int P[10][2];
     const int shift= 1+s->quarter_sample;
     const int xy= mb_x + mb_y*s->mb_stride;
-    init_ref(c, s->new_picture.f->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
+    init_ref(c, s->new_picture->data, s->last_picture.f->data, NULL, 16*mb_x, 16*mb_y, 0);
 
     av_assert0(s->quarter_sample==0 || s->quarter_sample==1);
 
@@ -1494,7 +1494,7 @@ void ff_estimate_b_frame_motion(MpegEncContext * s,
     int fmin, bmin, dmin, fbmin, bimin, fimin;
     int type=0;
     const int xy = mb_y*s->mb_stride + mb_x;
-    init_ref(c, s->new_picture.f->data, s->last_picture.f->data,
+    init_ref(c, s->new_picture->data, s->last_picture.f->data,
              s->next_picture.f->data, 16 * mb_x, 16 * mb_y, 2);
 
     get_limits(s, 16*mb_x, 16*mb_y);
libavcodec/mpeg4videoenc.c
@@ -645,7 +645,7 @@ void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
             y = s->mb_y * 16;
 
             offset = x + y * s->linesize;
-            p_pic  = s->new_picture.f->data[0] + offset;
+            p_pic  = s->new_picture->data[0] + offset;
 
             s->mb_skipped = 1;
             for (i = 0; i < s->max_b_frames; i++) {
libavcodec/mpegvideo.c
@@ -791,7 +791,7 @@ av_cold int ff_mpv_common_init(MpegEncContext *s)
     if (!(s->next_picture.f = av_frame_alloc()) ||
         !(s->last_picture.f = av_frame_alloc()) ||
         !(s->current_picture.f = av_frame_alloc()) ||
-        !(s->new_picture.f = av_frame_alloc()))
+        !(s->new_picture = av_frame_alloc()))
         goto fail_nomem;
 
     if ((ret = ff_mpv_init_context_frame(s)))
@@ -902,7 +902,7 @@ void ff_mpv_common_end(MpegEncContext *s)
     ff_mpv_picture_free(s->avctx, &s->last_picture);
     ff_mpv_picture_free(s->avctx, &s->current_picture);
     ff_mpv_picture_free(s->avctx, &s->next_picture);
-    ff_mpv_picture_free(s->avctx, &s->new_picture);
+    av_frame_free(&s->new_picture);
 
     s->context_initialized = 0;
     s->context_reinit = 0;
libavcodec/mpegvideo.h
@@ -153,10 +153,10 @@ typedef struct MpegEncContext {
     Picture next_picture;
 
     /**
-     * copy of the source picture structure for encoding.
+     * Reference to the source picture for encoding.
      * note, linesize & data, might not match the source picture (for field pictures)
      */
-    Picture new_picture;
+    AVFrame *new_picture;
 
     /**
     * copy of the current picture structure.
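Worth noting: the field becomes a pointer rather than an embedded struct
because libavutil requires AVFrames to be allocated with av_frame_alloc(),
since sizeof(AVFrame) is not part of the public ABI. The struct fragment
as this header change leaves it (a trimmed sketch, surrounding members
elided):

    typedef struct MpegEncContext {
        /* ... */
        Picture next_picture;

        /**
         * Reference to the source picture for encoding.
         * note, linesize & data, might not match the source picture (for field pictures)
         */
        AVFrame *new_picture;
        /* ... */
    } MpegEncContext;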
libavcodec/mpegvideo_enc.c
@@ -938,7 +938,7 @@ av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
     for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
         av_frame_free(&s->tmp_frames[i]);
 
-    ff_mpv_picture_free(avctx, &s->new_picture);
+    av_frame_free(&s->new_picture);
 
     av_freep(&avctx->stats_out);
 
@@ -1486,14 +1486,15 @@ static int select_input_picture(MpegEncContext *s)
         }
     }
 no_output_pic:
-    ff_mpeg_unref_picture(s->avctx, &s->new_picture);
+    av_frame_unref(s->new_picture);
 
     if (s->reordered_input_picture[0]) {
         s->reordered_input_picture[0]->reference =
            s->reordered_input_picture[0]->f->pict_type !=
                AV_PICTURE_TYPE_B ? 3 : 0;
 
-        if ((ret = ff_mpeg_ref_picture(s->avctx, &s->new_picture, s->reordered_input_picture[0])))
+        if ((ret = av_frame_ref(s->new_picture,
+                                s->reordered_input_picture[0]->f)))
             return ret;
 
         if (s->reordered_input_picture[0]->shared || s->avctx->rc_buffer_size) {
@@ -1524,8 +1525,8 @@ no_output_pic:
             // input is not a shared pix -> reuse buffer for current_pix
             s->current_picture_ptr = s->reordered_input_picture[0];
             for (i = 0; i < 4; i++) {
-                if (s->new_picture.f->data[i])
-                    s->new_picture.f->data[i] += INPLACE_OFFSET;
+                if (s->new_picture->data[i])
+                    s->new_picture->data[i] += INPLACE_OFFSET;
             }
         }
         ff_mpeg_unref_picture(s->avctx, &s->current_picture);
@@ -1533,7 +1534,7 @@ no_output_pic:
                                        s->current_picture_ptr)) < 0)
             return ret;
 
-        s->picture_number = s->new_picture.f->display_picture_number;
+        s->picture_number = s->new_picture->display_picture_number;
     }
     return 0;
 }
@@ -1686,7 +1687,7 @@ int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
     }
 
     /* output? */
-    if (s->new_picture.f->data[0]) {
+    if (s->new_picture->data[0]) {
         int growing_buffer = context_count == 1 && !pkt->data && !s->data_partitioning;
         int pkt_size = growing_buffer ? FFMAX(s->mb_width*s->mb_height*64+10000, avctx->internal->byte_buffer_size) - AV_INPUT_BUFFER_PADDING_SIZE
                                       :
@@ -1710,7 +1711,7 @@ int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
             init_put_bits(&s->thread_context[i]->pb, start, end - start);
         }
 
-        s->pict_type = s->new_picture.f->pict_type;
+        s->pict_type = s->new_picture->pict_type;
         //emms_c();
         ret = frame_start(s);
         if (ret < 0)
@@ -2076,11 +2077,11 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
 
     wrap_y = s->linesize;
     wrap_c = s->uvlinesize;
-    ptr_y  = s->new_picture.f->data[0] +
+    ptr_y  = s->new_picture->data[0] +
              (mb_y * 16 * wrap_y) + mb_x * 16;
-    ptr_cb = s->new_picture.f->data[1] +
+    ptr_cb = s->new_picture->data[1] +
              (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
-    ptr_cr = s->new_picture.f->data[2] +
+    ptr_cr = s->new_picture->data[2] +
              (mb_y * mb_block_height * wrap_c) + mb_x * mb_block_width;
 
     if((mb_x * 16 + 16 > s->width || mb_y * 16 + 16 > s->height) && s->codec_id != AV_CODEC_ID_AMV){
@@ -2567,18 +2568,18 @@ static int sse_mb(MpegEncContext *s){
 
     if(w==16 && h==16)
       if(s->avctx->mb_cmp == FF_CMP_NSSE){
-        return s->mecc.nsse[0](s, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
-               s->mecc.nsse[1](s, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
-               s->mecc.nsse[1](s, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
+        return s->mecc.nsse[0](s, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
+               s->mecc.nsse[1](s, s->new_picture->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
+               s->mecc.nsse[1](s, s->new_picture->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
       }else{
-        return s->mecc.sse[0](NULL, s->new_picture.f->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
-               s->mecc.sse[1](NULL, s->new_picture.f->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
-               s->mecc.sse[1](NULL, s->new_picture.f->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
+        return s->mecc.sse[0](NULL, s->new_picture->data[0] + s->mb_x * 16 + s->mb_y * s->linesize * 16, s->dest[0], s->linesize, 16) +
+               s->mecc.sse[1](NULL, s->new_picture->data[1] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[1], s->uvlinesize, 8) +
+               s->mecc.sse[1](NULL, s->new_picture->data[2] + s->mb_x * 8 + s->mb_y * s->uvlinesize * 8, s->dest[2], s->uvlinesize, 8);
       }
     else
-        return  sse(s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
-               +sse(s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
-               +sse(s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
+        return  sse(s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16, s->dest[0], w, h, s->linesize)
+               +sse(s, s->new_picture->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[1], w>>1, h>>1, s->uvlinesize)
+               +sse(s, s->new_picture->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*8,s->dest[2], w>>1, h>>1, s->uvlinesize);
 }
 
 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
@@ -2633,7 +2634,7 @@ static int mb_var_thread(AVCodecContext *c, void *arg){
         for(mb_x=0; mb_x < s->mb_width; mb_x++) {
             int xx = mb_x * 16;
             int yy = mb_y * 16;
-            uint8_t *pix = s->new_picture.f->data[0] + (yy * s->linesize) + xx;
+            uint8_t *pix = s->new_picture->data[0] + (yy * s->linesize) + xx;
             int varc;
             int sum = s->mpvencdsp.pix_sum(pix, s->linesize);
 
@@ -3354,13 +3355,13 @@ static int encode_thread(AVCodecContext *c, void *arg){
                 if(s->mb_y*16 + 16 > s->height) h= s->height- s->mb_y*16;
 
                 s->current_picture.encoding_error[0] += sse(
-                    s, s->new_picture.f->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
+                    s, s->new_picture->data[0] + s->mb_x*16 + s->mb_y*s->linesize*16,
                     s->dest[0], w, h, s->linesize);
                 s->current_picture.encoding_error[1] += sse(
-                    s, s->new_picture.f->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
+                    s, s->new_picture->data[1] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
                     s->dest[1], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
                 s->current_picture.encoding_error[2] += sse(
-                    s, s->new_picture.f->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
+                    s, s->new_picture->data[2] + s->mb_x*8 + s->mb_y*s->uvlinesize*chr_h,
                     s->dest[2], w>>1, h>>s->chroma_y_shift, s->uvlinesize);
             }
             if(s->loop_filter){
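Across the encoder hunks above, a 16x16 macroblock is located inside a
plane with the same pointer arithmetic each time; with the wrapper gone it
reads directly off the AVFrame. A small illustrative helper capturing that
arithmetic (the name mb_luma_ptr is hypothetical, not from the diff; the
expression is taken verbatim from the hunks above):

    #include <stdint.h>
    #include <libavutil/frame.h>

    /* Return the address of the 16x16 luma macroblock (mb_x, mb_y).
     * linesize is the plane stride in bytes. Chroma planes of 4:2:0
     * content use 8x8 blocks with uvlinesize, as in sse_mb() above. */
    static inline uint8_t *mb_luma_ptr(const AVFrame *f, int mb_x, int mb_y,
                                       int linesize)
    {
        return f->data[0] + mb_x * 16 + mb_y * 16 * linesize;
    }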
libavcodec/snowenc.c
@@ -1647,7 +1647,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 
     s->m.avctx= s->avctx;
     s->m. last_picture.f = s->last_picture[0];
-    s->m. new_picture.f = s->input_picture;
+    s->m. new_picture = s->input_picture;
     s->m. last_picture_ptr= &s->m. last_picture;
     s->m.linesize = stride;
     s->m.uvlinesize= s->current_picture->linesize[1];
libavcodec/svq1enc.c
@@ -277,7 +277,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
     s->m.last_picture.f->data[0] = ref_plane;
     s->m.linesize =
     s->m.last_picture.f->linesize[0] =
-    s->m.new_picture.f->linesize[0] =
+    s->m.new_picture->linesize[0] =
     s->m.current_picture.f->linesize[0] = stride;
     s->m.width = width;
     s->m.height = height;
@@ -327,7 +327,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
     s->m.me.dia_size = s->avctx->dia_size;
     s->m.first_slice_line = 1;
     for (y = 0; y < block_height; y++) {
-        s->m.new_picture.f->data[0] = src - y * 16 * stride; // ugly
+        s->m.new_picture->data[0] = src - y * 16 * stride; // ugly
         s->m.mb_y = y;
 
         for (i = 0; i < 16 && i + 16 * y < height; i++) {