
Merge remote-tracking branch 'qatar/master'

* qatar/master:
  mpegvideo_enc: use the AVFrame API properly.
  ffv1: use the AVFrame API properly.
  jpegls: use the AVFrame API properly.
  huffyuv: use the AVFrame API properly.

Conflicts:
	libavcodec/ffv1.c
	libavcodec/ffv1.h
	libavcodec/ffv1dec.c
	libavcodec/ffv1enc.c

Changes to ffv1 are redone rather than merged, as they were based on an
ancient codebase and a good part of that work had already been done here
as well.

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit be1e6e7503 (Michael Niedermayer, 2013-11-17 13:27:46 +01:00)
10 changed files with 84 additions and 59 deletions
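
The common thread in the four merged commits is AVFrame ownership: each codec
context stops embedding an AVFrame by value (set up via
avcodec_get_frame_defaults()) and instead heap-allocates frames with
av_frame_alloc() and releases them with av_frame_free(). Below is a minimal
sketch of the init/close pairing the encoders in this merge adopt;
toy_encode_init and toy_encode_close are hypothetical names for illustration,
while the av_frame_*() calls are the real libavutil API:

#include "libavutil/attributes.h"
#include "libavutil/frame.h"
#include "libavcodec/avcodec.h"

/* Hypothetical encoder init: replaces pointing avctx->coded_frame at a
 * frame embedded in the codec's private context. */
static av_cold int toy_encode_init(AVCodecContext *avctx)
{
    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    /* An intra-only codec can set these once instead of per frame. */
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
    return 0;
}

static av_cold int toy_encode_close(AVCodecContext *avctx)
{
    av_frame_free(&avctx->coded_frame); /* also NULLs the pointer */
    return 0;
}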

libavcodec/ffv1.c

@@ -53,6 +53,7 @@ av_cold int ffv1_common_init(AVCodecContext *avctx)
     s->last_picture.f = av_frame_alloc();
     if (!s->picture.f || !s->last_picture.f)
         return AVERROR(ENOMEM);
+
     ff_dsputil_init(&s->dsp, avctx);
 
     s->width = avctx->width;

libavcodec/ffv1dec.c

@@ -938,12 +938,12 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
         uint8_t *dst[4];
         ff_thread_await_progress(&f->last_picture, INT_MAX, 0);
         for (j = 0; j < 4; j++) {
-            int sh = (j==1 || j==2) ? f->chroma_h_shift : 0;
-            int sv = (j==1 || j==2) ? f->chroma_v_shift : 0;
-            dst[j] = p->data[j] + p->linesize[j]*
-                     (fs->slice_y>>sv) + (fs->slice_x>>sh);
-            src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j]*
-                     (fs->slice_y>>sv) + (fs->slice_x>>sh);
+            int sh = (j == 1 || j == 2) ? f->chroma_h_shift : 0;
+            int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
+            dst[j] = p->data[j] + p->linesize[j] *
+                     (fs->slice_y >> sv) + (fs->slice_x >> sh);
+            src[j] = f->last_picture.f->data[j] + f->last_picture.f->linesize[j] *
+                     (fs->slice_y >> sv) + (fs->slice_x >> sh);
         }
         av_image_copy(dst, p->linesize, (const uint8_t **)src,
                       f->last_picture.f->linesize,

libavcodec/ffv1enc.c

@@ -830,6 +830,12 @@ static av_cold int encode_init(AVCodecContext *avctx)
     if ((ret = ffv1_allocate_initial_states(s)) < 0)
         return ret;
 
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+
     if (!s->transparency)
         s->plane_count = 2;
     if (!s->chroma_planes && s->version > 3)
@@ -1000,7 +1006,7 @@ static int encode_slice(AVCodecContext *c, void *arg)
     int height = fs->slice_height;
     int x = fs->slice_x;
     int y = fs->slice_y;
-    AVFrame *const p = f->picture.f;
+    const AVFrame *const p = f->picture.f;
     const int ps = av_pix_fmt_desc_get(c->pix_fmt)->comp[0].step_minus1 + 1;
     int ret;
    RangeCoder c_bak = fs->c;
@@ -1008,7 +1014,7 @@ static int encode_slice(AVCodecContext *c, void *arg)
     fs->slice_coding_mode = 0;
 retry:
-    if (p->key_frame)
+    if (c->coded_frame->key_frame)
         ffv1_clear_slice_state(f, fs);
     if (f->version > 2) {
         encode_slice_header(f, fs);
@@ -1129,16 +1135,16 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     av_frame_unref(p);
     if ((ret = av_frame_ref(p, pict)) < 0)
         return ret;
-    p->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
 
     if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
         put_rac(c, &keystate, 1);
-        p->key_frame = 1;
+        avctx->coded_frame->key_frame = 1;
         f->gob_count++;
         write_header(f);
     } else {
         put_rac(c, &keystate, 0);
-        p->key_frame = 0;
+        avctx->coded_frame->key_frame = 0;
     }
 
     if (f->ac > 1) {
@@ -1195,12 +1201,19 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     pkt->size = buf_p - pkt->data;
     pkt->pts =
     pkt->dts = pict->pts;
-    pkt->flags |= AV_PKT_FLAG_KEY * p->key_frame;
+    pkt->flags |= AV_PKT_FLAG_KEY * avctx->coded_frame->key_frame;
     *got_packet = 1;
 
     return 0;
 }
 
+static av_cold int encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    ffv1_close(avctx);
+    return 0;
+}
+
 #define OFFSET(x) offsetof(FFV1Context, x)
 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption options[] = {
@@ -1228,7 +1241,7 @@ AVCodec ff_ffv1_encoder = {
     .priv_data_size = sizeof(FFV1Context),
     .init           = encode_init,
     .encode2        = encode_frame,
-    .close          = ffv1_close,
+    .close          = encode_close,
     .capabilities   = CODEC_CAP_SLICE_THREADS | CODEC_CAP_DELAY,
     .pix_fmts       = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUV444P,

libavcodec/huffyuv.h

@@ -78,7 +78,6 @@ typedef struct HYuvContext {
     uint32_t bits[3][256];
     uint32_t pix_bgr_map[1<<VLC_BITS];
     VLC vlc[6]; //Y,U,V,YY,YU,YV
-    AVFrame picture;
     uint8_t *bitstream_buffer;
     unsigned int bitstream_buffer_size;
     DSPContext dsp;

libavcodec/huffyuvdec.c

@@ -256,7 +256,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
     ff_huffyuv_common_init(avctx);
     memset(s->vlc, 0, 3 * sizeof(VLC));
 
-    avcodec_get_frame_defaults(&s->picture);
     s->interlaced = s->height > 288;
     s->bgr32 = 1;

libavcodec/huffyuvenc.c

@@ -156,7 +156,12 @@ static av_cold int encode_init(AVCodecContext *avctx)
     }
 
     s->version = 2;
 
-    avctx->coded_frame = &s->picture;
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
 
     switch (avctx->pix_fmt) {
     case AV_PIX_FMT_YUV420P:
@@ -446,16 +451,12 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
     const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
     const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
-    AVFrame * const p = &s->picture;
+    const AVFrame * const p = pict;
     int i, j, size = 0, ret;
 
     if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
         return ret;
 
-    *p = *pict;
-    p->pict_type = AV_PICTURE_TYPE_I;
-    p->key_frame = 1;
-
     if (s->context) {
         for (i = 0; i < 3; i++) {
             ff_huff_gen_len_table(s->len[i], s->stats[i]);
@@ -681,6 +682,8 @@ static av_cold int encode_end(AVCodecContext *avctx)
     av_freep(&avctx->extradata);
     av_freep(&avctx->stats_out);
 
+    av_frame_free(&avctx->coded_frame);
+
     return 0;
 }

libavcodec/jpegls.h

@@ -33,7 +33,6 @@
 
 typedef struct JpeglsContext {
     AVCodecContext *avctx;
-    AVFrame picture;
 } JpeglsContext;
 
 typedef struct JLSState {

libavcodec/jpeglsenc.c

@@ -249,8 +249,7 @@ static void ls_store_lse(JLSState *state, PutBitContext *pb)
 static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
                              const AVFrame *pict, int *got_packet)
 {
-    JpeglsContext *const s = avctx->priv_data;
-    AVFrame *const p = &s->picture;
+    const AVFrame *const p = pict;
     const int near = avctx->prediction_method;
     PutBitContext pb, pb2;
     GetBitContext gb;
@@ -259,10 +258,6 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
     int i, size, ret;
     int comps;
 
-    *p = *pict;
-    p->pict_type = AV_PICTURE_TYPE_I;
-    p->key_frame = 1;
-
     if (avctx->pix_fmt == AV_PIX_FMT_GRAY8 ||
         avctx->pix_fmt == AV_PIX_FMT_GRAY16)
         comps = 1;
@@ -349,7 +344,7 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
                 Rc[j] = last[j];
             }
             last = cur;
-            cur += s->picture.linesize[0];
+            cur += p->linesize[0];
         }
     } else if (avctx->pix_fmt == AV_PIX_FMT_BGR24) {
         int j, width;
@@ -363,7 +358,7 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
                 Rc[j] = last[j];
             }
             last = cur;
-            cur += s->picture.linesize[0];
+            cur += p->linesize[0];
         }
     }
@@ -403,12 +398,20 @@ static int encode_picture_ls(AVCodecContext *avctx, AVPacket *pkt,
     return 0;
 }
 
+static av_cold int encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    return 0;
+}
+
 static av_cold int encode_init_ls(AVCodecContext *ctx)
 {
-    JpeglsContext *c = (JpeglsContext *)ctx->priv_data;
+    ctx->coded_frame = av_frame_alloc();
+    if (!ctx->coded_frame)
+        return AVERROR(ENOMEM);
 
-    c->avctx = ctx;
-    ctx->coded_frame = &c->picture;
+    ctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    ctx->coded_frame->key_frame = 1;
 
     if (ctx->pix_fmt != AV_PIX_FMT_GRAY8 &&
         ctx->pix_fmt != AV_PIX_FMT_GRAY16 &&
@@ -426,8 +429,8 @@ AVCodec ff_jpegls_encoder = {
     .long_name = NULL_IF_CONFIG_SMALL("JPEG-LS"),
     .type = AVMEDIA_TYPE_VIDEO,
     .id = AV_CODEC_ID_JPEGLS,
-    .priv_data_size = sizeof(JpeglsContext),
     .init = encode_init_ls,
+    .close = encode_close,
     .encode2 = encode_picture_ls,
     .pix_fmts = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24,

libavcodec/mpegvideo.h

@@ -775,6 +775,9 @@ typedef struct MpegEncContext {
     ERContext er;
 
     int error_rate;
 
+    /* temporary frames used by b_frame_strategy = 2 */
+    AVFrame *tmp_frames[MAX_B_FRAMES + 2];
+
 } MpegEncContext;
 
 #define REBASE_PICTURE(pic, new_ctx, old_ctx) \

libavcodec/mpegvideo_enc.c

@@ -237,7 +237,7 @@ av_cold int ff_dct_encode_init(MpegEncContext *s) {
 av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
-    int i;
+    int i, ret;
     int chroma_h_shift, chroma_v_shift;
 
     MPV_encode_defaults(s);
@@ -894,12 +894,29 @@ av_cold int ff_MPV_encode_init(AVCodecContext *avctx)
     FF_ENABLE_DEPRECATION_WARNINGS;
 #endif
 
+    if (avctx->b_frame_strategy == 2) {
+        for (i = 0; i < s->max_b_frames + 2; i++) {
+            s->tmp_frames[i] = av_frame_alloc();
+            if (!s->tmp_frames[i])
+                return AVERROR(ENOMEM);
+
+            s->tmp_frames[i]->format = AV_PIX_FMT_YUV420P;
+            s->tmp_frames[i]->width  = s->width  >> avctx->brd_scale;
+            s->tmp_frames[i]->height = s->height >> avctx->brd_scale;
+
+            ret = av_frame_get_buffer(s->tmp_frames[i], 32);
+            if (ret < 0)
+                return ret;
+        }
+    }
+
     return 0;
 }
 
 av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
 {
     MpegEncContext *s = avctx->priv_data;
+    int i;
 
     ff_rate_control_uninit(s);
@@ -910,6 +927,9 @@ av_cold int ff_MPV_encode_end(AVCodecContext *avctx)
     av_freep(&avctx->extradata);
 
+    for (i = 0; i < FF_ARRAY_ELEMS(s->tmp_frames); i++)
+        av_frame_free(&s->tmp_frames[i]);
+
     return 0;
 }
@@ -1151,7 +1171,6 @@ static int estimate_best_b_count(MpegEncContext *s)
 {
     AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
     AVCodecContext *c = avcodec_alloc_context3(NULL);
-    AVFrame input[MAX_B_FRAMES + 2];
     const int scale = s->avctx->brd_scale;
     int i, j, out_size, p_lambda, b_lambda, lambda2;
     int64_t best_rd = INT64_MAX;
@@ -1186,19 +1205,9 @@ static int estimate_best_b_count(MpegEncContext *s)
         return -1;
 
     for (i = 0; i < s->max_b_frames + 2; i++) {
-        int ysize = c->width * c->height;
-        int csize = (c->width / 2) * (c->height / 2);
         Picture pre_input, *pre_input_ptr = i ? s->input_picture[i - 1] :
                                                 s->next_picture_ptr;
 
-        avcodec_get_frame_defaults(&input[i]);
-        input[i].data[0] = av_malloc(ysize + 2 * csize);
-        input[i].data[1] = input[i].data[0] + ysize;
-        input[i].data[2] = input[i].data[1] + csize;
-        input[i].linesize[0] = c->width;
-        input[i].linesize[1] =
-        input[i].linesize[2] = c->width / 2;
-
         if (pre_input_ptr && (!i || s->input_picture[i - 1])) {
             pre_input = *pre_input_ptr;
@@ -1208,13 +1217,13 @@ static int estimate_best_b_count(MpegEncContext *s)
             pre_input.f.data[2] += INPLACE_OFFSET;
         }
 
-        s->dsp.shrink[scale](input[i].data[0], input[i].linesize[0],
+        s->dsp.shrink[scale](s->tmp_frames[i]->data[0], s->tmp_frames[i]->linesize[0],
                              pre_input.f.data[0], pre_input.f.linesize[0],
                              c->width, c->height);
-        s->dsp.shrink[scale](input[i].data[1], input[i].linesize[1],
+        s->dsp.shrink[scale](s->tmp_frames[i]->data[1], s->tmp_frames[i]->linesize[1],
                              pre_input.f.data[1], pre_input.f.linesize[1],
                              c->width >> 1, c->height >> 1);
-        s->dsp.shrink[scale](input[i].data[2], input[i].linesize[2],
+        s->dsp.shrink[scale](s->tmp_frames[i]->data[2], s->tmp_frames[i]->linesize[2],
                              pre_input.f.data[2], pre_input.f.linesize[2],
                              c->width >> 1, c->height >> 1);
         }
@@ -1228,21 +1237,21 @@ static int estimate_best_b_count(MpegEncContext *s)
         c->error[0] = c->error[1] = c->error[2] = 0;
 
-        input[0].pict_type = AV_PICTURE_TYPE_I;
-        input[0].quality = 1 * FF_QP2LAMBDA;
-        out_size = encode_frame(c, &input[0]);
+        s->tmp_frames[0]->pict_type = AV_PICTURE_TYPE_I;
+        s->tmp_frames[0]->quality = 1 * FF_QP2LAMBDA;
+        out_size = encode_frame(c, s->tmp_frames[0]);
 
         //rd += (out_size * lambda2) >> FF_LAMBDA_SHIFT;
 
         for (i = 0; i < s->max_b_frames + 1; i++) {
             int is_p = i % (j + 1) == j || i == s->max_b_frames;
 
-            input[i + 1].pict_type = is_p ?
-                                     AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
-            input[i + 1].quality = is_p ? p_lambda : b_lambda;
-            out_size = encode_frame(c, &input[i + 1]);
+            s->tmp_frames[i + 1]->pict_type = is_p ?
+                                              AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_B;
+            s->tmp_frames[i + 1]->quality = is_p ? p_lambda : b_lambda;
+            out_size = encode_frame(c, s->tmp_frames[i + 1]);
 
             rd += (out_size * lambda2) >> (FF_LAMBDA_SHIFT - 3);
         }
@@ -1264,10 +1273,6 @@ static int estimate_best_b_count(MpegEncContext *s)
     avcodec_close(c);
     av_freep(&c);
 
-    for (i = 0; i < s->max_b_frames + 2; i++) {
-        av_freep(&input[i].data[0]);
-    }
-
     return best_b_count;
 }
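
A side note on the estimate_best_b_count() changes above: av_frame_get_buffer()
derives data[] and linesize[] from format/width/height on its own, which is
what allows dropping the hand-computed ysize/csize plane offsets. A minimal
standalone sketch under the diff's own assumptions (YUV420P, 32-byte
alignment); alloc_scratch_frame is a hypothetical helper name:

#include "libavutil/frame.h"

static AVFrame *alloc_scratch_frame(int width, int height)
{
    AVFrame *f = av_frame_alloc();
    if (!f)
        return NULL;
    f->format = AV_PIX_FMT_YUV420P; /* as hard-coded in the diff above */
    f->width  = width;              /* e.g. s->width  >> avctx->brd_scale */
    f->height = height;
    /* Allocates all planes and fills f->data[] / f->linesize[]. */
    if (av_frame_get_buffer(f, 32) < 0) {
        av_frame_free(&f);
        return NULL;
    }
    return f;
}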