ffv1: use the AVFrame API properly.
commit a6064b12b4
parent 706a92926c
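
For readers unfamiliar with the reference-counted AVFrame API this patch moves to, the pattern is: allocate frames on the heap with av_frame_alloc(), hold on to a picture by taking a reference with av_frame_ref(), drop it with av_frame_unref(), and release everything with av_frame_free(), instead of embedding AVFrame structs in the codec context and copying them by value. A minimal standalone sketch of that lifecycle (illustrative only, not code from this commit; the pixel format and dimensions are arbitrary):

/* Sketch of the refcounted AVFrame lifecycle used by the patch below.
 * Build with: cc sketch.c -lavutil (needs a libavutil with av_frame_alloc). */
#include <libavutil/frame.h>

int main(void)
{
    AVFrame *cur  = av_frame_alloc();   /* plays the role of the decoded frame */
    AVFrame *last = av_frame_alloc();   /* plays the role of f->last_picture   */
    int ret;

    if (!cur || !last) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    cur->format = AV_PIX_FMT_YUV420P;
    cur->width  = 64;
    cur->height = 48;
    if ((ret = av_frame_get_buffer(cur, 32)) < 0)   /* allocate the data buffers */
        goto end;

    /* per decoded picture: drop the old reference, take one on the new frame */
    av_frame_unref(last);
    if ((ret = av_frame_ref(last, cur)) < 0)
        goto end;

end:
    /* at close time: av_frame_free() unrefs, frees and NULLs the pointer */
    av_frame_free(&cur);
    av_frame_free(&last);
    return ret < 0;
}
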
libavcodec/ffv1.c

@@ -141,8 +141,6 @@ av_cold int ffv1_common_init(AVCodecContext *avctx)
     if (!avctx->width || !avctx->height)
         return AVERROR_INVALIDDATA;
 
-    avcodec_get_frame_defaults(&s->picture);
-
     ff_dsputil_init(&s->dsp, avctx);
 
     s->width  = avctx->width;
@@ -271,8 +269,6 @@ av_cold int ffv1_close(AVCodecContext *avctx)
     FFV1Context *s = avctx->priv_data;
     int i, j;
 
-    av_frame_unref(&s->last_picture);
-
     for (j = 0; j < s->slice_count; j++) {
         FFV1Context *fs = s->slice_context[j];
         for (i = 0; i < s->plane_count; i++) {
libavcodec/ffv1.h

@@ -79,7 +79,8 @@ typedef struct FFV1Context {
     int transparency;
     int flags;
     int picture_number;
-    AVFrame picture, last_picture;
+    AVFrame *frame;
+    AVFrame *last_picture;
 
     AVFrame *cur;
     int plane_count;
libavcodec/ffv1dec.c

@@ -784,6 +784,10 @@ static av_cold int ffv1_decode_init(AVCodecContext *avctx)
 
     ffv1_common_init(avctx);
 
+    f->last_picture = av_frame_alloc();
+    if (!f->last_picture)
+        return AVERROR(ENOMEM);
+
     if (avctx->extradata && (ret = read_extra_header(f)) < 0)
         return ret;
 
@@ -876,7 +880,7 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
     for (i = f->slice_count - 1; i >= 0; i--) {
         FFV1Context *fs = f->slice_context[i];
         int j;
-        if (fs->slice_damaged && f->last_picture.data[0]) {
+        if (fs->slice_damaged && f->last_picture->data[0]) {
             const uint8_t *src[4];
             uint8_t *dst[4];
             for (j = 0; j < 4; j++) {
@@ -884,12 +888,12 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
                 int sv = (j == 1 || j == 2) ? f->chroma_v_shift : 0;
                 dst[j] = p->data[j] + p->linesize[j] *
                          (fs->slice_y >> sv) + (fs->slice_x >> sh);
-                src[j] = f->last_picture.data[j] +
-                         f->last_picture.linesize[j] *
+                src[j] = f->last_picture->data[j] +
+                         f->last_picture->linesize[j] *
                          (fs->slice_y >> sv) + (fs->slice_x >> sh);
             }
             av_image_copy(dst, p->linesize, (const uint8_t **)src,
-                          f->last_picture.linesize,
+                          f->last_picture->linesize,
                           avctx->pix_fmt, fs->slice_width,
                           fs->slice_height);
         }
@@ -897,8 +901,8 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
 
     f->picture_number++;
 
-    av_frame_unref(&f->last_picture);
-    if ((ret = av_frame_ref(&f->last_picture, p)) < 0)
+    av_frame_unref(f->last_picture);
+    if ((ret = av_frame_ref(f->last_picture, p)) < 0)
         return ret;
     f->cur = NULL;
 
@@ -907,6 +911,17 @@ static int ffv1_decode_frame(AVCodecContext *avctx, void *data,
     return buf_size;
 }
 
+static av_cold int ffv1_decode_close(AVCodecContext *avctx)
+{
+    FFV1Context *s = avctx->priv_data;;
+
+    av_frame_free(&s->last_picture);
+
+    ffv1_close(avctx);
+
+    return 0;
+}
+
 AVCodec ff_ffv1_decoder = {
     .name           = "ffv1",
     .long_name      = NULL_IF_CONFIG_SMALL("FFmpeg video codec #1"),
@@ -914,7 +929,7 @@ AVCodec ff_ffv1_decoder = {
     .id             = AV_CODEC_ID_FFV1,
     .priv_data_size = sizeof(FFV1Context),
     .init           = ffv1_decode_init,
-    .close          = ffv1_close,
+    .close          = ffv1_decode_close,
     .decode         = ffv1_decode_frame,
     .capabilities   = CODEC_CAP_DR1 /*| CODEC_CAP_DRAW_HORIZ_BAND*/ |
                       CODEC_CAP_SLICE_THREADS,
libavcodec/ffv1enc.c

@@ -721,7 +721,12 @@ static av_cold int ffv1_encode_init(AVCodecContext *avctx)
     if ((ret = ffv1_allocate_initial_states(s)) < 0)
         return ret;
 
-    avctx->coded_frame = &s->picture;
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
+
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+
     if (!s->transparency)
         s->plane_count = 2;
 
@@ -858,12 +863,12 @@ static void encode_slice_header(FFV1Context *f, FFV1Context *fs)
         put_symbol(c, state, f->plane[j].quant_table_index, 0);
         av_assert0(f->plane[j].quant_table_index == f->avctx->context_model);
     }
-    if (!f->picture.interlaced_frame)
+    if (!f->avctx->coded_frame->interlaced_frame)
         put_symbol(c, state, 3, 0);
     else
-        put_symbol(c, state, 1 + !f->picture.top_field_first, 0);
-    put_symbol(c, state, f->picture.sample_aspect_ratio.num, 0);
-    put_symbol(c, state, f->picture.sample_aspect_ratio.den, 0);
+        put_symbol(c, state, 1 + !f->avctx->coded_frame->top_field_first, 0);
+    put_symbol(c, state, f->avctx->coded_frame->sample_aspect_ratio.num, 0);
+    put_symbol(c, state, f->avctx->coded_frame->sample_aspect_ratio.den, 0);
 }
 
 static int encode_slice(AVCodecContext *c, void *arg)
@@ -874,12 +879,12 @@ static int encode_slice(AVCodecContext *c, void *arg)
     int height = fs->slice_height;
     int x      = fs->slice_x;
     int y      = fs->slice_y;
-    AVFrame *const p = &f->picture;
+    const AVFrame *const p = f->frame;
     const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR)
                    ? (f->bits_per_raw_sample > 8) + 1
                    : 4;
 
-    if (p->key_frame)
+    if (c->coded_frame->key_frame)
         ffv1_clear_slice_state(f, fs);
     if (f->version > 2) {
         encode_slice_header(f, fs);
@@ -926,12 +931,14 @@ static int ffv1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
 {
     FFV1Context *f      = avctx->priv_data;
     RangeCoder *const c = &f->slice_context[0]->c;
-    AVFrame *const p    = &f->picture;
+    AVFrame *const p    = avctx->coded_frame;
     int used_count      = 0;
     uint8_t keystate    = 128;
     uint8_t *buf_p;
     int i, ret;
 
+    f->frame = pict;
+
     if ((ret = ff_alloc_packet(pkt, avctx->width * avctx->height *
                                ((8 * 2 + 1 + 1) * 4) / 8 +
                                FF_MIN_BUFFER_SIZE)) < 0) {
@@ -942,9 +949,6 @@ static int ffv1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     ff_init_range_encoder(c, pkt->data, pkt->size);
     ff_build_rac_states(c, 0.05 * (1LL << 32), 256 - 8);
 
-    *p = *pict;
-    p->pict_type = AV_PICTURE_TYPE_I;
-
     if (avctx->gop_size == 0 || f->picture_number % avctx->gop_size == 0) {
         put_rac(c, &keystate, 1);
         p->key_frame = 1;
@@ -1054,6 +1058,13 @@ static int ffv1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     return 0;
 }
 
+static av_cold int ffv1_encode_close(AVCodecContext *avctx)
+{
+    av_frame_free(&avctx->coded_frame);
+    ffv1_close(avctx);
+    return 0;
+}
+
 #define OFFSET(x) offsetof(FFV1Context, x)
 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption options[] = {
@@ -1082,7 +1093,7 @@ AVCodec ff_ffv1_encoder = {
     .priv_data_size = sizeof(FFV1Context),
     .init           = ffv1_encode_init,
     .encode2        = ffv1_encode_frame,
-    .close          = ffv1_close,
+    .close          = ffv1_encode_close,
     .capabilities   = CODEC_CAP_SLICE_THREADS,
     .pix_fmts       = (const enum AVPixelFormat[]) {
         AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
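
On the encoder side the same idea applies to avctx->coded_frame and to the input picture: the encoder now owns a heap-allocated coded_frame (created in init, freed in close) and merely stores a pointer to the caller's frame instead of copying the whole AVFrame struct with "*p = *pict". A rough sketch of that ownership pattern, with hypothetical FFV1LikeEncContext / enc_* names standing in for the real codec entry points:

/* Self-contained illustration only; FFV1LikeEncContext and the enc_* helpers
 * are made-up names, not the patch's code.  The context owns a heap-allocated
 * coded frame and only borrows the caller's input frame. */
#include <libavutil/frame.h>

typedef struct FFV1LikeEncContext {
    AVFrame       *coded_frame;    /* owned; stands in for avctx->coded_frame */
    const AVFrame *frame;          /* borrowed; stands in for f->frame = pict */
    int            picture_number;
} FFV1LikeEncContext;

static int enc_init(FFV1LikeEncContext *s)
{
    s->coded_frame = av_frame_alloc();
    if (!s->coded_frame)
        return AVERROR(ENOMEM);
    s->coded_frame->pict_type = AV_PICTURE_TYPE_I;  /* FFV1 is intra-only */
    return 0;
}

static void enc_frame(FFV1LikeEncContext *s, const AVFrame *pict)
{
    s->frame = pict;               /* keep a pointer, no struct copy */
    s->picture_number++;
}

static void enc_close(FFV1LikeEncContext *s)
{
    av_frame_free(&s->coded_frame);   /* frees and NULLs the pointer */
}

int main(void)
{
    FFV1LikeEncContext s = { 0 };
    AVFrame *in = av_frame_alloc();   /* pretend this is the user's picture */
    int ret     = in ? enc_init(&s) : AVERROR(ENOMEM);

    if (ret >= 0)
        enc_frame(&s, in);

    enc_close(&s);
    av_frame_free(&in);
    return ret < 0;
}
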