
Merge commit '45bde93eefa78c1bdb0936109fbd2e2fb27fbfe7'

* commit '45bde93eefa78c1bdb0936109fbd2e2fb27fbfe7':
  sunrastenc: use the AVFrame API properly.
  targaenc: use the AVFrame API properly.
  tiffenc: use the AVFrame API properly.
  pngenc: use the AVFrame API properly.

Conflicts:
	libavcodec/pngenc.c
	libavcodec/sunrastenc.c
	libavcodec/targaenc.c
	libavcodec/tiffenc.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
Michael Niedermayer, 2013-11-17 12:02:09 +01:00
commit 3ea168edeb
4 changed files with 64 additions and 29 deletions
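All four files below follow the same conversion: each encoder stops embedding an AVFrame in its private context and copying the input into it, reads the const frame passed to encode2 directly instead, and allocates avctx->coded_frame once in init and frees it in close. A condensed sketch of that pattern follows; the foo_* names and the trimmed includes are placeholders for illustration, not identifiers from the tree.

#include "libavutil/frame.h"
#include "avcodec.h"

/* Allocate the codec-owned coded_frame once, instead of pointing
 * avctx->coded_frame at an AVFrame embedded in the private context. */
static av_cold int foo_encode_init(AVCodecContext *avctx)
{
    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);
    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
    avctx->coded_frame->key_frame = 1;
    return 0;
}

/* Matching close callback so the allocation from init does not leak. */
static av_cold int foo_encode_close(AVCodecContext *avctx)
{
    av_frame_free(&avctx->coded_frame);
    return 0;
}

static int foo_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                            const AVFrame *pict, int *got_packet)
{
    /* Read the caller's frame directly; the old "*p = *pict" copy into
     * a context-embedded AVFrame is gone. */
    const AVFrame *const p = pict;
    /* ... encode p->data[]/p->linesize[] into pkt ... */
    *got_packet = 1;
    return 0;
}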

libavcodec/pngenc.c

@@ -38,7 +38,6 @@ typedef struct PNGEncContext {
uint8_t *bytestream;
uint8_t *bytestream_start;
uint8_t *bytestream_end;
AVFrame picture;
int filter_type;
@@ -218,7 +217,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
PNGEncContext *s = avctx->priv_data;
AVFrame * const p= &s->picture;
const AVFrame * const p = pict;
int bit_depth, color_type, y, len, row_size, ret, is_progressive;
int bits_per_pixel, pass_row_size, enc_row_size;
int64_t max_packet_size;
@@ -228,10 +227,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
uint8_t *progressive_buf = NULL;
uint8_t *top_buf = NULL;
*p = *pict;
p->pict_type= AV_PICTURE_TYPE_I;
p->key_frame= 1;
is_progressive = !!(avctx->flags & CODEC_FLAG_INTERLACED_DCT);
switch(avctx->pix_fmt) {
case AV_PIX_FMT_RGBA64BE:
@@ -454,8 +449,13 @@ static av_cold int png_enc_init(AVCodecContext *avctx){
avctx->bits_per_coded_sample = 8;
}
avcodec_get_frame_defaults(&s->picture);
avctx->coded_frame= &s->picture;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
ff_dsputil_init(&s->dsp, avctx);
s->filter_type = av_clip(avctx->prediction_method, PNG_FILTER_VALUE_NONE, PNG_FILTER_VALUE_MIXED);
@@ -472,6 +472,12 @@ static av_cold int png_enc_init(AVCodecContext *avctx){
return 0;
}
static av_cold int png_enc_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
#define OFFSET(x) offsetof(PNGEncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
@@ -494,6 +500,7 @@ AVCodec ff_png_encoder = {
.id = AV_CODEC_ID_PNG,
.priv_data_size = sizeof(PNGEncContext),
.init = png_enc_init,
.close = png_enc_close,
.encode2 = encode_frame,
.capabilities = CODEC_CAP_FRAME_THREADS | CODEC_CAP_INTRA_ONLY,
.pix_fmts = (const enum AVPixelFormat[]){

libavcodec/sunrastenc.c

@@ -198,6 +198,12 @@ static int sunrast_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
return 0;
}
static av_cold int sunrast_encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
static const AVCodecDefault sunrast_defaults[] = {
{ "coder", "rle" },
{ NULL },
@@ -210,6 +216,7 @@ AVCodec ff_sunrast_encoder = {
.id = AV_CODEC_ID_SUNRAST,
.priv_data_size = sizeof(SUNRASTContext),
.init = sunrast_encode_init,
.close = sunrast_encode_close,
.encode2 = sunrast_encode_frame,
.defaults = sunrast_defaults,
.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_BGR24,

libavcodec/targaenc.c

@@ -170,11 +170,31 @@ static int targa_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
return 0;
}
static av_cold int targa_encode_init(AVCodecContext *avctx)
{
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->key_frame = 1;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
return 0;
}
static av_cold int targa_encode_close(AVCodecContext *avctx)
{
av_frame_free(&avctx->coded_frame);
return 0;
}
AVCodec ff_targa_encoder = {
.name = "targa",
.long_name = NULL_IF_CONFIG_SMALL("Truevision Targa image"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_TARGA,
.init = targa_encode_init,
.close = targa_encode_close,
.encode2 = targa_encode_frame,
.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_RGB555LE, AV_PIX_FMT_GRAY8, AV_PIX_FMT_PAL8,

libavcodec/tiffenc.c

@@ -52,7 +52,6 @@ static const uint8_t type_sizes2[14] = {
typedef struct TiffEncoderContext {
AVClass *class; ///< for private options
AVCodecContext *avctx;
AVFrame picture;
int width; ///< picture width
int height; ///< picture height
@@ -195,9 +194,9 @@ static int encode_strip(TiffEncoderContext *s, const int8_t *src,
}
}
static void pack_yuv(TiffEncoderContext *s, uint8_t *dst, int lnum)
static void pack_yuv(TiffEncoderContext *s, const AVFrame *p,
uint8_t *dst, int lnum)
{
AVFrame *p = &s->picture;
int i, j, k;
int w = (s->width - 1) / s->subsampling[0] + 1;
uint8_t *pu = &p->data[1][lnum / s->subsampling[1] * p->linesize[1]];
@@ -223,24 +222,12 @@ static void pack_yuv(TiffEncoderContext *s, uint8_t *dst, int lnum)
}
}
static av_cold int encode_init(AVCodecContext *avctx)
{
TiffEncoderContext *s = avctx->priv_data;
avctx->coded_frame = &s->picture;
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
s->avctx = avctx;
return 0;
}
static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pict, int *got_packet)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(avctx->pix_fmt);
TiffEncoderContext *s = avctx->priv_data;
AVFrame *const p = &s->picture;
const AVFrame *const p = pict;
int i;
uint8_t *ptr;
uint8_t *offset;
@@ -252,8 +239,6 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
int is_yuv = 0, alpha = 0;
int shift_h, shift_v;
*p = *pict;
s->width = avctx->width;
s->height = avctx->height;
s->subsampling[0] = 1;
@@ -373,7 +358,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
zn = 0;
for (j = 0; j < s->rps; j++) {
if (is_yuv) {
pack_yuv(s, s->yuv_line, j);
pack_yuv(s, p, s->yuv_line, j);
memcpy(zbuf + zn, s->yuv_line, bytes_per_row);
j += s->subsampling[1] - 1;
} else
@@ -409,7 +394,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
s->strip_offsets[i / s->rps] = ptr - pkt->data;
}
if (is_yuv) {
pack_yuv(s, s->yuv_line, i);
pack_yuv(s, p, s->yuv_line, i);
ret = encode_strip(s, s->yuv_line, ptr, bytes_per_row, s->compr);
i += s->subsampling[1] - 1;
} else
@@ -497,10 +482,26 @@ fail:
return ret < 0 ? ret : 0;
}
static av_cold int encode_init(AVCodecContext *avctx)
{
TiffEncoderContext *s = avctx->priv_data;
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
avctx->coded_frame->key_frame = 1;
s->avctx = avctx;
return 0;
}
static av_cold int encode_close(AVCodecContext *avctx)
{
TiffEncoderContext *s = avctx->priv_data;
av_frame_free(&avctx->coded_frame);
av_freep(&s->strip_sizes);
av_freep(&s->strip_offsets);
av_freep(&s->yuv_line);
@@ -536,8 +537,8 @@ AVCodec ff_tiff_encoder = {
.id = AV_CODEC_ID_TIFF,
.priv_data_size = sizeof(TiffEncoderContext),
.init = encode_init,
.encode2 = encode_frame,
.close = encode_close,
.encode2 = encode_frame,
.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_PAL8, AV_PIX_FMT_GRAY8,
AV_PIX_FMT_GRAY8A, AV_PIX_FMT_GRAY16LE,