mirror of
https://github.com/FFmpeg/FFmpeg.git
synced 2025-01-08 13:22:53 +02:00
Merge remote-tracking branch 'qatar/master'
* qatar/master:
  doxy: remove reference to removed api
  examples: unbreak compilation
  ttadec: cosmetics: reindent
  sunrast: use RLE trigger macro inplace of the hard coded value.
  sunrastenc: set keyframe flag for the output packet.
  mpegvideo_enc: switch to encode2().
  mpegvideo_enc: force encoding delay of at least 1 frame when low_delay=0

Conflicts:
  doc/examples/muxing.c

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit be2e211dce
@@ -70,7 +70,7 @@ static void audio_encode_example(const char *filename)
     c->sample_fmt = AV_SAMPLE_FMT_S16;
 
     /* open it */
-    if (avcodec_open(c, codec) < 0) {
+    if (avcodec_open2(c, codec, NULL) < 0) {
         fprintf(stderr, "could not open codec\n");
         exit(1);
     }
@@ -135,7 +135,7 @@ static void audio_decode_example(const char *outfilename, const char *filename)
     c = avcodec_alloc_context3(codec);
 
     /* open it */
-    if (avcodec_open(c, codec) < 0) {
+    if (avcodec_open2(c, codec, NULL) < 0) {
         fprintf(stderr, "could not open codec\n");
         exit(1);
     }
@@ -243,7 +243,7 @@ static void video_encode_example(const char *filename, int codec_id)
         av_opt_set(c->priv_data, "preset", "slow", 0);
 
     /* open it */
-    if (avcodec_open(c, codec) < 0) {
+    if (avcodec_open2(c, codec, NULL) < 0) {
         fprintf(stderr, "could not open codec\n");
         exit(1);
     }
@@ -366,7 +366,7 @@ static void video_decode_example(const char *outfilename, const char *filename)
        available in the bitstream. */
 
     /* open it */
-    if (avcodec_open(c, codec) < 0) {
+    if (avcodec_open2(c, codec, NULL) < 0) {
         fprintf(stderr, "could not open codec\n");
         exit(1);
     }
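The four hunks above all make the same substitution: avcodec_open() is gone, and avcodec_open2() takes an extra AVDictionary argument for codec options, passed as NULL in the examples. A minimal caller-side sketch of what that third argument is for; the option names used here ("b", "threads") and the helper name are illustrative, not part of this patch:

    #include <libavcodec/avcodec.h>
    #include <libavutil/dict.h>

    /* Sketch only: assumes `c` and `codec` are set up as in the examples above. */
    static int open_codec_with_options(AVCodecContext *c, AVCodec *codec)
    {
        AVDictionary *opts = NULL;
        int ret;

        av_dict_set(&opts, "b", "64000", 0);      /* bit rate, given as a string option */
        av_dict_set(&opts, "threads", "auto", 0); /* let the codec choose a thread count */

        ret = avcodec_open2(c, codec, &opts);     /* options it consumes are removed from opts */

        av_dict_free(&opts);                      /* whatever is left was not recognized */
        return ret;
    }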
@@ -103,7 +103,7 @@ static void open_audio(AVFormatContext *oc, AVStream *st)
     }
 
     /* open it */
-    if (avcodec_open(c, codec) < 0) {
+    if (avcodec_open2(c, codec, NULL) < 0) {
         fprintf(stderr, "could not open codec\n");
         exit(1);
     }
@@ -164,7 +164,7 @@ static void write_audio_frame(AVFormatContext *oc, AVStream *st)
 
     get_audio_frame(samples, audio_input_frame_size, c->channels);
 
-    pkt.size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
+    pkt.size = avcodec_encode_audio2(c, audio_outbuf, audio_outbuf_size, samples);
 
     if (c->coded_frame && c->coded_frame->pts != AV_NOPTS_VALUE)
         pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
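A note on the avcodec_encode_audio2() line above: the hunk keeps the old buffer-style argument list (doc/examples/muxing.c is listed as a merge conflict), while the function as declared in avcodec.h is packet based and takes an AVPacket plus an AVFrame. A hedged sketch of that packet-based calling convention, with the frame preparation assumed to have happened elsewhere:

    #include <libavcodec/avcodec.h>

    /* Sketch: `c` is an opened audio encoder, `frame` already holds the samples. */
    static int encode_audio_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
    {
        int got_packet = 0;
        int ret;

        av_init_packet(pkt);
        pkt->data = NULL;   /* let the encoder allocate the packet buffer */
        pkt->size = 0;

        ret = avcodec_encode_audio2(c, pkt, frame, &got_packet);
        if (ret < 0)
            return ret;
        return got_packet;  /* 1 if pkt now holds a compressed frame */
    }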
@@ -284,7 +284,7 @@ static void open_video(AVFormatContext *oc, AVStream *st)
     }
 
     /* open the codec */
-    if (avcodec_open(c, codec) < 0) {
+    if (avcodec_open2(c, codec, NULL) < 0) {
         fprintf(stderr, "could not open codec\n");
         exit(1);
     }
@@ -90,7 +90,7 @@ AVCodec ff_flv_encoder = {
     .id             = CODEC_ID_FLV1,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
@@ -327,7 +327,7 @@ AVCodec ff_h261_encoder = {
     .id             = CODEC_ID_H261,
     .priv_data_size = sizeof(H261Context),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("H.261"),
@@ -452,11 +452,10 @@ void ff_mjpeg_encode_mb(MpegEncContext *s, DCTELEM block[6][64])
 
 // maximum over s->mjpeg_vsample[i]
 #define V_MAX 2
-static int amv_encode_picture(AVCodecContext *avctx,
-                              unsigned char *buf, int buf_size, void *data)
+static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+                              AVFrame *pic, int *got_packet)
 {
 
-    AVFrame* pic=data;
     MpegEncContext *s = avctx->priv_data;
     int i;
 
@@ -469,7 +468,7 @@ static int amv_encode_picture(AVCodecContext *avctx,
         pic->data[i] += (pic->linesize[i] * (s->mjpeg_vsample[i] * (8 * s->mb_height -((s->height/V_MAX)&7)) - 1 ));
         pic->linesize[i] *= -1;
     }
-    return ff_MPV_encode_picture(avctx,buf, buf_size, pic);
+    return ff_MPV_encode_picture(avctx, pkt, pic, got_packet);
 }
 
 AVCodec ff_mjpeg_encoder = {
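The amv_encode_picture() change above keeps the AMV bottom-up trick intact while switching to the new entry point: each plane's data pointer is moved to its last row and the linesize is negated, so the shared MJPEG code effectively encodes the picture upside down. A small stand-alone sketch of the same idea on one 8-bit plane, independent of the mpegvideo structures:

    #include <stdint.h>

    /* Sketch: flip a plane vertically without copying, by re-pointing the data
     * pointer at the last row and negating the stride. */
    static void flip_plane_ref(uint8_t **data, int *linesize, int height)
    {
        *data    += (int64_t)*linesize * (height - 1); /* first byte of the last row */
        *linesize = -*linesize;                        /* stepping "down" now moves up */
    }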
@@ -478,7 +477,7 @@ AVCodec ff_mjpeg_encoder = {
     .id             = CODEC_ID_MJPEG,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("MJPEG (Motion JPEG)"),
@@ -490,7 +489,7 @@ AVCodec ff_amv_encoder = {
     .id             = CODEC_ID_AMV,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = amv_encode_picture,
+    .encode2        = amv_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUVJ420P, PIX_FMT_YUVJ422P, PIX_FMT_NONE},
     .long_name = NULL_IF_CONFIG_SMALL("AMV Video"),
@@ -966,7 +966,7 @@ AVCodec ff_mpeg1video_encoder = {
     .id             = CODEC_ID_MPEG1VIDEO,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .supported_framerates= avpriv_frame_rate_tab+1,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
@@ -981,7 +981,7 @@ AVCodec ff_mpeg2video_encoder = {
     .id             = CODEC_ID_MPEG2VIDEO,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .supported_framerates= avpriv_frame_rate_tab+1,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_YUV422P, PIX_FMT_NONE},
@@ -1346,7 +1346,7 @@ AVCodec ff_mpeg4_encoder = {
     .id             = CODEC_ID_MPEG4,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .capabilities= CODEC_CAP_DELAY | CODEC_CAP_SLICE_THREADS,
@@ -263,6 +263,14 @@ typedef struct MpegEncContext {
      * offsets used in asm. */
 
     int64_t user_specified_pts;///< last non zero pts from AVFrame which was passed into avcodec_encode_video()
+    /**
+     * pts difference between the first and second input frame, used for
+     * calculating dts of the first frame when there's a delay */
+    int64_t dts_delta;
+    /**
+     * reordered pts to be used as dts for the next output frame when there's
+     * a delay */
+    int64_t reordered_pts;
 
     /** bit output */
     PutBitContext pb;
@@ -702,7 +710,8 @@ int ff_MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx);
 void ff_MPV_frame_end(MpegEncContext *s);
 int ff_MPV_encode_init(AVCodecContext *avctx);
 int ff_MPV_encode_end(AVCodecContext *avctx);
-int ff_MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data);
+int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+                          const AVFrame *frame, int *got_packet);
 void ff_MPV_common_init_mmx(MpegEncContext *s);
 void ff_MPV_common_init_axp(MpegEncContext *s);
 void ff_MPV_common_init_mmi(MpegEncContext *s);
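The prototype above is the heart of the encode2() switch: an encoder no longer writes into a caller-supplied buffer and returns a byte count, it fills an AVPacket and reports success through *got_packet. A rough, hedged skeleton of that shape follows; it uses the public av_new_packet() instead of the libavcodec-internal ff_alloc_packet() used by this patch, and the size bound and keyframe handling are assumptions made only for the sketch:

    #include <libavcodec/avcodec.h>

    /* Skeleton of an encode2-style callback; the actual bitstream writing is elided. */
    static int my_encode2(AVCodecContext *avctx, AVPacket *pkt,
                          const AVFrame *frame, int *got_packet)
    {
        int worst_case_size = avctx->width * avctx->height * 4 + 1024; /* assumed bound */
        int written = 0, ret;

        if (!pkt->data && (ret = av_new_packet(pkt, worst_case_size)) < 0)
            return ret;

        /* ... a real encoder writes the compressed frame into pkt->data here
         *     and sets `written` to the number of bytes produced ... */

        pkt->pts    = frame->pts;
        pkt->dts    = pkt->pts;           /* intra-only case: no reordering delay */
        pkt->flags |= AV_PKT_FLAG_KEY;    /* assuming every frame is a keyframe */
        pkt->size   = written;

        *got_packet = !!pkt->size;
        return 0;
    }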
@@ -897,7 +897,8 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
     AVFrame *pic = NULL;
     int64_t pts;
     int i;
-    const int encoding_delay = s->max_b_frames;
+    const int encoding_delay = s->max_b_frames ? s->max_b_frames :
+                                                 (s->low_delay ? 0 : 1);
     int direct = 1;
 
     if (pic_arg) {
@@ -915,6 +916,9 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg)
                        "last=%"PRId64"\n", pts, s->user_specified_pts);
                 return -1;
             }
+
+            if (!s->low_delay && pic_arg->display_picture_number == 1)
+                s->dts_delta = time - last;
         }
         s->user_specified_pts = pts;
     } else {
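The added lines record dts_delta, the pts distance between the first two input frames, which the "force encoding delay of at least 1 frame when low_delay=0" change needs: once the encoder runs one frame behind, the first packet must carry a dts earlier than its pts. A tiny worked example with assumed numbers:

    /* Hypothetical input: frames arrive with pts 0, 40, 80, ... and the encoder
     * delays output by one frame (low_delay=0, no B-frames). */
    int64_t dts_delta = 40 - 0;          /* pts difference of the first two inputs */
    int64_t first_dts = 0 - dts_delta;   /* first packet: dts = pts - dts_delta = -40 */
    /* Subsequent packets reuse the reordered pts of the previous input as dts,
     * so dts stays monotonic and never exceeds pts. */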
@@ -1384,20 +1388,23 @@ no_output_pic:
     return 0;
 }
 
-int ff_MPV_encode_picture(AVCodecContext *avctx,
-                          unsigned char *buf, int buf_size, void *data)
+int ff_MPV_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
+                          const AVFrame *pic_arg, int *got_packet)
 {
     MpegEncContext *s = avctx->priv_data;
-    AVFrame *pic_arg = data;
-    int i, stuffing_count;
+    int i, stuffing_count, ret;
     int context_count = s->slice_context_count;
 
+    if (!pkt->data &&
+        (ret = ff_alloc_packet(pkt, s->mb_width*s->mb_height*(MAX_MB_BYTES+100)+10000)) < 0)
+        return ret;
+
     for (i = 0; i < context_count; i++) {
         int start_y = s->thread_context[i]->start_mb_y;
         int end_y   = s->thread_context[i]-> end_mb_y;
         int h       = s->mb_height;
-        uint8_t *start = buf + (size_t)(((int64_t) buf_size) * start_y / h);
-        uint8_t *end   = buf + (size_t)(((int64_t) buf_size) * end_y / h);
+        uint8_t *start = pkt->data + (size_t)(((int64_t) pkt->size) * start_y / h);
+        uint8_t *end   = pkt->data + (size_t)(((int64_t) pkt->size) * end_y / h);
 
         init_put_bits(&s->thread_context[i]->pb, start, end - start);
     }
@@ -1557,13 +1564,27 @@ vbv_retry:
         }
         s->total_bits     += s->frame_bits;
         avctx->frame_bits  = s->frame_bits;
+
+        pkt->pts = s->current_picture.f.pts;
+        if (!s->low_delay) {
+            if (!s->current_picture.f.coded_picture_number)
+                pkt->dts = pkt->pts - s->dts_delta;
+            else
+                pkt->dts = s->reordered_pts;
+            s->reordered_pts = s->input_picture[0]->f.pts;
+        } else
+            pkt->dts = pkt->pts;
+        if (s->current_picture.f.key_frame)
+            pkt->flags |= AV_PKT_FLAG_KEY;
     } else {
         assert((put_bits_ptr(&s->pb) == s->pb.buf));
         s->frame_bits = 0;
     }
     assert((s->frame_bits & 7) == 0);
 
-    return s->frame_bits / 8;
+    pkt->size = s->frame_bits / 8;
+    *got_packet = !!pkt->size;
+    return 0;
 }
 
 static inline void dct_single_coeff_elimination(MpegEncContext *s,
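Seen from the caller's side, the rewritten ff_MPV_encode_picture() is reached through avcodec_encode_video2(), and the one-frame delay introduced above means the encoder has to be flushed with NULL frames at the end of the stream. A minimal sketch of that driving loop; get_next_frame() and the raw fwrite() output are placeholders, not part of this patch:

    #include <stdio.h>
    #include <libavcodec/avcodec.h>

    AVFrame *get_next_frame(void); /* placeholder: next input frame, NULL at end of stream */

    /* Sketch: `c` is an opened video encoder. */
    static int encode_all(AVCodecContext *c, FILE *out)
    {
        AVPacket pkt;
        AVFrame *frame;
        int got_packet, ret;

        do {
            frame = get_next_frame();        /* NULL frame flushes delayed pictures */

            av_init_packet(&pkt);
            pkt.data = NULL;                 /* let the encoder allocate the buffer */
            pkt.size = 0;

            ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
            if (ret < 0)
                return ret;

            if (got_packet) {
                fwrite(pkt.data, 1, pkt.size, out);
                av_free_packet(&pkt);        /* release the encoder-allocated buffer */
            }
        } while (frame || got_packet);       /* keep flushing until output stops */

        return 0;
    }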
@@ -2385,7 +2406,7 @@ static int encode_thread(AVCodecContext *c, void *arg){
             if(s->data_partitioning){
                 if(   s->pb2   .buf_end - s->pb2   .buf - (put_bits_count(&s->    pb2)>>3) < MAX_MB_BYTES
                    || s->tex_pb.buf_end - s->tex_pb.buf - (put_bits_count(&s->tex_pb )>>3) < MAX_MB_BYTES){
-                    av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
+                    av_log(s->avctx, AV_LOG_ERROR, "encoded partitioned frame too large\n");
                     return -1;
                 }
             }
@@ -4084,7 +4105,7 @@ AVCodec ff_h263_encoder = {
     .id             = CODEC_ID_H263,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("H.263 / H.263-1996"),
@@ -4111,7 +4132,7 @@ AVCodec ff_h263p_encoder = {
     .id             = CODEC_ID_H263P,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .capabilities   = CODEC_CAP_SLICE_THREADS,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
@@ -4125,7 +4146,7 @@ AVCodec ff_msmpeg4v2_encoder = {
     .id             = CODEC_ID_MSMPEG4V2,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
@@ -4137,7 +4158,7 @@ AVCodec ff_msmpeg4v3_encoder = {
     .id             = CODEC_ID_MSMPEG4V3,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
@@ -4149,7 +4170,7 @@ AVCodec ff_wmv1_encoder = {
     .id             = CODEC_ID_WMV1,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
@@ -62,7 +62,7 @@ AVCodec ff_rv10_encoder = {
     .id             = CODEC_ID_RV10,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("RealVideo 1.0"),
@@ -63,7 +63,7 @@ AVCodec ff_rv20_encoder = {
     .id             = CODEC_ID_RV20,
     .priv_data_size = sizeof(MpegEncContext),
     .init           = ff_MPV_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("RealVideo 2.0"),
@@ -165,7 +165,7 @@ static int sunrast_decode_frame(AVCodecContext *avctx, void *data,
             if (buf_end - buf < 1)
                 return AVERROR_INVALIDDATA;
 
-            if ((value = *buf++) == 0x80) {
+            if ((value = *buf++) == RLE_TRIGGER) {
                 run = *buf++ + 1;
                 if (run != 1)
                     value = *buf++;
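The RLE_TRIGGER change above only replaces the literal 0x80 with a macro of the same value, but it is worth spelling out the run-length scheme the decoder is walking: a 0x80 byte starts a run, the next byte is a count, a count of 0 means a single literal 0x80, and a count of N means the following value byte repeated N+1 times. A self-contained sketch of that expansion, with bounds checks kept minimal; the helper name is illustrative:

    #include <stddef.h>
    #include <stdint.h>

    #define RLE_TRIGGER 0x80   /* same value the decoder now names symbolically */

    /* Sketch: expand Sun Raster RLE from src into dst; returns bytes written or -1. */
    static int sunrast_rle_expand(const uint8_t *src, size_t srclen,
                                  uint8_t *dst, size_t dstlen)
    {
        size_t si = 0, di = 0;

        while (si < srclen) {
            uint8_t value = src[si++];
            size_t  run   = 1;

            if (value == RLE_TRIGGER && si < srclen) {
                run = src[si++] + 1;          /* count 0 -> one literal 0x80 */
                if (run != 1 && si < srclen)
                    value = src[si++];        /* value byte to repeat */
            }
            if (di + run > dstlen)
                return -1;
            while (run--)
                dst[di++] = value;
        }
        return (int)di;
    }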
@@ -199,6 +199,7 @@ static int sunrast_encode_frame(AVCodecContext *avctx, AVPacket *avpkt,
     AV_WB32(&avpkt->data[16], s->length);
 
     *got_packet_ptr = 1;
+    avpkt->flags |= AV_PKT_FLAG_KEY;
     avpkt->size = bytestream2_tell_p(&s->p);
     return 0;
 }
@@ -217,7 +217,7 @@ AVCodec ff_wmv2_encoder = {
     .id             = CODEC_ID_WMV2,
     .priv_data_size = sizeof(Wmv2Context),
     .init           = wmv2_encode_init,
-    .encode         = ff_MPV_encode_picture,
+    .encode2        = ff_MPV_encode_picture,
     .close          = ff_MPV_encode_end,
     .pix_fmts= (const enum PixelFormat[]){PIX_FMT_YUV420P, PIX_FMT_NONE},
     .long_name= NULL_IF_CONFIG_SMALL("Windows Media Video 8"),
@@ -439,15 +439,14 @@ typedef struct AVInputFormat {
 
     /**
      * Read the format header and initialize the AVFormatContext
-     * structure. Return 0 if OK. 'ap' if non-NULL contains
-     * additional parameters. Only used in raw format right
-     * now. 'av_new_stream' should be called to create new streams.
+     * structure. Return 0 if OK. Only used in raw format right
+     * now. 'avformat_new_stream' should be called to create new streams.
      */
     int (*read_header)(struct AVFormatContext *);
 
     /**
      * Read one packet and put it in 'pkt'. pts and flags are also
-     * set. 'av_new_stream' can be called only if the flag
+     * set. 'avformat_new_stream' can be called only if the flag
      * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a
      * background thread).
      * @return 0 on success, < 0 on error.
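The documentation fix above tracks the av_new_stream() to avformat_new_stream() rename. A hedged sketch of what a minimal read_header callback looks like with the new helper; the codec id, time base and the use of the internal avpriv_set_pts_info() are illustrative only, as is the demuxer name:

    #include "avformat.h"
    #include "internal.h"   /* avpriv_set_pts_info(), for code living inside libavformat */

    /* Sketch: create one stream and describe it; all values are placeholders. */
    static int my_read_header(AVFormatContext *s)
    {
        AVStream *st = avformat_new_stream(s, NULL);  /* replaces av_new_stream(s, 0) */
        if (!st)
            return AVERROR(ENOMEM);

        st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
        st->codec->codec_id   = CODEC_ID_RAWVIDEO;    /* placeholder codec id */
        avpriv_set_pts_info(st, 64, 1, 25);           /* 64-bit pts, 1/25 time base */

        return 0;
    }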