
pass AVPacket into av_write_frame()

fixes the random dts/pts during encoding
asf preroll fix
no more initial zero frames for b frame encoding
mpeg-es dts during demuxing fixed
.ffm timestamp scale fixed, ffm is still broken though

Originally committed as revision 3168 to svn://svn.ffmpeg.org/ffmpeg/trunk
Michael Niedermayer, 2004-05-29 02:06:32 +00:00
parent a7b2871cd1
commit e928649b0b
33 changed files with 328 additions and 245 deletions
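For orientation, the calling convention this commit introduces boils down to filling an AVPacket and handing it to av_write_frame(), instead of passing stream_index/buf/size separately. A minimal sketch of the caller side, modeled on the ffmpeg.c and output_example.c hunks below; oc, st, enc, outbuf and out_size stand for whatever muxing context, output stream, encoder context and encoded buffer the caller already has:

    AVPacket pkt;
    av_init_packet(&pkt);                 /* resets flags, timestamps default to AV_NOPTS_VALUE */

    pkt.stream_index = st->index;         /* which output stream the data belongs to */
    pkt.data         = outbuf;            /* encoded data from avcodec_encode_*() */
    pkt.size         = out_size;
    if (enc->coded_frame)
        pkt.pts = enc->coded_frame->pts;  /* in AV_TIME_BASE units; av_write_frame() rescales */
    if (enc->coded_frame && enc->coded_frame->key_frame)
        pkt.flags |= PKT_FLAG_KEY;

    /* previously: av_write_frame(oc, st->index, outbuf, out_size) */
    if (av_write_frame(oc, &pkt) != 0) {
        /* < 0 means error, 1 means end of stream wanted */
    }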

ffmpeg.c

@@ -230,9 +230,8 @@ typedef struct AVOutputStream {
     int frame_number;
     /* input pts and corresponding output pts
        for A/V sync */
-    double sync_ipts;
-    double sync_ipts_offset;
-    int64_t sync_opts;
+    double sync_ipts;     /* dts from the AVPacket of the demuxer in second units */
+    int64_t sync_opts;    /* output frame counter, could be changed to some true timestamp */ //FIXME look at frame_number
     /* video only */
     int video_resample;      /* video_resample and video_crop are mutually exclusive */
     AVFrame pict_tmp;        /* temporary image for resampling */
@@ -447,12 +446,23 @@ static void do_audio_out(AVFormatContext *s,
         while (fifo_read(&ost->fifo, audio_buf, frame_bytes,
                          &ost->fifo.rptr) == 0) {
+            AVPacket pkt;
+            av_init_packet(&pkt);
+
             ret = avcodec_encode_audio(enc, audio_out, audio_out_size,
                                        (short *)audio_buf);
             audio_size += ret;
-            av_write_frame(s, ost->index, audio_out, ret);
+            pkt.stream_index= ost->index;
+            pkt.data= audio_out;
+            pkt.size= ret;
+            if(enc->coded_frame)
+                pkt.pts= enc->coded_frame->pts;
+            pkt.flags |= PKT_FLAG_KEY;
+            av_write_frame(s, &pkt);
         }
     } else {
+        AVPacket pkt;
+        av_init_packet(&pkt);
         /* output a pcm frame */
         /* XXX: change encoding codec API to avoid this ? */
         switch(enc->codec->id) {
@@ -468,7 +478,13 @@ static void do_audio_out(AVFormatContext *s,
         ret = avcodec_encode_audio(enc, audio_out, size_out,
                                    (short *)buftmp);
         audio_size += ret;
-        av_write_frame(s, ost->index, audio_out, ret);
+        pkt.stream_index= ost->index;
+        pkt.data= audio_out;
+        pkt.size= ret;
+        if(enc->coded_frame)
+            pkt.pts= enc->coded_frame->pts;
+        pkt.flags |= PKT_FLAG_KEY;
+        av_write_frame(s, &pkt);
     }
 }
@@ -586,58 +602,17 @@ static void do_video_out(AVFormatContext *s,
     *frame_size = 0;
-    /* NOTE: the A/V sync is always done by considering the audio is
-       the master clock. It is suffisant for transcoding or playing,
-       but not for the general case */
     if(sync_method){
-        if (audio_sync) {
-            /* compute the A-V delay and duplicate/remove frames if needed */
-            double adelta, vdelta, av_delay;
-
-            adelta = audio_sync->sync_ipts - ((double)audio_sync->sync_opts *
-                audio_sync->st->time_base.num / audio_sync->st->time_base.den);
-            vdelta = ost->sync_ipts - ((double)ost->sync_opts *
-                ost->st->time_base.num / ost->st->time_base.den);
-            av_delay = adelta - vdelta;
-            if (av_delay < -AV_DELAY_MAX)
-                nb_frames = 2;
-            else if (av_delay > AV_DELAY_MAX)
-                nb_frames = 0;
-//            printf("adelta=%f vdelta=%f delay=%f nb=%d (A)\n", adelta, vdelta, av_delay, nb_frames);
-        } else {
-            double vdelta;
-            vdelta = (double)(ost->st->pts.val) * ost->st->time_base.num / ost->st->time_base.den - (ost->sync_ipts - ost->sync_ipts_offset);
-            if (vdelta < 100 && vdelta > -100 && ost->sync_ipts_offset) {
-                if (vdelta < -AV_DELAY_MAX)
-                    nb_frames = 2;
-                else if (vdelta > AV_DELAY_MAX)
-                    nb_frames = 0;
-            } else {
-                ost->sync_ipts_offset -= vdelta;
-                if (!ost->sync_ipts_offset)
-                    ost->sync_ipts_offset = 0.000001; /* one microsecond */
-            }
-//            printf("delay=%f nb=%d (V)\n",vdelta, nb_frames);
-        }
-    }
-#if defined(AVSYNC_DEBUG)
-    {
-        static char *action[] = { "drop frame", "copy frame", "dup frame" };
-
-        if (audio_sync && verbose >=0) {
-            fprintf(stderr, "Input APTS %12.6f, output APTS %12.6f, ",
-                    (double) audio_sync->sync_ipts,
-                    (double) audio_sync->st->pts.val * st->time_base.num / st->time_base.den);
-            fprintf(stderr, "Input VPTS %12.6f, output VPTS %12.6f: %s\n",
-                    (double) ost->sync_ipts,
-                    (double) ost->st->pts.val * st->time_base.num / st->time_base.den,
-                    action[nb_frames]);
-        }
-    }
-#endif
+        double vdelta;
+        vdelta = ost->sync_ipts * enc->frame_rate / enc->frame_rate_base - ost->sync_opts;
+        //FIXME set to 0.5 after we fix some dts/pts bugs like in avidec.c
+        if (vdelta < -1.1)
+            nb_frames = 0;
+        else if (vdelta > 1.1)
+            nb_frames = 2;
+//printf("vdelta:%f, ost->sync_opts:%lld, ost->sync_ipts:%f nb_frames:%d\n", vdelta, ost->sync_opts, ost->sync_ipts, nb_frames);
+    }
+    ost->sync_opts+= nb_frames;
 
     if (nb_frames <= 0)
         return;
@@ -779,14 +754,24 @@ static void do_video_out(AVFormatContext *s,
     /* duplicates frame if needed */
     /* XXX: pb because no interleaving */
     for(i=0;i<nb_frames;i++) {
+        AVPacket pkt;
+        av_init_packet(&pkt);
+        pkt.stream_index= ost->index;
+
         if (s->oformat->flags & AVFMT_RAWPICTURE) {
             /* raw pictures are written as AVPicture structure to
                avoid any copies. We support temorarily the older
                method. */
             AVFrame* old_frame = enc->coded_frame;
-            enc->coded_frame = dec->coded_frame;
-            av_write_frame(s, ost->index,
-                           (uint8_t *)final_picture, sizeof(AVPicture));
+            enc->coded_frame = dec->coded_frame; //FIXME/XXX remove this hack
+            pkt.data= (uint8_t *)final_picture;
+            pkt.size= sizeof(AVPicture);
+            if(dec->coded_frame)
+                pkt.pts= dec->coded_frame->pts;
+            if(dec->coded_frame && dec->coded_frame->key_frame)
+                pkt.flags |= PKT_FLAG_KEY;
+            av_write_frame(s, &pkt);
             enc->coded_frame = old_frame;
         } else {
             AVFrame big_picture;
@@ -815,7 +800,14 @@ static void do_video_out(AVFormatContext *s,
                                        video_buffer, VIDEO_BUFFER_SIZE,
                                        &big_picture);
             //enc->frame_number = enc->real_pict_num;
-            av_write_frame(s, ost->index, video_buffer, ret);
+            if(ret){
+                pkt.data= video_buffer;
+                pkt.size= ret;
+                if(enc->coded_frame)
+                    pkt.pts= enc->coded_frame->pts;
+                if(enc->coded_frame && enc->coded_frame->key_frame)
+                    pkt.flags |= PKT_FLAG_KEY;
+                av_write_frame(s, &pkt);
             *frame_size = ret;
             //fprintf(stderr,"\nFrame: %3d %3d size: %5d type: %d",
             //        enc->frame_number-1, enc->real_pict_num, ret,
@@ -825,6 +817,7 @@ static void do_video_out(AVFormatContext *s,
                 fprintf(ost->logfile, "%s", enc->stats_out);
             }
         }
+        }
         ost->frame_number++;
     }
 the_end:
@@ -872,7 +865,7 @@ static void do_video_stats(AVFormatContext *os, AVOutputStream *ost,
     fprintf(fvstats,"f_size= %6d ", frame_size);
     /* compute pts value */
-    ti1 = (double)ost->st->pts.val * ost->st->time_base.num / ost->st->time_base.den;
+    ti1 = (double)ost->sync_opts *enc->frame_rate_base / enc->frame_rate;
     if (ti1 < 0.01)
         ti1 = 0.01;
@@ -1003,8 +996,8 @@ static int output_packet(AVInputStream *ist, int ist_index,
     short samples[AVCODEC_MAX_AUDIO_FRAME_SIZE / 2];
     void *buffer_to_free;
 
-    if (pkt && pkt->pts != AV_NOPTS_VALUE) {
-        ist->next_pts = ist->pts = pkt->pts;
+    if (pkt && pkt->pts != AV_NOPTS_VALUE) { //FIXME seems redundant, as libavformat does this too
+        ist->next_pts = ist->pts = pkt->dts;
     } else {
         ist->pts = ist->next_pts;
     }
@@ -1122,10 +1115,6 @@ static int output_packet(AVInputStream *ist, int ist_index,
 #endif
                     /* set the input output pts pairs */
                     ost->sync_ipts = (double)ist->pts / AV_TIME_BASE;
-                    /* XXX: take into account the various fifos,
-                       in particular for audio */
-                    ost->sync_opts = ost->st->pts.val;
-                    //printf("ipts=%lld sync_ipts=%f sync_opts=%lld pts.val=%lld pkt->pts=%lld\n", ist->pts, ost->sync_ipts, ost->sync_opts, ost->st->pts.val, pkt->pts);
 
                     if (ost->encoding_needed) {
                         switch(ost->st->codec.codec_type) {
@@ -1157,7 +1146,9 @@ static int output_packet(AVInputStream *ist, int ist_index,
                             av_abort();
                         }
                     } else {
-                        AVFrame avframe;
+                        AVFrame avframe; //FIXME/XXX remove this
+                        AVPacket opkt;
+                        av_init_packet(&opkt);
 
                         /* no reencoding needed : output the packet directly */
                         /* force the input stream PTS */
@@ -1165,14 +1156,19 @@ static int output_packet(AVInputStream *ist, int ist_index,
                         avcodec_get_frame_defaults(&avframe);
                         ost->st->codec.coded_frame= &avframe;
                         avframe.key_frame = pkt->flags & PKT_FLAG_KEY;
-                        ost->st->pts.val= av_rescale(ist->pts, ost->st->time_base.den, ost->st->time_base.num*AV_TIME_BASE);
                         if(ost->st->codec.codec_type == CODEC_TYPE_AUDIO)
                             audio_size += data_size;
                         else if (ost->st->codec.codec_type == CODEC_TYPE_VIDEO)
                             video_size += data_size;
-                        av_write_frame(os, ost->index, data_buf, data_size);
+                        opkt.stream_index= ost->index;
+                        opkt.data= data_buf;
+                        opkt.size= data_size;
+                        opkt.pts= ist->pts; //FIXME dts vs. pts
+                        opkt.flags= pkt->flags;
+                        av_write_frame(os, &opkt);
                         ost->st->codec.frame_number++;
                         ost->frame_number++;
                     }


@@ -2163,7 +2163,7 @@ static int http_prepare_data(HTTPContext *c)
                     /* XXX: potential leak */
                     return -1;
                 }
-                if (av_write_frame(ctx, pkt.stream_index, pkt.data, pkt.size)) {
+                if (av_write_frame(ctx, &pkt)) {
                     c->state = HTTPSTATE_SEND_DATA_TRAILER;
                 }


@@ -51,10 +51,9 @@ static int amr_write_header(AVFormatContext *s)
     return 0;
 }
 
-static int amr_write_packet(AVFormatContext *s, int stream_index_ptr,
-                            uint8_t *buf, int size, int force_pts)
+static int amr_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
-    put_buffer(&s->pb, buf, size);
+    put_buffer(&s->pb, pkt->data, pkt->size);
     put_flush_packet(&s->pb);
     return 0;
 }


@@ -310,7 +310,7 @@ static int asf_write_header1(AVFormatContext *s, int64_t file_size, int64_t data
     put_le64(pb, asf->nb_packets); /* number of packets */
     put_le64(pb, asf->duration); /* end time stamp (in 100ns units) */
     put_le64(pb, asf->duration); /* duration (in 100ns units) */
-    put_le32(pb, 0); /* start time stamp */
+    put_le32(pb, preroll_time); /* start time stamp */
     put_le32(pb, 0); /* ??? */
     put_le32(pb, asf->is_streamed ? 1 : 0); /* ??? */
     put_le32(pb, asf->packet_size); /* packet size */
@@ -686,17 +686,17 @@ static void put_frame(
     stream->seq++;
 }
 
-static int asf_write_packet(AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t timestamp)
+static int asf_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     ASFContext *asf = s->priv_data;
     ASFStream *stream;
     int64_t duration;
     AVCodecContext *codec;
 
-    codec = &s->streams[stream_index]->codec;
-    stream = &asf->streams[stream_index];
+    codec = &s->streams[pkt->stream_index]->codec;
+    stream = &asf->streams[pkt->stream_index];
 
+    //XXX /FIXME use duration from AVPacket
     if (codec->codec_type == CODEC_TYPE_AUDIO) {
         duration = (codec->frame_number * codec->frame_size * int64_t_C(10000000)) /
             codec->sample_rate;
@@ -706,7 +706,7 @@ static int asf_write_packet(AVFormatContext *s, int stream_index,
     if (duration > asf->duration)
         asf->duration = duration;
 
-    put_frame(s, stream, timestamp, buf, size);
+    put_frame(s, stream, pkt->pts, pkt->data, pkt->size);
     return 0;
 }


@@ -72,11 +72,10 @@ static int au_write_header(AVFormatContext *s)
     return 0;
 }
 
-static int au_write_packet(AVFormatContext *s, int stream_index_ptr,
-                           const uint8_t *buf, int size, int64_t pts)
+static int au_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     ByteIOContext *pb = &s->pb;
-    put_buffer(pb, buf, size);
+    put_buffer(pb, pkt->data, pkt->size);
     return 0;
 }


@@ -164,11 +164,12 @@ static int audio_write_header(AVFormatContext *s1)
     }
 }
 
-static int audio_write_packet(AVFormatContext *s1, int stream_index,
-                              const uint8_t *buf, int size, int64_t pts)
+static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
 {
     AudioData *s = s1->priv_data;
     int len, ret;
+    int size= pkt->size;
+    uint8_t *buf= pkt->data;
 
     while (size > 0) {
         len = AUDIO_BLOCK_SIZE - s->buffer_ptr;


@@ -5,7 +5,7 @@
 extern "C" {
 #endif
 
-#define LIBAVFORMAT_BUILD       4614
+#define LIBAVFORMAT_BUILD       4615
 
 #define LIBAVFORMAT_VERSION_INT FFMPEG_VERSION_INT
 #define LIBAVFORMAT_VERSION     FFMPEG_VERSION
@@ -131,9 +131,7 @@ typedef struct AVOutputFormat {
     enum CodecID audio_codec; /* default audio codec */
     enum CodecID video_codec; /* default video codec */
     int (*write_header)(struct AVFormatContext *);
-    int (*write_packet)(struct AVFormatContext *,
-                        int stream_index,
-                        const uint8_t *buf, int size, int64_t pts);
+    int (*write_packet)(struct AVFormatContext *, AVPacket *pkt);
     int (*write_trailer)(struct AVFormatContext *);
     /* can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER */
     int flags;
@@ -558,8 +556,8 @@ int av_seek_frame_binary(AVFormatContext *s, int stream_index, int64_t target_ts
 /* media file output */
 int av_set_parameters(AVFormatContext *s, AVFormatParameters *ap);
 int av_write_header(AVFormatContext *s);
-int av_write_frame(AVFormatContext *s, int stream_index, const uint8_t *buf,
-                   int size);
+int av_write_frame(AVFormatContext *s, AVPacket *pkt);
 int av_write_trailer(AVFormatContext *s);
 
 void dump_format(AVFormatContext *ic,
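On the muxer side, the AVOutputFormat.write_packet callback now receives the same AVPacket. A minimal sketch of what an implementation looks like under build 4615, modeled on the raw/wav/mp3 writers changed below (my_write_packet is an illustrative name, not part of this commit):

    static int my_write_packet(AVFormatContext *s, AVPacket *pkt)
    {
        /* stream index, payload, pts/dts and the keyframe flag all travel
           inside the packet; pkt->pts/dts arrive already rescaled to the
           stream's time_base (see av_write_frame() in utils.c below) */
        put_buffer(&s->pb, pkt->data, pkt->size);
        put_flush_packet(&s->pb);
        return 0;
    }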


@@ -607,14 +607,15 @@ static int avi_write_idx1(AVFormatContext *s)
     return 0;
 }
 
-static int avi_write_packet(AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int avi_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     AVIContext *avi = s->priv_data;
     ByteIOContext *pb = &s->pb;
     unsigned char tag[5];
-    unsigned int flags;
+    unsigned int flags=0;
     AVCodecContext *enc;
+    const int stream_index= pkt->stream_index;
+    int size= pkt->size;
 
     if (url_ftell(pb) - avi->riff_start > AVI_MAX_RIFF_SIZE) {
         avi_write_ix(s);
@@ -629,11 +630,11 @@ static int avi_write_packet(AVFormatContext *s, int stream_index,
     enc = &s->streams[stream_index]->codec;
 
     avi_stream2fourcc(&tag[0], stream_index, enc->codec_type);
+    if(pkt->flags&PKT_FLAG_KEY)
+        flags = 0x10;
     if (enc->codec_type == CODEC_TYPE_AUDIO) {
         avi->audio_strm_length[stream_index] += size;
-        flags = 0x10;
-    } else
-        flags = enc->coded_frame->key_frame ? 0x10 : 0x00;
+    }
 
     if (!url_is_streamed(&s->pb)) {
         AVIIndex* idx = &avi->indexes[stream_index];
@@ -657,7 +658,7 @@ static int avi_write_packet(AVFormatContext *s, int stream_index,
     put_buffer(pb, tag, 4);
     put_le32(pb, size);
-    put_buffer(pb, buf, size);
+    put_buffer(pb, pkt->data, size);
     if (size & 1)
         put_byte(pb, 0);


@@ -71,12 +71,10 @@ static int crc_write_header(struct AVFormatContext *s)
     return 0;
 }
 
-static int crc_write_packet(struct AVFormatContext *s,
-                            int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int crc_write_packet(struct AVFormatContext *s, AVPacket *pkt)
 {
     CRCState *crc = s->priv_data;
-    crc->crcval = update_adler32(crc->crcval, buf, size);
+    crc->crcval = update_adler32(crc->crcval, pkt->data, pkt->size);
     return 0;
 }


@@ -888,15 +888,13 @@ static int dv_write_header(AVFormatContext *s)
     return 0;
 }
 
-static int dv_write_packet(struct AVFormatContext *s,
-                           int stream_index,
-                           const uint8_t *buf, int size, int64_t pts)
+static int dv_write_packet(struct AVFormatContext *s, AVPacket *pkt)
 {
     uint8_t* frame;
     int fsize;
 
-    fsize = dv_assemble_frame((DVMuxContext *)s->priv_data, s->streams[stream_index],
-                              buf, size, &frame);
+    fsize = dv_assemble_frame((DVMuxContext *)s->priv_data, s->streams[pkt->stream_index],
+                              pkt->data, pkt->size, &frame);
     if (fsize > 0) {
         put_buffer(&s->pb, frame, fsize);
         put_flush_packet(&s->pb);


@@ -222,15 +222,16 @@ static int ffm_write_header(AVFormatContext *s)
     return -1;
 }
 
-static int ffm_write_packet(AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t force_pts)
+static int ffm_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
-    AVStream *st = s->streams[stream_index];
+    AVStream *st = s->streams[pkt->stream_index];
     FFMStream *fst = st->priv_data;
     int64_t pts;
     uint8_t header[FRAME_HEADER_SIZE];
     int duration;
+    int size= pkt->size;
 
+    //XXX/FIXME use duration from pkt
     if (st->codec.codec_type == CODEC_TYPE_AUDIO) {
         duration = ((float)st->codec.frame_size / st->codec.sample_rate * 1000000.0);
     } else {
@@ -239,9 +240,9 @@ static int ffm_write_packet(AVFormatContext *s, int stream_index,
     pts = fst->pts;
     /* packet size & key_frame */
-    header[0] = stream_index;
+    header[0] = pkt->stream_index;
     header[1] = 0;
-    if (st->codec.coded_frame->key_frame) //if st->codec.coded_frame==NULL then there is a bug somewhere else
+    if (pkt->flags & PKT_FLAG_KEY)
         header[1] |= FLAG_KEY_FRAME;
     header[2] = (size >> 16) & 0xff;
     header[3] = (size >> 8) & 0xff;
@@ -250,7 +251,7 @@ static int ffm_write_packet(AVFormatContext *s, int stream_index,
     header[6] = (duration >> 8) & 0xff;
     header[7] = duration & 0xff;
     ffm_write_data(s, header, FRAME_HEADER_SIZE, pts, 1);
-    ffm_write_data(s, buf, size, pts, 0);
+    ffm_write_data(s, pkt->data, size, pts, 0);
     fst->pts += duration;
 
     return 0;
@@ -467,6 +468,9 @@ static int ffm_read_header(AVFormatContext *s, AVFormatParameters *ap)
         fst = av_mallocz(sizeof(FFMStream));
         if (!fst)
             goto fail;
+        av_set_pts_info(st, 64, 1, 1000000);
         st->priv_data = fst;
 
         codec = &st->codec;


@@ -239,18 +239,19 @@ static int flv_write_trailer(AVFormatContext *s)
     return 0;
 }
 
-static int flv_write_packet(AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t timestamp)
+static int flv_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     ByteIOContext *pb = &s->pb;
-    AVCodecContext *enc = &s->streams[stream_index]->codec;
+    AVCodecContext *enc = &s->streams[pkt->stream_index]->codec;
     FLVContext *flv = s->priv_data;
     FLVFrame *frame = av_malloc(sizeof(FLVFrame));
+    int size= pkt->size;
+    uint8_t *buf= pkt->data;
 
     frame->next = 0;
     frame->size = size;
     frame->data = av_malloc(size);
-    frame->timestamp = timestamp;
+    frame->timestamp = pkt->pts;
     frame->reserved= flv->reserved;
     memcpy(frame->data,buf,size);
@@ -259,7 +260,7 @@ static int flv_write_packet(AVFormatContext *s, int stream_index,
     if (enc->codec_type == CODEC_TYPE_VIDEO) {
         frame->type = 9;
         frame->flags = 2; // choose h263
-        frame->flags |= enc->coded_frame->key_frame ? 0x10 : 0x20; // add keyframe indicator
+        frame->flags |= pkt->flags & PKT_FLAG_KEY ? 0x10 : 0x20; // add keyframe indicator
         //frame->timestamp = ( ( flv->frameCount * (int64_t)FRAME_RATE_BASE * (int64_t)1000 ) / (int64_t)enc->frame_rate );
         //printf("%08x %f %f\n",frame->timestamp,(double)enc->frame_rate/(double)FRAME_RATE_BASE,1000*(double)FRAME_RATE_BASE/(double)enc->frame_rate);
         flv->hasVideo = 1;
@@ -306,7 +307,7 @@ static int flv_write_packet(AVFormatContext *s, int stream_index,
         assert(size);
 
         if ( flv->initDelay == -1 ) {
-            flv->initDelay = timestamp;
+            flv->initDelay = pkt->pts;
         }
 
         frame->type = 8;


@@ -364,14 +364,13 @@ static int gif_write_video(AVFormatContext *s,
     return 0;
 }
 
-static int gif_write_packet(AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int gif_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
-    AVCodecContext *codec = &s->streams[stream_index]->codec;
+    AVCodecContext *codec = &s->streams[pkt->stream_index]->codec;
     if (codec->codec_type == CODEC_TYPE_AUDIO)
         return 0; /* just ignore audio */
     else
-        return gif_write_video(s, codec, buf, size);
+        return gif_write_video(s, codec, pkt->data, pkt->size);
 }
 
 static int gif_write_trailer(AVFormatContext *s)


@@ -300,11 +300,10 @@ static int img_write_header(AVFormatContext *s)
     return 0;
 }
 
-static int img_write_packet(AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int img_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     VideoData *img = s->priv_data;
-    AVStream *st = s->streams[stream_index];
+    AVStream *st = s->streams[pkt->stream_index];
     ByteIOContext pb1, *pb;
     AVPicture *picture;
     int width, height, ret;
@@ -314,7 +313,7 @@ static int img_write_packet(AVFormatContext *s, int stream_index,
     width = st->codec.width;
     height = st->codec.height;
 
-    picture = (AVPicture *)buf;
+    picture = (AVPicture *)pkt->data;
 
     if (!img->is_pipe) {
         if (get_frame_filename(filename, sizeof(filename),


@@ -950,15 +950,15 @@ static int mov_write_header(AVFormatContext *s)
     return 0;
 }
 
-static int mov_write_packet(AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int mov_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     MOVContext *mov = s->priv_data;
     ByteIOContext *pb = &s->pb;
-    AVCodecContext *enc = &s->streams[stream_index]->codec;
-    MOVTrack* trk = &mov->tracks[stream_index];
+    AVCodecContext *enc = &s->streams[pkt->stream_index]->codec;
+    MOVTrack* trk = &mov->tracks[pkt->stream_index];
     int cl, id;
     unsigned int samplesInChunk = 0;
+    int size= pkt->size;
 
     if (url_is_streamed(&s->pb)) return 0; /* Can't handle that */
     if (!size) return 0; /* Discard 0 sized packets */
@@ -974,7 +974,7 @@ static int mov_write_packet(AVFormatContext *s, int stream_index,
         int len = 0;
 
         while (len < size && samplesInChunk < 100) {
-            len += packed_size[(buf[len] >> 3) & 0x0F];
+            len += packed_size[(pkt->data[len] >> 3) & 0x0F];
             samplesInChunk++;
         }
     }
@@ -1021,8 +1021,8 @@ static int mov_write_packet(AVFormatContext *s, int stream_index,
     trk->cluster[cl][id].size = size;
     trk->cluster[cl][id].entries = samplesInChunk;
     if(enc->codec_type == CODEC_TYPE_VIDEO) {
-        trk->cluster[cl][id].key_frame = enc->coded_frame->key_frame;
-        if(enc->coded_frame->pict_type == FF_I_TYPE)
+        trk->cluster[cl][id].key_frame = !!(pkt->flags & PKT_FLAG_KEY);
+        if(trk->cluster[cl][id].key_frame)
             trk->hasKeyframes = 1;
     }
     trk->enc = enc;
@@ -1030,7 +1030,7 @@ static int mov_write_packet(AVFormatContext *s, int stream_index,
     trk->sampleCount += samplesInChunk;
     trk->mdat_size += size;
 
-    put_buffer(pb, buf, size);
+    put_buffer(pb, pkt->data, size);
     put_flush_packet(pb);
 
     return 0;


@@ -324,10 +324,9 @@ static int mp3_write_header(struct AVFormatContext *s)
     return 0;
 }
 
-static int mp3_write_packet(struct AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int mp3_write_packet(struct AVFormatContext *s, AVPacket *pkt)
 {
-    put_buffer(&s->pb, buf, size);
+    put_buffer(&s->pb, pkt->data, pkt->size);
     put_flush_packet(&s->pb);
     return 0;
 }


@@ -912,17 +912,22 @@ static int64_t update_scr(AVFormatContext *ctx,int stream_index,int64_t pts)
 }
 
-static int mpeg_mux_write_packet(AVFormatContext *ctx, int stream_index,
-                                 const uint8_t *buf, int size,
-                                 int64_t timestamp)
+static int mpeg_mux_write_packet(AVFormatContext *ctx, AVPacket *pkt)
 {
     MpegMuxContext *s = ctx->priv_data;
+    int stream_index= pkt->stream_index;
+    int size= pkt->size;
+    uint8_t *buf= pkt->data;
     AVStream *st = ctx->streams[stream_index];
     StreamInfo *stream = st->priv_data;
     int64_t pts, dts, new_start_pts, new_start_dts;
     int len, avail_size;
 
-    compute_pts_dts(st, &pts, &dts, timestamp);
+    //XXX/FIXME this is and always was broken
+//    compute_pts_dts(st, &pts, &dts, pkt->pts);
+    pts= pkt->pts;
+    dts= pkt->dts;
 
     if(s->is_svcd) {
         /* offset pts and dts slightly into the future to be able


@@ -549,10 +549,11 @@ static void mpegts_write_pes(AVFormatContext *s, AVStream *st,
     put_flush_packet(&s->pb);
 }
 
-static int mpegts_write_packet(AVFormatContext *s, int stream_index,
-                               const uint8_t *buf, int size, int64_t pts1)
+static int mpegts_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
-    AVStream *st = s->streams[stream_index];
+    AVStream *st = s->streams[pkt->stream_index];
+    int size= pkt->size;
+    uint8_t *buf= pkt->data;
     MpegTSWriteStream *ts_st = st->priv_data;
     int len;
 
@@ -565,7 +566,7 @@ static int mpegts_write_packet(AVFormatContext *s, int stream_index,
         size -= len;
         ts_st->payload_index += len;
         if (ts_st->payload_pts == AV_NOPTS_VALUE)
-            ts_st->payload_pts = pts1;
+            ts_st->payload_pts = pkt->pts;
         if (ts_st->payload_index >= DEFAULT_PES_PAYLOAD_SIZE) {
             mpegts_write_pes(s, st, ts_st->payload, ts_st->payload_index,
                              ts_st->payload_pts);


@@ -33,14 +33,13 @@ static int mpjpeg_write_header(AVFormatContext *s)
     return 0;
 }
 
-static int mpjpeg_write_packet(AVFormatContext *s, int stream_index,
-                               const uint8_t *buf, int size, int64_t pts)
+static int mpjpeg_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     uint8_t buf1[256];
 
     snprintf(buf1, sizeof(buf1), "Content-type: image/jpeg\n\n");
     put_buffer(&s->pb, buf1, strlen(buf1));
 
-    put_buffer(&s->pb, buf, size);
+    put_buffer(&s->pb, pkt->data, pkt->size);
 
     snprintf(buf1, sizeof(buf1), "\n--%s\n", BOUNDARY_TAG);
     put_buffer(&s->pb, buf1, strlen(buf1));
@@ -75,10 +74,9 @@ static int single_jpeg_write_header(AVFormatContext *s)
     return 0;
 }
 
-static int single_jpeg_write_packet(AVFormatContext *s, int stream_index,
-                                    const uint8_t *buf, int size, int64_t pts)
+static int single_jpeg_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
-    put_buffer(&s->pb, buf, size);
+    put_buffer(&s->pb, pkt->data, pkt->size);
     put_flush_packet(&s->pb);
     return 1; /* no more data can be sent */
 }


@@ -691,25 +691,22 @@ static int64_t lsb2full(StreamContext *stream, int64_t lsb){
     return ((lsb - delta)&mask) + delta;
 }
 
-static int nut_write_packet(AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int nut_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     NUTContext *nut = s->priv_data;
-    StreamContext *stream= &nut->stream[stream_index];
+    StreamContext *stream= &nut->stream[pkt->stream_index];
     ByteIOContext *bc = &s->pb;
     int key_frame = 0, full_pts=0;
     AVCodecContext *enc;
     int64_t coded_pts;
     int frame_type, best_length, frame_code, flags, i, size_mul, size_lsb, time_delta;
     const int64_t frame_start= url_ftell(bc);
+    int64_t pts= pkt->pts;
+    int size= pkt->size;
+    int stream_index= pkt->stream_index;
 
-    if (stream_index > s->nb_streams)
-        return 1;
-
     enc = &s->streams[stream_index]->codec;
-    key_frame = enc->coded_frame->key_frame;
-    if(enc->coded_frame->pts != AV_NOPTS_VALUE)
-        pts= av_rescale(enc->coded_frame->pts, stream->rate_num, stream->rate_den*(int64_t)AV_TIME_BASE); //FIXME XXX HACK
+    key_frame = !!(pkt->flags & PKT_FLAG_KEY);
 
     frame_type=0;
     if(frame_start + size + 20 - FFMAX(nut->packet_start[1], nut->packet_start[2]) > MAX_DISTANCE)
@@ -808,7 +805,7 @@ static int nut_write_packet(AVFormatContext *s, int stream_index,
         assert(frame_type > 1);
     }
 
-    put_buffer(bc, buf, size);
+    put_buffer(bc, pkt->data, size);
 
     update(nut, stream_index, frame_start, frame_type, frame_code, key_frame, size, pts);


@@ -62,16 +62,15 @@ static int ogg_write_header(AVFormatContext *avfcontext)
     return 0 ;
 }
 
-static int ogg_write_packet(AVFormatContext *avfcontext,
-                            int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int ogg_write_packet(AVFormatContext *avfcontext, AVPacket *pkt)
 {
     OggContext *context = avfcontext->priv_data ;
-    AVCodecContext *avctx= &avfcontext->streams[stream_index]->codec;
+    AVCodecContext *avctx= &avfcontext->streams[pkt->stream_index]->codec;
     ogg_packet *op= &context->op;
     ogg_page og ;
+    int64_t pts;
 
-    pts= av_rescale(pts, avctx->sample_rate, AV_TIME_BASE);
+    pts= av_rescale(pkt->pts, avctx->sample_rate, AV_TIME_BASE);
 
 //    av_log(avfcontext, AV_LOG_DEBUG, "M%d\n", size);
@@ -86,8 +85,8 @@ static int ogg_write_packet(AVFormatContext *avfcontext,
         context->header_handled = 1 ;
     }
 
-    op->packet = (uint8_t*) buf;
-    op->bytes = size;
+    op->packet = (uint8_t*) pkt->data;
+    op->bytes = pkt->size;
     op->b_o_s = op->packetno == 0;
     op->granulepos= pts;


@@ -25,10 +25,9 @@ static int raw_write_header(struct AVFormatContext *s)
     return 0;
 }
 
-static int raw_write_packet(struct AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int raw_write_packet(struct AVFormatContext *s, AVPacket *pkt)
 {
-    put_buffer(&s->pb, buf, size);
+    put_buffer(&s->pb, pkt->data, pkt->size);
     put_flush_packet(&s->pb);
     return 0;
 }
@@ -551,9 +550,7 @@ AVOutputFormat rawvideo_oformat = {
 #endif //CONFIG_ENCODERS
 
 #ifdef CONFIG_ENCODERS
-static int null_write_packet(struct AVFormatContext *s,
-                             int stream_index,
-                             const uint8_t *buf, int size, int64_t pts)
+static int null_write_packet(struct AVFormatContext *s, AVPacket *pkt)
 {
     return 0;
 }


@@ -389,14 +389,13 @@ static int rm_write_video(AVFormatContext *s, const uint8_t *buf, int size)
     return 0;
 }
 
-static int rm_write_packet(AVFormatContext *s, int stream_index,
-                           const uint8_t *buf, int size, int64_t pts)
+static int rm_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
-    if (s->streams[stream_index]->codec.codec_type ==
+    if (s->streams[pkt->stream_index]->codec.codec_type ==
         CODEC_TYPE_AUDIO)
-        return rm_write_audio(s, buf, size);
+        return rm_write_audio(s, pkt->data, pkt->size);
     else
-        return rm_write_video(s, buf, size);
+        return rm_write_video(s, pkt->data, pkt->size);
 }
 
 static int rm_write_trailer(AVFormatContext *s)


@@ -669,16 +669,17 @@ static void rtp_send_mpegts_raw(AVFormatContext *s1,
 }
 
 /* write an RTP packet. 'buf1' must contain a single specific frame. */
-static int rtp_write_packet(AVFormatContext *s1, int stream_index,
-                            const uint8_t *buf1, int size, int64_t pts)
+static int rtp_write_packet(AVFormatContext *s1, AVPacket *pkt)
 {
     RTPDemuxContext *s = s1->priv_data;
     AVStream *st = s1->streams[0];
     int rtcp_bytes;
     int64_t ntp_time;
+    int size= pkt->size;
+    uint8_t *buf1= pkt->data;
 
 #ifdef DEBUG
-    printf("%d: write len=%d\n", stream_index, size);
+    printf("%d: write len=%d\n", pkt->stream_index, size);
 #endif
 
     /* XXX: mpeg pts hardcoded. RTCP send every 0.5 seconds */
@@ -687,7 +688,7 @@ static int rtp_write_packet(AVFormatContext *s1, int stream_index,
     if (s->first_packet || rtcp_bytes >= 28) {
         /* compute NTP time */
         /* XXX: 90 kHz timestamp hardcoded */
-        ntp_time = (pts << 28) / 5625;
+        ntp_time = (pkt->pts << 28) / 5625;
         rtcp_send_sr(s1, ntp_time);
         s->last_octet_count = s->octet_count;
         s->first_packet = 0;


@@ -700,14 +700,13 @@ static int swf_write_audio(AVFormatContext *s,
     return 0;
 }
 
-static int swf_write_packet(AVFormatContext *s, int stream_index,
-                            const uint8_t *buf, int size, int64_t pts)
+static int swf_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
-    AVCodecContext *codec = &s->streams[stream_index]->codec;
+    AVCodecContext *codec = &s->streams[pkt->stream_index]->codec;
     if (codec->codec_type == CODEC_TYPE_AUDIO)
-        return swf_write_audio(s, codec, buf, size);
+        return swf_write_audio(s, codec, pkt->data, pkt->size);
     else
-        return swf_write_video(s, codec, buf, size);
+        return swf_write_video(s, codec, pkt->data, pkt->size);
 }
 
 static int swf_write_trailer(AVFormatContext *s)


@@ -569,10 +569,12 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
     int num, den, presentation_delayed;
 
     /* handle wrapping */
-    if(pkt->pts != AV_NOPTS_VALUE)
-        pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
-    if(pkt->dts != AV_NOPTS_VALUE)
-        pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
+    if(st->cur_dts != AV_NOPTS_VALUE){
+        if(pkt->pts != AV_NOPTS_VALUE)
+            pkt->pts= lsb2full(pkt->pts, st->cur_dts, st->pts_wrap_bits);
+        if(pkt->dts != AV_NOPTS_VALUE)
+            pkt->dts= lsb2full(pkt->dts, st->cur_dts, st->pts_wrap_bits);
+    }
 
     if (pkt->duration == 0) {
         compute_frame_duration(&num, &den, s, st, pc, pkt);
@@ -597,6 +599,12 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
         presentation_delayed = 1;
     }
 
+    if(st->cur_dts == AV_NOPTS_VALUE){
+        if(presentation_delayed) st->cur_dts = -pkt->duration;
+        else                     st->cur_dts = 0;
+    }
+
+//    av_log(NULL, AV_LOG_DEBUG, "IN delayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
     /* interpolate PTS and DTS if they are not present */
     if (presentation_delayed) {
         /* DTS = decompression time stamp */
@@ -637,6 +645,7 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
         }
         st->cur_dts += pkt->duration;
     }
+//    av_log(NULL, AV_LOG_DEBUG, "OUTdelayed:%d pts:%lld, dts:%lld cur_dts:%lld\n", presentation_delayed, pkt->pts, pkt->dts, st->cur_dts);
 
     /* update flags */
     if (pc) {
@@ -1672,6 +1681,30 @@ int av_find_stream_info(AVFormatContext *ic)
     }
 
     av_estimate_timings(ic);
+#if 0
+    /* correct DTS for b frame streams with no timestamps */
+    for(i=0;i<ic->nb_streams;i++) {
+        st = ic->streams[i];
+        if (st->codec.codec_type == CODEC_TYPE_VIDEO) {
+            if(b-frames){
+                ppktl = &ic->packet_buffer;
+                while(ppkt1){
+                    if(ppkt1->stream_index != i)
+                        continue;
+                    if(ppkt1->pkt->dts < 0)
+                        break;
+                    if(ppkt1->pkt->pts != AV_NOPTS_VALUE)
+                        break;
+                    ppkt1->pkt->dts -= delta;
+                    ppkt1= ppkt1->next;
+                }
+                if(ppkt1)
+                    continue;
+                st->cur_dts -= delta;
+            }
+        }
+    }
+#endif
 
     return ret;
 }
@@ -1764,6 +1797,7 @@ AVStream *av_new_stream(AVFormatContext *s, int id)
     st->id = id;
     st->start_time = AV_NOPTS_VALUE;
     st->duration = AV_NOPTS_VALUE;
+    st->cur_dts = AV_NOPTS_VALUE;
 
     /* default pts settings is MPEG like */
     av_set_pts_info(st, 33, 1, 90000);
@@ -1836,27 +1870,68 @@ int av_write_header(AVFormatContext *s)
  * one audio or video frame.
  *
  * @param s media file handle
- * @param stream_index stream index
- * @param buf buffer containing the frame data
- * @param size size of buffer
+ * @param pkt the packet, which contains the stream_index, buf/buf_size, dts/pts, ...
  * @return < 0 if error, = 0 if OK, 1 if end of stream wanted.
  */
-int av_write_frame(AVFormatContext *s, int stream_index, const uint8_t *buf,
-                   int size)
+int av_write_frame(AVFormatContext *s, AVPacket *pkt)
 {
     AVStream *st;
     int64_t pts_mask;
     int ret, frame_size;
+    int b_frames;
+
+    if(pkt->stream_index<0)
+        return -1;
+    st = s->streams[pkt->stream_index];
 
-    st = s->streams[stream_index];
-    pts_mask = (1LL << st->pts_wrap_bits) - 1;
+    b_frames = FFMAX(st->codec.has_b_frames, st->codec.max_b_frames);
 
-    /* HACK/FIXME we skip all zero size audio packets so a encoder can pass pts by outputing zero size packets */
-    if(st->codec.codec_type==CODEC_TYPE_AUDIO && size==0)
-        ret = 0;
-    else
-        ret = s->oformat->write_packet(s, stream_index, buf, size,
-                                       st->pts.val & pts_mask);
+//    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts:%lld dts:%lld cur_dts:%lld b:%d size:%d\n", pkt->pts, pkt->dts, st->cur_dts, b_frames, pkt->size);
+
+/*    if(pkt->pts == AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE)
+        return -1;*/
+
+    if(pkt->pts != AV_NOPTS_VALUE)
+        pkt->pts = av_rescale(pkt->pts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
+    if(pkt->dts != AV_NOPTS_VALUE)
+        pkt->dts = av_rescale(pkt->dts, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
+
+    /* duration field */
+    pkt->duration = av_rescale(pkt->duration, st->time_base.den, AV_TIME_BASE * (int64_t)st->time_base.num);
+
+    //XXX/FIXME this is a temporary hack until all encoders output pts
+    if((pkt->pts == 0 || pkt->pts == AV_NOPTS_VALUE) && pkt->dts == AV_NOPTS_VALUE && !b_frames){
+        pkt->dts=
+//        pkt->pts= st->cur_dts;
+        pkt->pts= st->pts.val;
+    }
+
+    //calculate dts from pts
+    if(pkt->pts != AV_NOPTS_VALUE && pkt->dts == AV_NOPTS_VALUE){
+        if(b_frames){
+            if(st->last_IP_pts == AV_NOPTS_VALUE){
+                st->last_IP_pts= -av_rescale(1,
+                    st->codec.frame_rate_base*(int64_t)st->time_base.den,
+                    st->codec.frame_rate *(int64_t)st->time_base.num);
+            }
+            if(st->last_IP_pts < pkt->pts){
+                pkt->dts= st->last_IP_pts;
+                st->last_IP_pts= pkt->pts;
+            }else
+                pkt->dts= pkt->pts;
+        }else
+            pkt->dts= pkt->pts;
+    }
+
+//    av_log(s, AV_LOG_DEBUG, "av_write_frame: pts2:%lld dts2:%lld\n", pkt->pts, pkt->dts);
+    st->cur_dts= pkt->dts;
+    st->pts.val= pkt->dts;
+
+    pts_mask = (2LL << (st->pts_wrap_bits-1)) - 1;
+    pkt->pts &= pts_mask;
+    pkt->dts &= pts_mask;
+
+    ret = s->oformat->write_packet(s, pkt);
+
     if (ret < 0)
         return ret;
@@ -1864,11 +1939,11 @@ int av_write_frame(AVFormatContext *s, int stream_index, const uint8_t *buf,
     /* update pts */
     switch (st->codec.codec_type) {
     case CODEC_TYPE_AUDIO:
-        frame_size = get_audio_frame_size(&st->codec, size);
+        frame_size = get_audio_frame_size(&st->codec, pkt->size);
         /* HACK/FIXME, we skip the initial 0-size packets as they are most likely equal to the encoder delay,
            but it would be better if we had the real timestamps from the encoder */
-        if (frame_size >= 0 && (size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
+        if (frame_size >= 0 && (pkt->size || st->pts.num!=st->pts.den>>1 || st->pts.val)) {
             av_frac_add(&st->pts, (int64_t)st->time_base.den * frame_size);
         }
         break;
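A quick worked example of the timestamp rescaling that av_write_frame() now performs before calling the muxer: the packet arrives with pts/dts in AV_TIME_BASE (1/1000000 s) units and is converted into the stream's own time_base. Assuming the MPEG-like default of 1/90000 set by av_set_pts_info(st, 33, 1, 90000):

    /* av_rescale(a, b, c) computes a * b / c with 64-bit intermediates, so for a
       stream time_base of num/den = 1/90000: */
    int64_t pts_us    = 40000;                              /* 40 ms in AV_TIME_BASE units */
    int64_t pts_ticks = av_rescale(pts_us,
                                   st->time_base.den,                        /* 90000 */
                                   AV_TIME_BASE * (int64_t)st->time_base.num); /* 1000000 */
    /* pts_ticks == 3600, i.e. 40 ms expressed in 1/90000 s ticks */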


@@ -203,11 +203,10 @@ static int wav_write_header(AVFormatContext *s)
     return 0;
 }
 
-static int wav_write_packet(AVFormatContext *s, int stream_index_ptr,
-                            const uint8_t *buf, int size, int64_t pts)
+static int wav_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
     ByteIOContext *pb = &s->pb;
-    put_buffer(pb, buf, size);
+    put_buffer(pb, pkt->data, pkt->size);
     return 0;
 }


@@ -58,10 +58,9 @@ static int yuv4_generate_header(AVFormatContext *s, char* buf)
     return n;
 }
 
-static int yuv4_write_packet(AVFormatContext *s, int stream_index,
-                             const uint8_t *buf, int size, int64_t pts)
+static int yuv4_write_packet(AVFormatContext *s, AVPacket *pkt)
 {
-    AVStream *st = s->streams[stream_index];
+    AVStream *st = s->streams[pkt->stream_index];
     ByteIOContext *pb = &s->pb;
     AVPicture *picture;
     int* first_pkt = s->priv_data;
@@ -71,7 +70,7 @@ static int yuv4_write_packet(AVFormatContext *s, int stream_index,
     char buf1[20];
     uint8_t *ptr, *ptr1, *ptr2;
 
-    picture = (AVPicture *)buf;
+    picture = (AVPicture *)pkt->data;
 
     /* for the first packet we have to output the header as well */
     if (*first_pkt) {


@@ -142,16 +142,22 @@ void write_audio_frame(AVFormatContext *oc, AVStream *st)
 {
     int out_size;
     AVCodecContext *c;
+    AVPacket pkt;
+    av_init_packet(&pkt);
 
     c = &st->codec;
 
     get_audio_frame(samples, audio_input_frame_size, c->channels);
 
-    out_size = avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
+    pkt.size= avcodec_encode_audio(c, audio_outbuf, audio_outbuf_size, samples);
+
+    pkt.pts= c->coded_frame->pts;
+    pkt.flags |= PKT_FLAG_KEY;
+    pkt.stream_index= st->index;
+    pkt.data= audio_outbuf;
 
     /* write the compressed frame in the media file */
-    if (av_write_frame(oc, st->index, audio_outbuf, out_size) != 0) {
+    if (av_write_frame(oc, &pkt) != 0) {
         fprintf(stderr, "Error while writing audio frame\n");
         exit(1);
     }
@@ -336,16 +342,32 @@ void write_video_frame(AVFormatContext *oc, AVStream *st)
     if (oc->oformat->flags & AVFMT_RAWPICTURE) {
         /* raw video case. The API will change slightly in the near
            futur for that */
-        ret = av_write_frame(oc, st->index,
-                             (uint8_t *)picture_ptr, sizeof(AVPicture));
+        AVPacket pkt;
+        av_init_packet(&pkt);
+
+        pkt.flags |= PKT_FLAG_KEY;
+        pkt.stream_index= st->index;
+        pkt.data= (uint8_t *)picture_ptr;
+        pkt.size= sizeof(AVPicture);
+
+        ret = av_write_frame(oc, &pkt);
     } else {
         /* encode the image */
         out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture_ptr);
         /* if zero size, it means the image was buffered */
        if (out_size != 0) {
+            AVPacket pkt;
+            av_init_packet(&pkt);
+
+            pkt.pts= c->coded_frame->pts;
+            if(c->coded_frame->key_frame)
+                pkt.flags |= PKT_FLAG_KEY;
+            pkt.stream_index= st->index;
+            pkt.data= video_outbuf;
+            pkt.size= out_size;
+
             /* write the compressed frame in the media file */
-            /* XXX: in case of B frames, the pts is not yet valid */
-            ret = av_write_frame(oc, st->index, video_outbuf, out_size);
+            ret = av_write_frame(oc, &pkt);
         } else {
             ret = 0;
         }


@@ -51,20 +51,20 @@ stddev: 8.05 PSNR:30.00 bytes:7602176
 5986168 ./data/a-huffyuv.avi
 799d3db687f6cdd7a837ec156efc171f *./data/out.yuv
 stddev: 0.00 PSNR:99.99 bytes:7602176
-a06eb02738bc67c61392fe2fced2afa9 *./data/a-mpeg4-rc.avi
-816398 ./data/a-mpeg4-rc.avi
+a86d267dceb37beca514484ad5951675 *./data/a-mpeg4-rc.avi
+816350 ./data/a-mpeg4-rc.avi
 11efbbf01c8473ac5eabd775f1c0bec5 *./data/out.yuv
 stddev: 11.51 PSNR:26.89 bytes:7299072
 451bd18acf3c169301c215a63a3b216d *./data/a-mpeg4-adv.avi
 592570 ./data/a-mpeg4-adv.avi
 9f4fbff593dd0bd7ad437ceee4afc11f *./data/out.yuv
 stddev: 10.31 PSNR:27.85 bytes:7602176
-3424145af47f83c77bd42f4707c1a722 *./data/a-mpeg4-thread.avi
-752408 ./data/a-mpeg4-thread.avi
+b99438023ff9dda75494b4095c42affa *./data/a-mpeg4-thread.avi
+752360 ./data/a-mpeg4-thread.avi
 51b7bfb3cbf1580eefc61e610451e67e *./data/out.yuv
 stddev: 13.17 PSNR:25.72 bytes:7299072
-36688568f48f40163c338c6f3435c132 *./data/a-mpeg4-Q.avi
-875964 ./data/a-mpeg4-Q.avi
+4f1ca5927928b8433bfcb5f231239462 *./data/a-mpeg4-Q.avi
+875916 ./data/a-mpeg4-Q.avi
 dca5adb5fca49a806e71266f1a04c3a5 *./data/out.yuv
 stddev: 8.15 PSNR:29.89 bytes:7299072
 771f65ff24c0d32b37f2663892b2f433 *./data/a-error-mpeg4-adv.avi


@@ -2,9 +2,9 @@ ffmpeg regression test
 c23bbe3016afcb726a494858b7bb17e3 *./data/b-libav.avi
 337732 ./data/b-libav.avi
 ./data/b-libav.avi CRC=658838d9
-b166e89a9ec8c707573329d883c1b6f9 *./data/b-libav.asf
+a09d8460b207c4a67a26842c70fbb060 *./data/b-libav.asf
 339767 ./data/b-libav.asf
-./data/b-libav.asf CRC=525fdb22
+./data/b-libav.asf CRC=4b9f25a1
 be8eb1b5705c8105e4727258e448cb24 *./data/b-libav.rm
 356950 ./data/b-libav.rm
 e826aa1637ff15144ab484c1efca7fe7 *./data/b-libav.mpg


@@ -477,7 +477,7 @@ do_ffmpeg_crc $file -i $file
 # asf
 file=${outfile}libav.asf
 do_ffmpeg $file -t 1 -y -qscale 10 -f pgmyuv -i $raw_src -f s16le -i $pcm_src -acodec mp2 $file
-do_ffmpeg_crc $file -i $file
+do_ffmpeg_crc $file -i $file -r 25
 
 # rm
 file=${outfile}libav.rm


@@ -51,20 +51,20 @@ stddev: 5.44 PSNR:33.40 bytes:7602176
 4987020 ./data/a-huffyuv.avi
 dde5895817ad9d219f79a52d0bdfb001 *./data/out.yuv
 stddev: 0.00 PSNR:99.99 bytes:7602176
-ca8f249763ca162cbee43cb32b7b165b *./data/a-mpeg4-rc.avi
-223932 ./data/a-mpeg4-rc.avi
+55b7400e3abf9b24d4a0f5d2963ffa29 *./data/a-mpeg4-rc.avi
+223884 ./data/a-mpeg4-rc.avi
 5ad73d80df55d60384a43b8260174912 *./data/out.yuv
 stddev: 5.28 PSNR:33.66 bytes:7299072
 8249cc83e341cd18e45adf589e1d254e *./data/a-mpeg4-adv.avi
 180988 ./data/a-mpeg4-adv.avi
 a46b891f446aef2c025dc2847dc86ed6 *./data/out.yuv
 stddev: 4.95 PSNR:34.22 bytes:7602176
-f2f6e3841d4a5def6d70297b09a6df6c *./data/a-mpeg4-thread.avi
-249670 ./data/a-mpeg4-thread.avi
+43411d9f33c8112a25d805f6edf88c17 *./data/a-mpeg4-thread.avi
+249622 ./data/a-mpeg4-thread.avi
 e01d0d5a98d2624b107e13c6f50c533c *./data/out.yuv
 stddev: 5.09 PSNR:33.97 bytes:7299072
-96333905f57a579396c5121d6651fc7e *./data/a-mpeg4-Q.avi
-159744 ./data/a-mpeg4-Q.avi
+bfd6fcccc5aa8c3fa5222a33a0bb3b63 *./data/a-mpeg4-Q.avi
+159696 ./data/a-mpeg4-Q.avi
 f44b074a83b177950055ddd6da7e518d *./data/out.yuv
 stddev: 5.02 PSNR:34.09 bytes:7299072
 901ebdbc7438744f0832a9a0cc7678e3 *./data/a-error-mpeg4-adv.avi