revert 12156

Log: Make timestamp interpolation work with mpeg2 field pictures.
Cleaner/simpler solutions are welcome.
----
A IMHO cleaner solution has been implemented.

Originally committed as revision 12162 to svn://svn.ffmpeg.org/ffmpeg/trunk
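For context, the change being reverted (r12156) split each packet's timestamps across the fields of an MPEG-2 field picture: the parser's repeat_pict chose the field count and the packet duration was divided evenly between the fields, tracked per field parity. Below is an editorial sketch of that arithmetic only, not the FFmpeg API; the constants (25 fps, 90 kHz timebase) and variable names are illustrative.

/* Editorial sketch: mirrors the per-field arithmetic visible in the
 * removed lines further down, not the FFmpeg API. Values are illustrative. */
#include <stdio.h>

int main(void)
{
    int repeat_pict  = 1;    /* as reported by a hypothetical parser */
    int pkt_duration = 3600; /* one 25 fps frame in 90 kHz ticks */

    int fields         = 2 + repeat_pict;        /* 2 + (pc ? pc->repeat_pict : 0) */
    int field_duration = pkt_duration / fields;  /* pkt->duration / fields */

    printf("fields=%d, field_duration=%d ticks\n", fields, field_duration);
    return 0;
}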
@@ -2788,7 +2788,6 @@ typedef struct AVCodecParserContext {
     /* video info */
     int pict_type; /* XXX: Put it back in AVCodecContext. */
     int repeat_pict; /* XXX: Put it back in AVCodecContext. */
-    int parity;
     int64_t pts; /* pts of the current frame */
     int64_t dts; /* dts of the current frame */
 
@@ -36,7 +36,6 @@ static void mpegvideo_extract_headers(AVCodecParserContext *s,
     int horiz_size_ext, vert_size_ext, bit_rate_ext;
 //FIXME replace the crap with get_bits()
     s->repeat_pict = 0;
-    s->parity = 0;
     buf_end = buf + buf_size;
     while (buf < buf_end) {
         start_code= -1;
@@ -337,8 +337,8 @@ typedef struct AVStream {
     struct AVCodecParserContext *parser;
 
     int64_t cur_dts;
-    int last_IP_duration[2];
-    int64_t last_IP_pts[2];
+    int last_IP_duration;
+    int64_t last_IP_pts;
     /* av_seek_frame() support */
     AVIndexEntry *index_entries; /**< only used if the format does not
                                     support seeking natively */
@@ -703,30 +703,21 @@ static void compute_pkt_fields(AVFormatContext *s, AVStream *st,
     /* interpolate PTS and DTS if they are not present */
     if(delay <=1){
         if (presentation_delayed) {
-            int fields= 2 + (pc ? pc->repeat_pict : 0);
-            int field_duration= pkt->duration / fields;
-            int parity= pc ? pc->parity : 0;
             /* DTS = decompression timestamp */
             /* PTS = presentation timestamp */
             if (pkt->dts == AV_NOPTS_VALUE)
-                pkt->dts = st->last_IP_pts[parity];
+                pkt->dts = st->last_IP_pts;
             update_initial_timestamps(s, pkt->stream_index, pkt->dts, pkt->pts);
             if (pkt->dts == AV_NOPTS_VALUE)
                 pkt->dts = st->cur_dts;
 
             /* this is tricky: the dts must be incremented by the duration
             of the frame we are displaying, i.e. the last I- or P-frame */
-            st->cur_dts = pkt->dts;
-            for(i=0; i<fields; i++){
-                int p= (parity + i)&1;
-                if(!st->last_IP_duration[p])
-                    st->last_IP_duration[p]= field_duration;
-                st->cur_dts += st->last_IP_duration[p];
-                st->last_IP_pts[p]= pkt->pts;
-                if(pkt->pts != AV_NOPTS_VALUE)
-                    st->last_IP_pts[p] += i*field_duration;
-                st->last_IP_duration[p]= field_duration;
-            }
+            if (st->last_IP_duration == 0)
+                st->last_IP_duration = pkt->duration;
+            st->cur_dts = pkt->dts + st->last_IP_duration;
+            st->last_IP_duration = pkt->duration;
+            st->last_IP_pts= pkt->pts;
         /* cannot compute PTS if not present (we can compute it only
         by knowing the future */
         } else if(pkt->pts != AV_NOPTS_VALUE || pkt->dts != AV_NOPTS_VALUE || pkt->duration){
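The restored logic above interpolates a missing DTS at whole-frame granularity: the stream's cur_dts advances by the duration of the last displayed I/P frame, falling back to the current packet's duration the first time around. A self-contained editorial sketch of that rule follows, using simplified stand-in names rather than the real AVStream/AVPacket structures; the sample values assume 25 fps in a 90 kHz timebase.

/* Editorial sketch of the frame-level interpolation restored by this revert;
 * DemoStream stands in for the relevant AVStream fields. */
#include <stdint.h>
#include <stdio.h>

typedef struct {
    int64_t cur_dts;          /* predicted DTS of the next packet */
    int64_t last_IP_pts;      /* PTS of the last I/P frame */
    int     last_IP_duration; /* duration of the last I/P frame */
} DemoStream;

static void advance_dts(DemoStream *st, int64_t dts, int64_t pts, int duration)
{
    if (st->last_IP_duration == 0)   /* first I/P frame seen */
        st->last_IP_duration = duration;
    /* the DTS is incremented by the duration of the frame being displayed,
     * i.e. the last I/P frame */
    st->cur_dts = dts + st->last_IP_duration;
    st->last_IP_duration = duration;
    st->last_IP_pts = pts;
}

int main(void)
{
    DemoStream st = {0};
    advance_dts(&st, 0, 3600, 3600);            /* I frame, 25 fps @ 90 kHz */
    advance_dts(&st, st.cur_dts, 10800, 3600);  /* following P frame */
    printf("predicted next DTS: %lld\n", (long long)st.cur_dts);
    return 0;
}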
@@ -1024,8 +1015,7 @@ static void av_read_frame_flush(AVFormatContext *s)
             av_parser_close(st->parser);
             st->parser = NULL;
         }
-        st->last_IP_pts[0] =
-        st->last_IP_pts[1] = AV_NOPTS_VALUE;
+        st->last_IP_pts = AV_NOPTS_VALUE;
         st->cur_dts = AV_NOPTS_VALUE; /* we set the current DTS to an unspecified origin */
     }
 }
@@ -1633,8 +1623,7 @@ static void av_estimate_timings_from_pts(AVFormatContext *ic, offset_t old_offse
     for(i=0; i<ic->nb_streams; i++){
         st= ic->streams[i];
         st->cur_dts= st->first_dts;
-        st->last_IP_pts[0] =
-        st->last_IP_pts[1] = AV_NOPTS_VALUE;
+        st->last_IP_pts = AV_NOPTS_VALUE;
     }
 }
 
@@ -2193,8 +2182,7 @@ AVStream *av_new_stream(AVFormatContext *s, int id)
 
     /* default pts setting is MPEG-like */
     av_set_pts_info(st, 33, 1, 90000);
-    st->last_IP_pts[0] =
-    st->last_IP_pts[1] = AV_NOPTS_VALUE;
+    st->last_IP_pts = AV_NOPTS_VALUE;
     for(i=0; i<MAX_REORDER_DELAY+1; i++)
        st->pts_buffer[i]= AV_NOPTS_VALUE;
 
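The av_new_stream() hunk above keeps the "MPEG-like" default timebase: 33-bit timestamps counted in 1/90000-second ticks. A quick stand-alone illustration of what those av_set_pts_info() parameters mean (editorial example, not FFmpeg code):

/* Editorial illustration of the 33-bit, 90 kHz default timebase. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int64_t pts_ticks = 270000;            /* example timestamp */
    double  seconds   = pts_ticks / 90000.0;
    double  wrap_h    = ((int64_t)1 << 33) / 90000.0 / 3600.0;

    printf("%lld ticks = %.3f s; 33-bit timestamps wrap after ~%.1f hours\n",
           (long long)pts_ticks, seconds, wrap_h);
    return 0;
}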