
pts fix and related fixes

Originally committed as revision 2452 to svn://svn.ffmpeg.org/ffmpeg/trunk

Author: Michael Niedermayer
Date:   2003-10-30 16:58:49 +00:00
parent 2a2bbcb05f
commit 5f1948111a

4 changed files with 32 additions and 31 deletions

@@ -1848,8 +1848,8 @@ void ff_mpeg4_stuffing(PutBitContext * pbc)
 void ff_set_mpeg4_time(MpegEncContext * s, int picture_number){
     int time_div, time_mod;
 
-    if(s->current_picture.pts)
-        s->time= (s->current_picture.pts*s->time_increment_resolution + 500*1000)/(1000*1000);
+    if(s->current_picture_ptr->pts)
+        s->time= (s->current_picture_ptr->pts*s->time_increment_resolution + 500*1000)/(1000*1000);
     else
         s->time= av_rescale(picture_number*(int64_t)s->avctx->frame_rate_base, s->time_increment_resolution, s->avctx->frame_rate);
     time_div= s->time/s->time_increment_resolution;
@@ -4994,9 +4994,9 @@ static int decode_vop_header(MpegEncContext *s, GetBitContext *gb){
                            - ROUNDED_DIV(s->last_non_b_time - s->pp_time, s->t_frame))*2;
     }
 
-    s->current_picture.pts= s->time*1000LL*1000LL / s->time_increment_resolution;
+    s->current_picture_ptr->pts= s->time*1000LL*1000LL / s->time_increment_resolution;
     if(s->avctx->debug&FF_DEBUG_PTS)
-        printf("MPEG4 PTS: %f\n", s->current_picture.pts/(1000.0*1000.0));
+        printf("MPEG4 PTS: %f\n", s->current_picture_ptr->pts/(1000.0*1000.0));
 
     check_marker(gb, "before vop_coded");
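
Both hunks above only move the MPEG-4 pts from current_picture to current_picture_ptr; the conversion itself is unchanged: a pts in microseconds is rescaled to ticks of time_increment_resolution with rounding to the nearest tick, and converted back the same way when the decoder exports it. A minimal standalone sketch of that rounding (the helper name and sample values are illustrative, not part of the commit):

#include <stdint.h>
#include <stdio.h>

/* Illustrative helper, not from the commit: same rounding as the
 * "(pts*s->time_increment_resolution + 500*1000)/(1000*1000)" expression above.
 * pts is in microseconds, time_increment_resolution in ticks per second;
 * adding 500*1000 before the divide rounds to the nearest tick instead of
 * truncating. */
static int64_t pts_to_ticks(int64_t pts_us, int time_increment_resolution)
{
    return (pts_us * time_increment_resolution + 500*1000) / (1000*1000);
}

int main(void)
{
    /* 25 ticks per second and a pts of 120000 us map to tick 3 */
    printf("%lld\n", (long long)pts_to_ticks(120000, 25));
    return 0;
}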


@@ -28,17 +28,6 @@
 //#define DEBUG
 //#define PRINT_FRAME_TIME
 
-#ifdef PRINT_FRAME_TIME
-static inline long long rdtsc()
-{
-    long long l;
-    asm volatile( "rdtsc\n\t"
-        : "=A" (l)
-    );
-//  printf("%d\n", int(l/1000));
-    return l;
-}
-#endif
 
 int ff_h263_decode_init(AVCodecContext *avctx)
 {
@@ -447,6 +436,12 @@ retry:
         return -1;
     }
 
+    //we need to set current_picture_ptr before reading the header, otherwise we cant store anyting im there
+    if(s->current_picture_ptr==NULL || s->current_picture_ptr->data[0]){
+        int i= ff_find_unused_picture(s, 0);
+        s->current_picture_ptr= &s->picture[i];
+    }
+
     /* let's go :-) */
     if (s->msmpeg4_version==5) {
         ret= ff_wmv2_decode_picture_header(s);
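
The second hunk above is the heart of the fix: a picture slot is now chosen (or an already reserved one kept) before the header is parsed, so the header parser has a current_picture_ptr to store the pts into, and MPV_frame_start() can then reuse that same slot instead of grabbing a fresh one. A rough sketch of the resulting ordering in the decode path; only the if-block mirrors the committed code, the rest is illustrative pseudocode:

    /* choose a slot up front so the header parser can write into it */
    if (s->current_picture_ptr == NULL || s->current_picture_ptr->data[0]) {
        int i = ff_find_unused_picture(s, 0);   /* 0 = regular (non-shared) slot */
        s->current_picture_ptr = &s->picture[i];
    }

    /* ...decode the picture/VOP header here; for MPEG-4 this is where
     * decode_vop_header() now stores s->current_picture_ptr->pts... */

    if (MPV_frame_start(s, avctx) < 0)          /* reuses current_picture_ptr */
        return -1;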


@@ -1014,32 +1014,33 @@ static void draw_edges_c(uint8_t *buf, int wrap, int width, int height, int w)
     }
 }
 
-static int find_unused_picture(MpegEncContext *s, int shared){
+int ff_find_unused_picture(MpegEncContext *s, int shared){
     int i;
 
     if(shared){
         for(i=0; i<MAX_PICTURE_COUNT; i++){
-            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) break;
+            if(s->picture[i].data[0]==NULL && s->picture[i].type==0) return i;
         }
     }else{
         for(i=0; i<MAX_PICTURE_COUNT; i++){
-            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) break; //FIXME
+            if(s->picture[i].data[0]==NULL && s->picture[i].type!=0) return i; //FIXME
         }
         for(i=0; i<MAX_PICTURE_COUNT; i++){
-            if(s->picture[i].data[0]==NULL) break;
+            if(s->picture[i].data[0]==NULL) return i;
         }
     }
 
-    assert(i<MAX_PICTURE_COUNT);
-    return i;
+    assert(0);
+    return -1;
 }
 
-/* generic function for encode/decode called before a frame is coded/decoded */
+/**
+ * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded
+ */
 int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 {
     int i;
     AVFrame *pic;
     s->mb_skiped = 0;
 
     assert(s->last_picture_ptr==NULL || s->out_format != FMT_H264 || s->codec_id == CODEC_ID_SVQ3);
@@ -1068,18 +1069,22 @@ alloc:
             }
         }
 
-        i= find_unused_picture(s, 0);
-        pic= (AVFrame*)&s->picture[i];
+        if(s->current_picture_ptr && s->current_picture_ptr->data[0]==NULL)
+            pic= (AVFrame*)s->current_picture_ptr; //we allready have a unused image (maybe it was set before reading the header)
+        else{
+            i= ff_find_unused_picture(s, 0);
+            pic= (AVFrame*)&s->picture[i];
+        }
 
         pic->reference= s->pict_type != B_TYPE ? 3 : 0;
 
-        if(s->current_picture_ptr)
+        if(s->current_picture_ptr) //FIXME broken, we need a coded_picture_number in MpegEncContext
            pic->coded_picture_number= s->current_picture_ptr->coded_picture_number+1;
 
         if( alloc_picture(s, (Picture*)pic, 0) < 0)
             return -1;
 
-        s->current_picture_ptr= &s->picture[i];
+        s->current_picture_ptr= (Picture*)pic;
     }
 
     s->current_picture_ptr->pict_type= s->pict_type;
@@ -1425,7 +1430,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
 //    printf("%d %d %d %d\n",pic_arg->linesize[0], pic_arg->linesize[1], s->linesize, s->uvlinesize);
 
     if(direct){
-        i= find_unused_picture(s, 1);
+        i= ff_find_unused_picture(s, 1);
 
         pic= (AVFrame*)&s->picture[i];
         pic->reference= 3;
@@ -1437,7 +1442,7 @@ static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg){
         alloc_picture(s, (Picture*)pic, 1);
     }else{
         int offset= 16;
-        i= find_unused_picture(s, 0);
+        i= ff_find_unused_picture(s, 0);
 
         pic= (AVFrame*)&s->picture[i];
         pic->reference= 3;
@@ -1587,7 +1592,7 @@ static void select_input_picture(MpegEncContext *s){
         if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){
             // input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable
 
-            int i= find_unused_picture(s, 0);
+            int i= ff_find_unused_picture(s, 0);
             Picture *pic= &s->picture[i];
 
             /* mark us unused / free shared pic */
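
Besides being exported (the ff_ prefix here and the new prototype in the header below) so the h263/MPEG-4 decoder above can call it, find_unused_picture changes its contract: instead of breaking out of the loops and asserting i<MAX_PICTURE_COUNT, it returns the slot index directly and falls through to assert(0)/return -1 when every Picture slot is occupied. In a build without assertions a caller could therefore check for failure, although the callers in this commit do not; a hedged sketch of that check (illustrative only, not committed code):

    int i = ff_find_unused_picture(s, 1);   /* shared=1: slot for a user-supplied buffer */
    if (i < 0)
        return -1;                          /* no free slot; error handling is illustrative */
    pic = (AVFrame*)&s->picture[i];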


@@ -718,6 +718,7 @@ int ff_combine_frame( MpegEncContext *s, int next, uint8_t **buf, int *buf_size)
 void ff_mpeg_flush(AVCodecContext *avctx);
 void ff_print_debug_info(MpegEncContext *s, Picture *pict);
 void ff_write_quant_matrix(PutBitContext *pb, int16_t *matrix);
+int ff_find_unused_picture(MpegEncContext *s, int shared);
 void ff_er_frame_start(MpegEncContext *s);
 void ff_er_frame_end(MpegEncContext *s);