Mirror of https://github.com/FFmpeg/FFmpeg.git
Merge remote-tracking branch 'cus/stable'
* cus/stable:
  ffplay: simplify video pts calculation
  ffplay: fix indentation
  ffplay: handle audio buffersink output properly with buffering filters
  ffplay: set time_base of audio filter buffer src

Merged-by: Michael Niedermayer <michaelni@gmx.at>
commit 93e8fcb94b

ffplay.c: 117 changed lines

--- a/ffplay.c
+++ b/ffplay.c
@@ -188,6 +188,7 @@ typedef struct VideoState {
     unsigned int audio_buf1_size;
     int audio_buf_index; /* in bytes */
     int audio_write_buf_size;
+    int audio_buf_frames_pending;
     AVPacket audio_pkt_temp;
     AVPacket audio_pkt;
     int audio_pkt_temp_serial;
@@ -1647,7 +1648,7 @@ static int queue_picture(VideoState *is, AVFrame *src_frame, double pts, int64_t
     return 0;
 }
 
-static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt, int *serial)
+static int get_video_frame(VideoState *is, AVFrame *frame, AVPacket *pkt, int *serial)
 {
     int got_picture;
 
@@ -1678,22 +1679,22 @@ static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacke
         int ret = 1;
 
         if (decoder_reorder_pts == -1) {
-            *pts = av_frame_get_best_effort_timestamp(frame);
+            frame->pts = av_frame_get_best_effort_timestamp(frame);
         } else if (decoder_reorder_pts) {
-            *pts = frame->pkt_pts;
+            frame->pts = frame->pkt_pts;
         } else {
-            *pts = frame->pkt_dts;
+            frame->pts = frame->pkt_dts;
         }
 
-        if (*pts == AV_NOPTS_VALUE) {
-            *pts = 0;
+        if (frame->pts == AV_NOPTS_VALUE) {
+            frame->pts = 0;
         }
 
         if (framedrop>0 || (framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) {
             SDL_LockMutex(is->pictq_mutex);
-            if (is->frame_last_pts != AV_NOPTS_VALUE && *pts) {
+            if (is->frame_last_pts != AV_NOPTS_VALUE && frame->pts) {
                 double clockdiff = get_video_clock(is) - get_master_clock(is);
-                double dpts = av_q2d(is->video_st->time_base) * *pts;
+                double dpts = av_q2d(is->video_st->time_base) * frame->pts;
                 double ptsdiff = dpts - is->frame_last_pts;
                 if (!isnan(clockdiff) && fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
                     ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
@@ -1828,9 +1829,10 @@ static int configure_audio_filters(VideoState *is, const char *afilters, int for
         return AVERROR(ENOMEM);
 
     ret = snprintf(asrc_args, sizeof(asrc_args),
-                   "sample_rate=%d:sample_fmt=%s:channels=%d",
+                   "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
                    is->audio_filter_src.freq, av_get_sample_fmt_name(is->audio_filter_src.fmt),
-                   is->audio_filter_src.channels);
+                   is->audio_filter_src.channels,
+                   1, is->audio_filter_src.freq);
     if (is->audio_filter_src.channel_layout)
         snprintf(asrc_args + ret, sizeof(asrc_args) - ret,
                  ":channel_layout=0x%"PRIx64, is->audio_filter_src.channel_layout);
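
The time_base=%d/%d argument added above tells the abuffer source which time base the incoming frame timestamps use, here 1/sample_rate, so the filter graph and the buffersink can report meaningful pts values. As a rough, self-contained illustration, with invented values and variable names that are not taken from ffplay.c, the argument string produced by that snprintf() would look like this for a 44.1 kHz stereo s16 input:

    /* Illustrative sketch only: shows the kind of abuffer argument string the
     * patched snprintf() produces once time_base is appended. Values are invented. */
    #include <stdio.h>

    int main(void)
    {
        char asrc_args[256];
        int freq = 44100;            /* hypothetical sample rate */
        const char *fmt = "s16";     /* hypothetical sample format name */
        int channels = 2;            /* hypothetical channel count */

        snprintf(asrc_args, sizeof(asrc_args),
                 "sample_rate=%d:sample_fmt=%s:channels=%d:time_base=%d/%d",
                 freq, fmt, channels, 1, freq);

        /* Prints: sample_rate=44100:sample_fmt=s16:channels=2:time_base=1/44100 */
        printf("%s\n", asrc_args);
        return 0;
    }

sample_rate, sample_fmt, channels, channel_layout and time_base are the option names accepted by libavfilter's abuffer source, which is what asrc_args is eventually handed to.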
@@ -1884,7 +1886,6 @@ static int video_thread(void *arg)
     AVPacket pkt = { 0 };
     VideoState *is = arg;
     AVFrame *frame = av_frame_alloc();
-    int64_t pts_int = AV_NOPTS_VALUE, pos = -1;
     double pts;
     int ret;
     int serial = 0;
@@ -1899,19 +1900,15 @@ static int video_thread(void *arg)
 #endif
 
     for (;;) {
-#if CONFIG_AVFILTER
-        AVRational tb;
-#endif
         while (is->paused && !is->videoq.abort_request)
             SDL_Delay(10);
 
         avcodec_get_frame_defaults(frame);
         av_free_packet(&pkt);
 
-        ret = get_video_frame(is, frame, &pts_int, &pkt, &serial);
+        ret = get_video_frame(is, frame, &pkt, &serial);
         if (ret < 0)
             goto the_end;
 
         if (!ret)
             continue;
 
@@ -1944,7 +1941,6 @@ static int video_thread(void *arg)
             last_serial = serial;
         }
 
-        frame->pts = pts_int;
         frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame);
         ret = av_buffersrc_add_frame(filt_in, frame);
         if (ret < 0)
@@ -1966,23 +1962,12 @@ static int video_thread(void *arg)
             if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                 is->frame_last_filter_delay = 0;
 
-            pts_int = frame->pts;
-            tb = filt_out->inputs[0]->time_base;
-            pos = av_frame_get_pkt_pos(frame);
-            if (av_cmp_q(tb, is->video_st->time_base)) {
-                av_unused int64_t pts1 = pts_int;
-                pts_int = av_rescale_q(pts_int, tb, is->video_st->time_base);
-                av_dlog(NULL, "video_thread(): "
-                        "tb:%d/%d pts:%"PRId64" -> tb:%d/%d pts:%"PRId64"\n",
-                        tb.num, tb.den, pts1,
-                        is->video_st->time_base.num, is->video_st->time_base.den, pts_int);
-            }
-            pts = pts_int * av_q2d(is->video_st->time_base);
-            ret = queue_picture(is, frame, pts, pos, serial);
+            pts = frame->pts * av_q2d(filt_out->inputs[0]->time_base);
+            ret = queue_picture(is, frame, pts, av_frame_get_pkt_pos(frame), serial);
             av_frame_unref(frame);
         }
 #else
-        pts = pts_int * av_q2d(is->video_st->time_base);
+        pts = frame->pts * av_q2d(is->video_st->time_base);
         ret = queue_picture(is, frame, pts, pkt.pos, serial);
         av_frame_unref(frame);
 #endif
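
The hunk above is the "simplify video pts calculation" part: instead of keeping an integer pts_int, rescaling it into the stream time base and only then converting to seconds, the new code converts frame->pts to seconds directly with the filter output's time base. The two give the same result up to rounding, because av_rescale_q() and the final multiplication cancel out. A small standalone check of that equivalence, with made-up numbers rather than anything from ffplay:

    /* Hedged illustration, not part of the patch: converting a frame pts
     * directly with the filter output time base gives the same seconds value
     * as first rescaling into the stream time base, so the intermediate
     * pts_int / av_rescale_q() step can be dropped. */
    #include <stdio.h>
    #include <stdint.h>
    #include "libavutil/rational.h"      /* AVRational, av_q2d() */
    #include "libavutil/mathematics.h"   /* av_rescale_q() */

    int main(void)
    {
        AVRational filter_tb = {1, 30000};   /* hypothetical filter output time base */
        AVRational stream_tb = {1, 90000};   /* hypothetical stream time base */
        int64_t frame_pts = 3003;

        double direct   = frame_pts * av_q2d(filter_tb);   /* 0.1001 s */
        double rescaled = av_rescale_q(frame_pts, filter_tb, stream_tb) * av_q2d(stream_tb);

        printf("direct=%f rescaled=%f\n", direct, rescaled);  /* both ~0.1001 */
        return 0;
    }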
@@ -2152,10 +2137,12 @@ static int audio_decode_frame(VideoState *is)
     int flush_complete = 0;
     int wanted_nb_samples;
     AVRational tb;
+    int ret;
+    int reconfigure;
 
     for (;;) {
         /* NOTE: the audio packet can contain several frames */
-        while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet)) {
+        while (pkt_temp->size > 0 || (!pkt_temp->data && new_packet) || is->audio_buf_frames_pending) {
             if (!is->frame) {
                 if (!(is->frame = avcodec_alloc_frame()))
                     return AVERROR(ENOMEM);
@@ -2170,37 +2157,36 @@ static int audio_decode_frame(VideoState *is)
             if (is->paused)
                 return -1;
 
-            if (flush_complete)
-                break;
-            new_packet = 0;
-            len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
-            if (len1 < 0) {
-                /* if error, we skip the frame */
-                pkt_temp->size = 0;
-                break;
-            }
+            if (!is->audio_buf_frames_pending) {
+                if (flush_complete)
+                    break;
+                new_packet = 0;
+                len1 = avcodec_decode_audio4(dec, is->frame, &got_frame, pkt_temp);
+                if (len1 < 0) {
+                    /* if error, we skip the frame */
+                    pkt_temp->size = 0;
+                    break;
+                }
 
             pkt_temp->data += len1;
             pkt_temp->size -= len1;
 
             if (!got_frame) {
                 /* stop sending empty packets if the decoder is finished */
                 if (!pkt_temp->data && dec->codec->capabilities & CODEC_CAP_DELAY)
                     flush_complete = 1;
                 continue;
             }
 
-            if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
-                is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, dec->time_base);
-            if (pkt_temp->pts != AV_NOPTS_VALUE)
-                pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
-            tb = dec->time_base;
+            tb = (AVRational){1, is->frame->sample_rate};
+            if (is->frame->pts != AV_NOPTS_VALUE)
+                is->frame->pts = av_rescale_q(is->frame->pts, dec->time_base, tb);
+            if (is->frame->pts == AV_NOPTS_VALUE && pkt_temp->pts != AV_NOPTS_VALUE)
+                is->frame->pts = av_rescale_q(pkt_temp->pts, is->audio_st->time_base, tb);
+            if (pkt_temp->pts != AV_NOPTS_VALUE)
+                pkt_temp->pts += (double) is->frame->nb_samples / is->frame->sample_rate / av_q2d(is->audio_st->time_base);
 
 #if CONFIG_AVFILTER
-        {
-            int ret;
-            int reconfigure;
 
             dec_channel_layout = get_valid_channel_layout(is->frame->channel_layout, av_frame_get_channels(is->frame));
 
             reconfigure =
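
A detail worth noting in the rewritten block above: tb is now {1, sample_rate}, so after the av_rescale_q() calls a frame's pts is simply a sample count, which carries through filtering and buffering more naturally than a codec time base. A tiny standalone example of that rescale, using hypothetical numbers (a 1/90000 codec time base and a 48 kHz stream), not values from the patch:

    #include <stdio.h>
    #include <stdint.h>
    #include <inttypes.h>
    #include "libavutil/rational.h"      /* AVRational */
    #include "libavutil/mathematics.h"   /* av_rescale_q() */

    int main(void)
    {
        AVRational dec_tb    = {1, 90000};   /* assumed codec time base */
        AVRational sample_tb = {1, 48000};   /* 1/sample_rate, as the patch uses */
        int64_t pts_dec      = 90000;        /* 1.0 second expressed in dec_tb */

        /* Same instant expressed as a sample count: 48000 == 1.0 s of 48 kHz audio. */
        int64_t pts_samples = av_rescale_q(pts_dec, dec_tb, sample_tb);
        printf("%" PRId64 "\n", pts_samples);
        return 0;
    }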
@@ -2232,10 +2218,18 @@ static int audio_decode_frame(VideoState *is)
             if ((ret = av_buffersrc_add_frame(is->in_audio_filter, is->frame)) < 0)
                 return ret;
             av_frame_unref(is->frame);
-            if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0)
-                return ret;
-            tb = is->out_audio_filter->inputs[0]->time_base;
+#endif
             }
+#if CONFIG_AVFILTER
+            if ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, is->frame, 0)) < 0) {
+                if (ret == AVERROR(EAGAIN)) {
+                    is->audio_buf_frames_pending = 0;
+                    continue;
+                }
+                return ret;
+            }
+            is->audio_buf_frames_pending = 1;
+            tb = is->out_audio_filter->inputs[0]->time_base;
 #endif
 
             data_size = av_samples_get_buffer_size(NULL, av_frame_get_channels(is->frame),
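
This is the core of "handle audio buffersink output properly with buffering filters": one frame pushed into the graph may yield zero, one or several frames at the sink, so ffplay now records whether frames are still pending and treats AVERROR(EAGAIN) from the sink as "decode more input" rather than as a failure. A minimal sketch of that drain pattern, assuming only a configured abuffersink context and an allocated AVFrame; the function name and the consume step are placeholders, not ffplay code:

    #include "libavfilter/buffersink.h"   /* av_buffersink_get_frame_flags() */
    #include "libavutil/frame.h"
    #include "libavutil/error.h"

    /* Pull every frame the sink currently has buffered; return 0 when the caller
     * should decode/feed more input, or a negative AVERROR on a real failure. */
    static int drain_audio_sink(AVFilterContext *sink, AVFrame *frame)
    {
        for (;;) {
            int ret = av_buffersink_get_frame_flags(sink, frame, 0);
            if (ret == AVERROR(EAGAIN))
                return 0;                 /* nothing pending: go decode more input */
            if (ret < 0)
                return ret;               /* AVERROR_EOF or a genuine error */
            /* ... consume 'frame' here (e.g. convert and queue it for playback) ... */
            av_frame_unref(frame);
        }
    }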
@@ -2337,6 +2331,7 @@ static int audio_decode_frame(VideoState *is)
         if (pkt->data == flush_pkt.data) {
             avcodec_flush_buffers(dec);
             flush_complete = 0;
+            is->audio_buf_frames_pending = 0;
         }
 
         *pkt_temp = *pkt;