Mirror of https://github.com/FFmpeg/FFmpeg.git

doc/examples/demuxing_decoding: convert to new decoding API

commit 3bfe20389d (parent e4edf220e5)
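Background on the API change: avcodec_decode_video2()/avcodec_decode_audio4() return at most one frame per call and, for audio, may consume only part of the packet, which is why the old loop had to track how many bytes were decoded. The send/receive API decouples input from output: a packet is submitted once with avcodec_send_packet(), and avcodec_receive_frame() is then called in a loop, since one packet may yield zero, one, or several frames. A minimal sketch of the pattern, independent of this example (the decode() wrapper and process_frame() callback are illustrative placeholders, not names used by the example):

#include <stdio.h>
#include <inttypes.h>
#include <libavcodec/avcodec.h>

/* Placeholder consumer: the real example writes raw video/audio to files. */
static void process_frame(const AVFrame *frame)
{
    printf("got frame, pts=%" PRId64 "\n", frame->pts);
}

static int decode(AVCodecContext *dec, AVFrame *frame, const AVPacket *pkt)
{
    /* Passing pkt == NULL puts the decoder into draining mode and flushes
     * any buffered frames. */
    int ret = avcodec_send_packet(dec, pkt);
    if (ret < 0)
        return ret;

    /* One packet may produce zero, one, or several frames, so drain in a loop. */
    while (ret >= 0) {
        ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;   /* decoder needs more input / is fully drained */
        else if (ret < 0)
            return ret; /* a genuine decoding error */

        process_frame(frame);
        av_frame_unref(frame);
    }
    return 0;
}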
diff --git a/doc/examples/demuxing_decoding.c b/doc/examples/demuxing_decoding.c
--- a/doc/examples/demuxing_decoding.c
+++ b/doc/examples/demuxing_decoding.c
@@ -55,87 +55,93 @@ static AVPacket pkt;
 static int video_frame_count = 0;
 static int audio_frame_count = 0;
 
-static int decode_packet(int *got_frame, int cached)
+static int output_video_frame(AVFrame *frame)
 {
-    int ret = 0;
-    int decoded = pkt.size;
-
-    *got_frame = 0;
-
-    if (pkt.stream_index == video_stream_idx) {
-        /* decode video frame */
-        ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
-        if (ret < 0) {
-            fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
-            return ret;
-        }
-
-        if (*got_frame) {
-
-            if (frame->width != width || frame->height != height ||
-                frame->format != pix_fmt) {
-                /* To handle this change, one could call av_image_alloc again and
-                 * decode the following frames into another rawvideo file. */
-                fprintf(stderr, "Error: Width, height and pixel format have to be "
-                        "constant in a rawvideo file, but the width, height or "
-                        "pixel format of the input video changed:\n"
-                        "old: width = %d, height = %d, format = %s\n"
-                        "new: width = %d, height = %d, format = %s\n",
-                        width, height, av_get_pix_fmt_name(pix_fmt),
-                        frame->width, frame->height,
-                        av_get_pix_fmt_name(frame->format));
-                return -1;
-            }
-
-            printf("video_frame%s n:%d coded_n:%d\n",
-                   cached ? "(cached)" : "",
-                   video_frame_count++, frame->coded_picture_number);
-
-            /* copy decoded frame to destination buffer:
-             * this is required since rawvideo expects non aligned data */
-            av_image_copy(video_dst_data, video_dst_linesize,
-                          (const uint8_t **)(frame->data), frame->linesize,
-                          pix_fmt, width, height);
-
-            /* write to rawvideo file */
-            fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
-        }
-    } else if (pkt.stream_index == audio_stream_idx) {
-        /* decode audio frame */
-        ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
-        if (ret < 0) {
-            fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
-            return ret;
-        }
-        /* Some audio decoders decode only part of the packet, and have to be
-         * called again with the remainder of the packet data.
-         * Sample: fate-suite/lossless-audio/luckynight-partial.shn
-         * Also, some decoders might over-read the packet. */
-        decoded = FFMIN(ret, pkt.size);
-
-        if (*got_frame) {
-            size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
-            printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
-                   cached ? "(cached)" : "",
-                   audio_frame_count++, frame->nb_samples,
-                   av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
-
-            /* Write the raw audio data samples of the first plane. This works
-             * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
-             * most audio decoders output planar audio, which uses a separate
-             * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
-             * In other words, this code will write only the first audio channel
-             * in these cases.
-             * You should use libswresample or libavfilter to convert the frame
-             * to packed data. */
-            fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
-        }
-    }
-
-    if (*got_frame)
-        av_frame_unref(frame);
-
-    return decoded;
+    if (frame->width != width || frame->height != height ||
+        frame->format != pix_fmt) {
+        /* To handle this change, one could call av_image_alloc again and
+         * decode the following frames into another rawvideo file. */
+        fprintf(stderr, "Error: Width, height and pixel format have to be "
+                "constant in a rawvideo file, but the width, height or "
+                "pixel format of the input video changed:\n"
+                "old: width = %d, height = %d, format = %s\n"
+                "new: width = %d, height = %d, format = %s\n",
+                width, height, av_get_pix_fmt_name(pix_fmt),
+                frame->width, frame->height,
+                av_get_pix_fmt_name(frame->format));
+        return -1;
+    }
+
+    printf("video_frame n:%d coded_n:%d\n",
+           video_frame_count++, frame->coded_picture_number);
+
+    /* copy decoded frame to destination buffer:
+     * this is required since rawvideo expects non aligned data */
+    av_image_copy(video_dst_data, video_dst_linesize,
+                  (const uint8_t **)(frame->data), frame->linesize,
+                  pix_fmt, width, height);
+
+    /* write to rawvideo file */
+    fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
+    return 0;
+}
+
+static int output_audio_frame(AVFrame *frame)
+{
+    size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
+    printf("audio_frame n:%d nb_samples:%d pts:%s\n",
+           audio_frame_count++, frame->nb_samples,
+           av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
+
+    /* Write the raw audio data samples of the first plane. This works
+     * fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
+     * most audio decoders output planar audio, which uses a separate
+     * plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
+     * In other words, this code will write only the first audio channel
+     * in these cases.
+     * You should use libswresample or libavfilter to convert the frame
+     * to packed data. */
+    fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
+
+    return 0;
+}
+
+static int decode_packet(AVCodecContext *dec, const AVPacket *pkt)
+{
+    int ret = 0;
+
+    // submit the packet to the decoder
+    ret = avcodec_send_packet(dec, pkt);
+    if (ret < 0) {
+        fprintf(stderr, "Error submitting a packet for decoding (%s)\n", av_err2str(ret));
+        return ret;
+    }
+
+    // get all the available frames from the decoder
+    while (ret >= 0) {
+        ret = avcodec_receive_frame(dec, frame);
+        if (ret < 0) {
+            // those two return values are special and mean there is no output
+            // frame available, but there were no errors during decoding
+            if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
+                return 0;
+
+            fprintf(stderr, "Error during decoding (%s)\n", av_err2str(ret));
+            return ret;
+        }
+
+        // write the frame data to output file
+        if (dec->codec->type == AVMEDIA_TYPE_VIDEO)
+            ret = output_video_frame(frame);
+        else
+            ret = output_audio_frame(frame);
+
+        av_frame_unref(frame);
+        if (ret < 0)
+            return ret;
+    }
+
+    return 0;
 }
 
 static int open_codec_context(int *stream_idx,
@@ -221,7 +227,7 @@ static int get_format_from_sample_fmt(const char **fmt,
 
 int main (int argc, char **argv)
 {
-    int ret = 0, got_frame;
+    int ret = 0;
 
     if (argc != 4) {
         fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
@@ -309,23 +315,22 @@ int main (int argc, char **argv)
 
     /* read frames from the file */
     while (av_read_frame(fmt_ctx, &pkt) >= 0) {
-        AVPacket orig_pkt = pkt;
-        do {
-            ret = decode_packet(&got_frame, 0);
-            if (ret < 0)
-                break;
-            pkt.data += ret;
-            pkt.size -= ret;
-        } while (pkt.size > 0);
-        av_packet_unref(&orig_pkt);
+        // check if the packet belongs to a stream we are interested in, otherwise
+        // skip it
+        if (pkt.stream_index == video_stream_idx)
+            ret = decode_packet(video_dec_ctx, &pkt);
+        else if (pkt.stream_index == audio_stream_idx)
+            ret = decode_packet(audio_dec_ctx, &pkt);
+        av_packet_unref(&pkt);
+        if (ret < 0)
+            break;
     }
 
-    /* flush cached frames */
-    pkt.data = NULL;
-    pkt.size = 0;
-    do {
-        decode_packet(&got_frame, 1);
-    } while (got_frame);
+    /* flush the decoders */
+    if (video_dec_ctx)
+        decode_packet(video_dec_ctx, NULL);
+    if (audio_dec_ctx)
+        decode_packet(audio_dec_ctx, NULL);
 
     printf("Demuxing succeeded.\n");
 
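A note on the flushing change in the last hunk: with the old API the example flushed by repeatedly calling decode_packet() with an empty packet until got_frame stayed zero; with the send/receive API each decoder is put into draining mode once by sending a NULL packet, after which avcodec_receive_frame() returns the remaining buffered frames and finally AVERROR_EOF. A minimal sketch of that drain step, assuming an already opened decoder context and an allocated frame (drain_decoder() is an illustrative helper, not part of the example, which does the equivalent through decode_packet(ctx, NULL)):

#include <libavcodec/avcodec.h>

static void drain_decoder(AVCodecContext *dec, AVFrame *frame)
{
    /* A NULL packet switches the decoder into draining mode. */
    if (avcodec_send_packet(dec, NULL) < 0)
        return;

    for (;;) {
        int ret = avcodec_receive_frame(dec, frame);
        if (ret == AVERROR_EOF)
            break;   /* all buffered frames have been returned */
        if (ret < 0)
            break;   /* unexpected error; stop draining */
        /* ... write or inspect the drained frame here ... */
        av_frame_unref(frame);
    }
    /* avcodec_flush_buffers(dec) would be required before reusing the decoder. */
}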