
avcodec/encode: restructure the old encode API

Following the same logic as 061a0c14bb, this commit turns the old encode API
into a wrapper for the new one.

Signed-off-by: James Almer <jamrial@gmail.com>
Author: James Almer 2020-06-09 18:35:00 -03:00
parent 827d6fe73d
commit 93016f5d1d
3 changed files with 116 additions and 263 deletions
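
The new API that these deprecated entry points now wrap is the avcodec_send_frame()/avcodec_receive_packet() pair. Below is a minimal caller-side sketch of that pattern, added here for orientation only; the encode_one() helper and the write_packet callback are illustrative names and are not part of this patch or of the FFmpeg API.

#include <libavcodec/avcodec.h>

/* Minimal sketch of the send/receive pattern that compat_encode() wraps.
 * encode_one() and write_packet are hypothetical names, not FFmpeg API. */
static int encode_one(AVCodecContext *avctx, const AVFrame *frame,
                      AVPacket *pkt, int (*write_packet)(AVPacket *pkt))
{
    /* frame == NULL puts the encoder into draining (flush) mode. */
    int ret = avcodec_send_frame(avctx, frame);
    if (ret < 0)
        return ret;

    /* One sent frame may produce zero, one or several packets; this loop is
     * what the old one-frame-in/one-packet-out calls could not express, which
     * is why compat_encode() below has to warn and drop packets in that case. */
    while (ret >= 0) {
        ret = avcodec_receive_packet(avctx, pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;               /* needs more input, or fully drained */
        if (ret < 0)
            return ret;             /* a real encoding error */

        ret = write_packet(pkt);    /* hand the packet to the muxer/caller */
        av_packet_unref(pkt);
    }
    return ret;
}

Flushing at end of stream then amounts to one encode_one(avctx, NULL, pkt, write_packet) call, which the AVERROR_EOF branch above handles.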

libavcodec/encode.c

@@ -32,43 +32,28 @@
 int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
 {
-    if (avpkt->size < 0) {
-        av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
-        return AVERROR(EINVAL);
-    }
     if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
         av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
                size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
         return AVERROR(EINVAL);
     }
+    av_assert0(!avpkt->data);
     if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
-        av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
-        if (!avpkt->data || avpkt->size < size) {
-            av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
-            avpkt->data = avctx->internal->byte_buffer;
-            avpkt->size = avctx->internal->byte_buffer_size;
-        }
+        av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
+        avpkt->data = avctx->internal->byte_buffer;
+        avpkt->size = size;
     }
-    if (avpkt->data) {
-        AVBufferRef *buf = avpkt->buf;
-        if (avpkt->size < size) {
-            av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
-            return AVERROR(EINVAL);
-        }
-        av_init_packet(avpkt);
-        avpkt->buf = buf;
-        avpkt->size = size;
-        return 0;
-    } else {
+    if (!avpkt->data) {
         int ret = av_new_packet(avpkt, size);
         if (ret < 0)
             av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
         return ret;
     }
+    return 0;
 }
 /**
@@ -105,244 +90,6 @@ fail:
return ret;
}
int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
AVFrame *extended_frame = NULL;
AVFrame *padded_frame = NULL;
int ret;
AVPacket user_pkt = *avpkt;
int needs_realloc = !user_pkt.data;
*got_packet_ptr = 0;
if (!avctx->codec->encode2) {
av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
return AVERROR(ENOSYS);
}
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
av_packet_unref(avpkt);
return 0;
}
/* ensure that extended_data is properly set */
if (frame && !frame->extended_data) {
if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
avctx->channels > AV_NUM_DATA_POINTERS) {
av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
"with more than %d channels, but extended_data is not set.\n",
AV_NUM_DATA_POINTERS);
return AVERROR(EINVAL);
}
av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
extended_frame = av_frame_alloc();
if (!extended_frame)
return AVERROR(ENOMEM);
memcpy(extended_frame, frame, sizeof(AVFrame));
extended_frame->extended_data = extended_frame->data;
frame = extended_frame;
}
/* extract audio service type metadata */
if (frame) {
AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
if (sd && sd->size >= sizeof(enum AVAudioServiceType))
avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
}
/* check for valid frame size */
if (frame) {
if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
if (frame->nb_samples > avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
ret = AVERROR(EINVAL);
goto end;
}
} else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
/* if we already got an undersized frame, that must have been the last */
if (avctx->internal->last_audio_frame) {
av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame (avcodec_encode_audio2)\n", avctx->frame_size);
ret = AVERROR(EINVAL);
goto end;
}
if (frame->nb_samples < avctx->frame_size) {
if (!(padded_frame = av_frame_alloc())) {
ret = AVERROR(ENOMEM);
goto end;
}
ret = pad_last_frame(avctx, padded_frame, frame);
if (ret < 0)
goto end;
frame = padded_frame;
avctx->internal->last_audio_frame = 1;
}
if (frame->nb_samples != avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
ret = AVERROR(EINVAL);
goto end;
}
}
}
av_assert0(avctx->codec->encode2);
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret) {
if (*got_packet_ptr) {
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
if (avpkt->pts == AV_NOPTS_VALUE)
avpkt->pts = frame->pts;
if (!avpkt->duration)
avpkt->duration = ff_samples_to_time_base(avctx,
frame->nb_samples);
}
avpkt->dts = avpkt->pts;
} else {
avpkt->size = 0;
}
}
if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
needs_realloc = 0;
if (user_pkt.data) {
if (user_pkt.size >= avpkt->size) {
memcpy(user_pkt.data, avpkt->data, avpkt->size);
} else {
av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
avpkt->size = user_pkt.size;
ret = -1;
}
avpkt->buf = user_pkt.buf;
avpkt->data = user_pkt.data;
} else if (!avpkt->buf) {
ret = av_packet_make_refcounted(avpkt);
if (ret < 0)
goto end;
}
}
if (!ret) {
if (needs_realloc && avpkt->data) {
ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
if (ret >= 0)
avpkt->data = avpkt->buf->data;
}
if (frame)
avctx->frame_number++;
}
if (ret < 0 || !*got_packet_ptr) {
av_packet_unref(avpkt);
goto end;
}
/* NOTE: if we add any audio encoders which output non-keyframe packets,
* this needs to be moved to the encoders, but for now we can do it
* here to simplify things */
avpkt->flags |= AV_PKT_FLAG_KEY;
end:
av_frame_free(&padded_frame);
av_free(extended_frame);
return ret;
}
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret;
AVPacket user_pkt = *avpkt;
int needs_realloc = !user_pkt.data;
*got_packet_ptr = 0;
if (!avctx->codec->encode2) {
av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
return AVERROR(ENOSYS);
}
if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
avctx->stats_out[0] = '\0';
if (!frame &&
!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
(avctx->internal->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME))) {
av_packet_unref(avpkt);
return 0;
}
if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
return AVERROR(EINVAL);
if (frame && frame->format == AV_PIX_FMT_NONE)
av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
if (frame && (frame->width == 0 || frame->height == 0))
av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
av_assert0(avctx->codec->encode2);
if (CONFIG_FRAME_THREAD_ENCODER &&
avctx->internal->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
else {
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (*got_packet_ptr && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts;
}
av_assert0(ret <= 0);
emms_c();
if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
needs_realloc = 0;
if (user_pkt.data) {
if (user_pkt.size >= avpkt->size) {
memcpy(user_pkt.data, avpkt->data, avpkt->size);
} else {
av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
avpkt->size = user_pkt.size;
ret = -1;
}
avpkt->buf = user_pkt.buf;
avpkt->data = user_pkt.data;
} else if (!avpkt->buf) {
ret = av_packet_make_refcounted(avpkt);
if (ret < 0)
return ret;
}
}
if (!ret) {
if (!*got_packet_ptr)
avpkt->size = 0;
if (needs_realloc && avpkt->data) {
ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
if (ret >= 0)
avpkt->data = avpkt->buf->data;
}
if (frame)
avctx->frame_number++;
}
if (ret < 0 || !*got_packet_ptr)
av_packet_unref(avpkt);
return ret;
}
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
const AVSubtitle *sub)
{
@@ -606,3 +353,104 @@ int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *
return 0;
}
static int compat_encode(AVCodecContext *avctx, AVPacket *avpkt,
int *got_packet, const AVFrame *frame)
{
AVCodecInternal *avci = avctx->internal;
AVPacket user_pkt;
int ret;
*got_packet = 0;
if (frame && avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
if (frame->format == AV_PIX_FMT_NONE)
av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
if (frame->width == 0 || frame->height == 0)
av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
}
ret = avcodec_send_frame(avctx, frame);
if (ret == AVERROR_EOF)
ret = 0;
else if (ret == AVERROR(EAGAIN)) {
/* we fully drain all the output in each encode call, so this should not
* ever happen */
return AVERROR_BUG;
} else if (ret < 0)
return ret;
av_packet_move_ref(&user_pkt, avpkt);
while (ret >= 0) {
ret = avcodec_receive_packet(avctx, avpkt);
if (ret < 0) {
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
ret = 0;
goto finish;
}
if (avpkt != avci->compat_encode_packet) {
if (avpkt->data && user_pkt.data) {
if (user_pkt.size >= avpkt->size) {
memcpy(user_pkt.data, avpkt->data, avpkt->size);
av_buffer_unref(&avpkt->buf);
avpkt->buf = user_pkt.buf;
avpkt->data = user_pkt.data;
av_init_packet(&user_pkt);
} else {
av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
av_packet_unref(avpkt);
ret = AVERROR(EINVAL);
goto finish;
}
}
*got_packet = 1;
avpkt = avci->compat_encode_packet;
} else {
if (!avci->compat_decode_warned) {
av_log(avctx, AV_LOG_WARNING, "The deprecated avcodec_encode_* "
"API cannot return all the packets for this encoder. "
"Some packets will be dropped. Update your code to the "
"new encoding API to fix this.\n");
avci->compat_decode_warned = 1;
av_packet_unref(avpkt);
}
}
if (avci->draining)
break;
}
finish:
if (ret < 0)
av_packet_unref(&user_pkt);
return ret;
}
int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);
if (ret < 0)
av_packet_unref(avpkt);
return ret;
}
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret = compat_encode(avctx, avpkt, got_packet_ptr, frame);
if (ret < 0)
av_packet_unref(avpkt);
return ret;
}

libavcodec/internal.h

@@ -186,6 +186,7 @@ typedef struct AVCodecInternal {
      * of the packet (that should be submitted in the next decode call
      */
     size_t compat_decode_partial_size;
     AVFrame *compat_decode_frame;
+    AVPacket *compat_encode_packet;
 
     int showed_multi_packet_warning;

libavcodec/utils.c

@@ -585,15 +585,16 @@ int attribute_align_arg avcodec_open2(AVCodecContext *avctx, const AVCodec *code
     avci->to_free = av_frame_alloc();
     avci->compat_decode_frame = av_frame_alloc();
+    avci->compat_encode_packet = av_packet_alloc();
     avci->buffer_frame = av_frame_alloc();
     avci->buffer_pkt = av_packet_alloc();
     avci->es.in_frame = av_frame_alloc();
     avci->ds.in_pkt = av_packet_alloc();
     avci->last_pkt_props = av_packet_alloc();
-    if (!avci->to_free || !avci->compat_decode_frame ||
+    if (!avci->compat_decode_frame || !avci->compat_encode_packet ||
         !avci->buffer_frame || !avci->buffer_pkt ||
         !avci->es.in_frame || !avci->ds.in_pkt ||
-        !avci->last_pkt_props) {
+        !avci->to_free || !avci->last_pkt_props) {
         ret = AVERROR(ENOMEM);
         goto free_and_end;
     }
@@ -1043,6 +1044,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
     av_frame_free(&avci->to_free);
     av_frame_free(&avci->compat_decode_frame);
     av_frame_free(&avci->buffer_frame);
+    av_packet_free(&avci->compat_encode_packet);
     av_packet_free(&avci->buffer_pkt);
     av_packet_free(&avci->last_pkt_props);
@@ -1082,6 +1084,7 @@ void avcodec_flush_buffers(AVCodecContext *avctx)
     avci->nb_draining_errors = 0;
     av_frame_unref(avci->buffer_frame);
     av_frame_unref(avci->compat_decode_frame);
+    av_packet_unref(avci->compat_encode_packet);
     av_packet_unref(avci->buffer_pkt);
     av_frame_unref(avci->es.in_frame);
@@ -1142,6 +1145,7 @@ av_cold int avcodec_close(AVCodecContext *avctx)
     av_frame_free(&avctx->internal->to_free);
     av_frame_free(&avctx->internal->compat_decode_frame);
     av_frame_free(&avctx->internal->buffer_frame);
+    av_packet_free(&avctx->internal->compat_encode_packet);
    av_packet_free(&avctx->internal->buffer_pkt);
    av_packet_free(&avctx->internal->last_pkt_props);