
avcodec/nvenc: refactor encode_frame a bit

Signed-off-by: Timo Rothenpieler <timo@rothenpieler.org>
Andrey Turkin
2016-05-29 15:50:06 +03:00
committed by Timo Rothenpieler
parent 214e63f851
commit 2f53b5b74b


@@ -141,20 +141,6 @@ static int nvenc_print_error(void *log_ctx, NVENCSTATUS err,
     return ret;
 }
 
-static void timestamp_queue_enqueue(AVFifoBuffer* queue, int64_t timestamp)
-{
-    av_fifo_generic_write(queue, &timestamp, sizeof(timestamp), NULL);
-}
-
-static int64_t timestamp_queue_dequeue(AVFifoBuffer* queue)
-{
-    int64_t timestamp = AV_NOPTS_VALUE;
-    if (av_fifo_size(queue) > 0)
-        av_fifo_generic_read(queue, &timestamp, sizeof(timestamp), NULL);
-
-    return timestamp;
-}
-
 static av_cold int nvenc_load_libraries(AVCodecContext *avctx)
 {
     NvencContext *ctx = avctx->priv_data;
@@ -1395,16 +1381,34 @@ static void nvenc_codec_specific_pic_params(AVCodecContext *avctx,
     switch (avctx->codec->id) {
     case AV_CODEC_ID_H264:
-        params->codecPicParams.h264PicParams.sliceMode = ctx->encode_config.encodeCodecConfig.h264Config.sliceMode;
-        params->codecPicParams.h264PicParams.sliceModeData = ctx->encode_config.encodeCodecConfig.h264Config.sliceModeData;
+        params->codecPicParams.h264PicParams.sliceMode =
+            ctx->encode_config.encodeCodecConfig.h264Config.sliceMode;
+        params->codecPicParams.h264PicParams.sliceModeData =
+            ctx->encode_config.encodeCodecConfig.h264Config.sliceModeData;
         break;
-    case AV_CODEC_ID_H265:
-        params->codecPicParams.hevcPicParams.sliceMode = ctx->encode_config.encodeCodecConfig.hevcConfig.sliceMode;
-        params->codecPicParams.hevcPicParams.sliceModeData = ctx->encode_config.encodeCodecConfig.hevcConfig.sliceModeData;
+    case AV_CODEC_ID_HEVC:
+        params->codecPicParams.hevcPicParams.sliceMode =
+            ctx->encode_config.encodeCodecConfig.hevcConfig.sliceMode;
+        params->codecPicParams.hevcPicParams.sliceModeData =
+            ctx->encode_config.encodeCodecConfig.hevcConfig.sliceModeData;
         break;
     }
 }
 
+static inline void timestamp_queue_enqueue(AVFifoBuffer* queue, int64_t timestamp)
+{
+    av_fifo_generic_write(queue, &timestamp, sizeof(timestamp), NULL);
+}
+
+static inline int64_t timestamp_queue_dequeue(AVFifoBuffer* queue)
+{
+    int64_t timestamp = AV_NOPTS_VALUE;
+    if (av_fifo_size(queue) > 0)
+        av_fifo_generic_read(queue, &timestamp, sizeof(timestamp), NULL);
+
+    return timestamp;
+}
+
 static int nvenc_set_timestamp(AVCodecContext *avctx,
                                NV_ENC_LOCK_BITSTREAM *params,
                                AVPacket *pkt)
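
The two helpers moved above are thin wrappers around libavutil's FIFO. Below is a minimal, self-contained sketch of how such a timestamp queue behaves, assuming the pre-5.1 AVFifoBuffer API this file uses; the demo program and its queue size are made up for illustration and are not part of the commit.

/* Standalone demo (not nvenc.c itself): timestamps go in when a frame is
 * submitted and come back out, in order, when a packet is produced.
 * Build with: gcc demo.c $(pkg-config --cflags --libs libavutil) */
#include <inttypes.h>
#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/fifo.h>

static inline void timestamp_queue_enqueue(AVFifoBuffer *queue, int64_t timestamp)
{
    av_fifo_generic_write(queue, &timestamp, sizeof(timestamp), NULL);
}

static inline int64_t timestamp_queue_dequeue(AVFifoBuffer *queue)
{
    int64_t timestamp = AV_NOPTS_VALUE;
    if (av_fifo_size(queue) > 0)
        av_fifo_generic_read(queue, &timestamp, sizeof(timestamp), NULL);

    return timestamp;
}

int main(void)
{
    /* room for 32 timestamps; a made-up size mirroring a small reorder window */
    AVFifoBuffer *q = av_fifo_alloc(32 * sizeof(int64_t));
    if (!q)
        return 1;

    for (int64_t pts = 0; pts < 4; pts++)
        timestamp_queue_enqueue(q, pts);            /* on frame submission */

    int64_t ts;
    while ((ts = timestamp_queue_dequeue(q)) != AV_NOPTS_VALUE)
        printf("dequeued %" PRId64 "\n", ts);       /* on packet output */

    av_fifo_freep(&q);
    return 0;
}
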
@@ -1539,13 +1543,16 @@ error2:
     return res;
 }
 
-static int output_ready(NvencContext *ctx, int flush)
+static int output_ready(AVCodecContext *avctx, int flush)
 {
+    NvencContext *ctx = avctx->priv_data;
     int nb_ready, nb_pending;
 
     nb_ready   = av_fifo_size(ctx->output_surface_ready_queue)   / sizeof(NvencSurface*);
     nb_pending = av_fifo_size(ctx->output_surface_queue)         / sizeof(NvencSurface*);
-    return nb_ready > 0 && (flush || nb_ready + nb_pending >= ctx->async_depth);
+    if (flush)
+        return nb_ready > 0;
+    return (nb_ready > 0) && (nb_ready + nb_pending >= ctx->async_depth);
 }
 
 int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
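
The rewritten output_ready() keeps the old behaviour but splits the flush case out of the compound expression. A standalone sketch of the same gating rule follows, with the queue sizes and async_depth passed in directly; the test harness and sample numbers are hypothetical.

/* Exercise the gating rule without an encoder: the names mirror the diff. */
#include <stdio.h>

static int output_ready_rule(int nb_ready, int nb_pending, int async_depth, int flush)
{
    /* when flushing, drain anything that is ready */
    if (flush)
        return nb_ready > 0;
    /* otherwise hold packets back until async_depth surfaces are in flight */
    return (nb_ready > 0) && (nb_ready + nb_pending >= async_depth);
}

int main(void)
{
    /* async_depth = 4: one ready surface alone is not emitted yet ... */
    printf("%d\n", output_ready_rule(1, 1, 4, 0));  /* 0: keep buffering */
    /* ... but with enough surfaces in flight it is ... */
    printf("%d\n", output_ready_rule(1, 3, 4, 0));  /* 1: emit a packet */
    /* ... and on flush anything ready is drained. */
    printf("%d\n", output_ready_rule(1, 0, 4, 1));  /* 1: drain at end of stream */
    return 0;
}
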
@@ -1564,7 +1571,10 @@ int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     if (frame) {
         inSurf = get_free_frame(ctx);
-        av_assert0(inSurf);
+        if (!inSurf) {
+            av_log(avctx, AV_LOG_ERROR, "No free surfaces\n");
+            return AVERROR_BUG;
+        }
 
         res = nvenc_upload_frame(avctx, frame, inSurf);
         if (res) {
@@ -1577,14 +1587,12 @@ int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
         pic_params.inputWidth = avctx->width;
         pic_params.inputHeight = avctx->height;
         pic_params.outputBitstream = inSurf->output_surface;
-        pic_params.completionEvent = 0;
 
         if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
-            if (frame->top_field_first) {
+            if (frame->top_field_first)
                 pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_TOP_BOTTOM;
-            } else {
+            else
                 pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FIELD_BOTTOM_TOP;
-            }
         } else {
             pic_params.pictureStruct = NV_ENC_PIC_STRUCT_FRAME;
         }
@@ -1601,25 +1609,22 @@ int ff_nvenc_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
     }
 
     nv_status = p_nvenc->nvEncEncodePicture(ctx->nvencoder, &pic_params);
+    if (nv_status != NV_ENC_SUCCESS &&
+        nv_status != NV_ENC_ERR_NEED_MORE_INPUT)
+        return nvenc_print_error(avctx, nv_status, "EncodePicture failed!");
 
-    if (frame && nv_status == NV_ENC_ERR_NEED_MORE_INPUT)
+    if (frame)
         av_fifo_generic_write(ctx->output_surface_queue, &inSurf, sizeof(inSurf), NULL);
 
-    if (nv_status != NV_ENC_SUCCESS && nv_status != NV_ENC_ERR_NEED_MORE_INPUT) {
-        return nvenc_print_error(avctx, nv_status, "EncodePicture failed!");
-    }
-
-    if (nv_status != NV_ENC_ERR_NEED_MORE_INPUT) {
+    /* all the pending buffers are now ready for output */
+    if (nv_status == NV_ENC_SUCCESS) {
         while (av_fifo_size(ctx->output_surface_queue) > 0) {
             av_fifo_generic_read(ctx->output_surface_queue, &tmpoutsurf, sizeof(tmpoutsurf), NULL);
             av_fifo_generic_write(ctx->output_surface_ready_queue, &tmpoutsurf, sizeof(tmpoutsurf), NULL);
         }
-
-        if (frame)
-            av_fifo_generic_write(ctx->output_surface_ready_queue, &inSurf, sizeof(inSurf), NULL);
     }
 
-    if (output_ready(ctx, !frame)) {
+    if (output_ready(avctx, !frame)) {
         av_fifo_generic_read(ctx->output_surface_ready_queue, &tmpoutsurf, sizeof(tmpoutsurf), NULL);
 
         res = process_output_surface(avctx, pkt, tmpoutsurf);
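
Taken together, the encode path now always queues the submitted surface as pending and promotes everything pending to the ready queue once nvEncEncodePicture reports success. Below is a toy model of that flow in plain C with a stub encoder and invented buffer counts, no NVENC involved; it only illustrates the queue movement, not the real API.

#include <stdio.h>

enum { NEED_MORE_INPUT, SUCCESS };

#define QSIZE 16
static int pending[QSIZE], ready[QSIZE];
static int pend_head, pend_tail, n_ready;

/* stub encoder: pretend the hardware holds on to the first two pictures */
static int encode_picture(int surf)
{
    static int submitted;
    (void)surf;
    return ++submitted <= 2 ? NEED_MORE_INPUT : SUCCESS;
}

int main(void)
{
    for (int surf = 0; surf < 5; surf++) {
        int status = encode_picture(surf);

        /* as in the diff: every submitted surface goes on the pending queue */
        pending[pend_tail++] = surf;

        /* "all the pending buffers are now ready for output" */
        if (status == SUCCESS) {
            while (pend_head < pend_tail)
                ready[n_ready++] = pending[pend_head++];
        }

        printf("after surface %d: %d pending, %d ready\n",
               surf, pend_tail - pend_head, n_ready);
    }
    return 0;
}
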