
avcodec/amfenc: Enable use of AMF Surface in multiple encoders

Fixes the behavior of AMF encoders when the same AMF surface is passed
to multiple encoder objects, for example when using -filter_complex.
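
For illustration, a command along these lines reproduces the scenario the fix targets: one decoded stream is split and fed to two AMF encoders through -filter_complex. The file names and options below are hypothetical and not taken from the commit; the exact graph depends on the platform and decode path.

    ffmpeg -hwaccel d3d11va -hwaccel_output_format d3d11 -i input.mp4 \
        -filter_complex "[0:v]split=2[a][b]" \
        -map "[a]" -c:v h264_amf out_h264.mp4 \
        -map "[b]" -c:v hevc_amf out_hevc.mp4
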
Dmitrii Ovchinnikov
2025-06-12 00:54:53 +02:00
parent b2c0d37be5
commit 64fce7202c
3 changed files with 291 additions and 167 deletions


@@ -41,6 +41,7 @@
 #include "libavutil/mastering_display_metadata.h"
 #define AMF_AV_FRAME_REF L"av_frame_ref"
+#define PTS_PROP L"PtsProp"
 static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AMFHDRMetadata *hdrmeta)
 {
@@ -104,7 +105,6 @@ static int amf_save_hdr_metadata(AVCodecContext *avctx, const AVFrame *frame, AM
 #define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
-#define PTS_PROP L"PtsProp"
 const enum AVPixelFormat ff_amf_pix_fmts[] = {
 AV_PIX_FMT_NV12,
@@ -127,6 +127,8 @@ const enum AVPixelFormat ff_amf_pix_fmts[] = {
 AV_PIX_FMT_NONE
 };
+static int64_t next_encoder_index = 0;
 static int amf_init_encoder(AVCodecContext *avctx)
 {
 AMFEncoderContext *ctx = avctx->priv_data;
@@ -135,6 +137,21 @@ static int amf_init_encoder(AVCodecContext *avctx)
 enum AVPixelFormat pix_fmt;
 AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext*)ctx->device_ctx_ref->data;
 AVAMFDeviceContext *amf_device_ctx = (AVAMFDeviceContext *)hw_device_ctx->hwctx;
+int alloc_size;
+wchar_t name[512];
+alloc_size = swprintf(name, amf_countof(name), L"%s%lld",PTS_PROP, next_encoder_index) + 1;
+ctx->pts_property_name = av_memdup(name, alloc_size * sizeof(wchar_t));
+if(!ctx->pts_property_name)
+return AVERROR(ENOMEM);
+alloc_size = swprintf(name, amf_countof(name), L"%s%lld",AMF_AV_FRAME_REF, next_encoder_index) + 1;
+ctx->av_frame_property_name = av_memdup(name, alloc_size * sizeof(wchar_t));
+if(!ctx->av_frame_property_name)
+return AVERROR(ENOMEM);
+next_encoder_index++;
 switch (avctx->codec->id) {
 case AV_CODEC_ID_H264:
@@ -187,6 +204,9 @@ int av_cold ff_amf_encode_close(AVCodecContext *avctx)
 av_buffer_unref(&ctx->device_ctx_ref);
 av_fifo_freep2(&ctx->timestamp_list);
+av_freep(&ctx->pts_property_name);
+av_freep(&ctx->av_frame_property_name);
 return 0;
 }
@@ -249,7 +269,7 @@ static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buff
 break;
 }
-buffer->pVtbl->GetProperty(buffer, PTS_PROP, &var);
+buffer->pVtbl->GetProperty(buffer, ctx->pts_property_name, &var);
 pkt->pts = var.int64Value; // original pts
@@ -298,7 +318,7 @@ int ff_amf_encode_init(AVCodecContext *avctx)
 }
 else {
 ret = av_hwdevice_ctx_create_derived(&ctx->device_ctx_ref, AV_HWDEVICE_TYPE_AMF, avctx->hw_device_ctx, 0);
-AMF_RETURN_IF_FALSE(avctx, ret == 0, ret, "Failed to create derived AMF device context: %s\n", av_err2str(ret));
+AMF_RETURN_IF_FALSE(ctx, ret == 0, ret, "Failed to create derived AMF device context: %s\n", av_err2str(ret));
 }
 } else if (avctx->hw_frames_ctx) {
 AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
@@ -308,13 +328,13 @@ int ff_amf_encode_init(AVCodecContext *avctx)
 }
 else {
 ret = av_hwdevice_ctx_create_derived(&ctx->device_ctx_ref, AV_HWDEVICE_TYPE_AMF, frames_ctx->device_ref, 0);
-AMF_RETURN_IF_FALSE(avctx, ret == 0, ret, "Failed to create derived AMF device context: %s\n", av_err2str(ret));
+AMF_RETURN_IF_FALSE(ctx, ret == 0, ret, "Failed to create derived AMF device context: %s\n", av_err2str(ret));
 }
 }
 }
 else {
 ret = av_hwdevice_ctx_create(&ctx->device_ctx_ref, AV_HWDEVICE_TYPE_AMF, NULL, NULL, 0);
-AMF_RETURN_IF_FALSE(avctx, ret == 0, ret, "Failed to create hardware device context (AMF) : %s\n", av_err2str(ret));
+AMF_RETURN_IF_FALSE(ctx, ret == 0, ret, "Failed to create hardware device context (AMF) : %s\n", av_err2str(ret));
 }
 if ((ret = amf_init_encoder(avctx)) == 0) {
@@ -347,22 +367,105 @@ static AMF_RESULT amf_set_property_buffer(AMFSurface *object, const wchar_t *nam
 return res;
 }
-static AMF_RESULT amf_store_attached_frame_ref(const AVFrame *frame, AMFSurface *surface)
+static AMF_RESULT amf_lock_context(AVCodecContext *avctx)
+{
+AMFEncoderContext *ctx = avctx->priv_data;
+AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext*)ctx->device_ctx_ref->data;
+AVAMFDeviceContext *amf_device_ctx = (AVAMFDeviceContext *)hw_device_ctx->hwctx;
+AMF_RESULT res;
+switch(amf_device_ctx->memory_type) {
+case AMF_MEMORY_DX11:
+res = amf_device_ctx->context->pVtbl->LockDX11(amf_device_ctx->context);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "LockDX11() failed with error %d\n", res);
+break;
+case AMF_MEMORY_DX12:
+{
+AMFContext2 *context2 = NULL;
+AMFGuid guid = IID_AMFContext2();
+res = amf_device_ctx->context->pVtbl->QueryInterface(amf_device_ctx->context, &guid, (void**)&context2);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "QueryInterface for AMFContext2 failed with error %d\n", res);
+res = context2->pVtbl->LockDX12(context2);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "LockDX12() failed with error %d\n", res);
+context2->pVtbl->Release(context2);
+}
+break;
+case AMF_MEMORY_DX9:
+res = amf_device_ctx->context->pVtbl->LockDX9(amf_device_ctx->context);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "LockDX9() failed with error %d\n", res);
+case AMF_MEMORY_VULKAN:
+{
+AMFContext2 *context2 = NULL;
+AMFGuid guid = IID_AMFContext2();
+res = amf_device_ctx->context->pVtbl->QueryInterface(amf_device_ctx->context, &guid, (void**)&context2);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "QueryInterface for AMFContext2 failed with error %d\n", res);
+res = context2->pVtbl->LockVulkan(context2);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "LockVulkan() failed with error %d\n", res);
+context2->pVtbl->Release(context2);
+}
+break;
+}
+return AMF_OK;
+}
+
+static AMF_RESULT amf_unlock_context(AVCodecContext *avctx)
+{
+AMFEncoderContext *ctx = avctx->priv_data;
+AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext*)ctx->device_ctx_ref->data;
+AVAMFDeviceContext *amf_device_ctx = (AVAMFDeviceContext *)hw_device_ctx->hwctx;
+AMF_RESULT res;
+switch(amf_device_ctx->memory_type) {
+case AMF_MEMORY_DX11:
+res = amf_device_ctx->context->pVtbl->UnlockDX11(amf_device_ctx->context);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "LockDX11() failed with error %d\n", res);
+break;
+case AMF_MEMORY_DX12:
+{
+AMFContext2 *context2 = NULL;
+AMFGuid guid = IID_AMFContext2();
+res = amf_device_ctx->context->pVtbl->QueryInterface(amf_device_ctx->context, &guid, (void**)&context2);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "QueryInterface for AMFContext2 failed with error %d\n", res);
+res = context2->pVtbl->UnlockDX12(context2);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "LockDX12() failed with error %d\n", res);
+context2->pVtbl->Release(context2);
+}
+break;
+case AMF_MEMORY_DX9:
+res = amf_device_ctx->context->pVtbl->UnlockDX9(amf_device_ctx->context);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "LockDX9() failed with error %d\n", res);
+case AMF_MEMORY_VULKAN:
+{
+AMFContext2 *context2 = NULL;
+AMFGuid guid = IID_AMFContext2();
+res = amf_device_ctx->context->pVtbl->QueryInterface(amf_device_ctx->context, &guid, (void**)&context2);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "QueryInterface for AMFContext2 failed with error %d\n", res);
+res = context2->pVtbl->UnlockVulkan(context2);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "LockVulkan() failed with error %d\n", res);
+context2->pVtbl->Release(context2);
+}
+break;
+}
+return AMF_OK;
+}
+
+static AMF_RESULT amf_store_attached_frame_ref(AMFEncoderContext *ctx, const AVFrame *frame, AMFSurface *surface)
 {
 AMF_RESULT res = AMF_FAIL;
 int64_t data;
 AVFrame *frame_ref = av_frame_clone(frame);
 if (frame_ref) {
 memcpy(&data, &frame_ref, sizeof(frame_ref)); // store pointer in 8 bytes
-AMF_ASSIGN_PROPERTY_INT64(res, surface, AMF_AV_FRAME_REF, data);
+AMF_ASSIGN_PROPERTY_INT64(res, surface, ctx->av_frame_property_name, data);
 }
 return res;
 }
-static AMF_RESULT amf_release_attached_frame_ref(AMFBuffer *buffer)
+static AMF_RESULT amf_release_attached_frame_ref(AMFEncoderContext *ctx, AMFBuffer *buffer)
 {
 AMFVariantStruct var = {0};
-AMF_RESULT res = buffer->pVtbl->GetProperty(buffer, AMF_AV_FRAME_REF, &var);
+AMF_RESULT res = buffer->pVtbl->GetProperty(buffer, ctx->av_frame_property_name, &var);
 if(res == AMF_OK && var.int64Value){
 AVFrame *frame_ref;
 memcpy(&frame_ref, &var.int64Value, sizeof(frame_ref));
@@ -371,7 +474,7 @@ static AMF_RESULT amf_release_attached_frame_ref(AMFBuffer *buffer)
 return res;
 }
-int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+static int amf_submit_frame(AVCodecContext *avctx, AVFrame *frame, AMFSurface **surface_resubmit)
 {
 AMFEncoderContext *ctx = avctx->priv_data;
 AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext*)ctx->device_ctx_ref->data;
@@ -379,79 +482,36 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
 AMFSurface *surface;
 AMF_RESULT res;
 int ret;
-AMF_RESULT res_query;
-AMFData *data = NULL;
-AVFrame *frame = av_frame_alloc();
-int block_and_wait;
-int input_full = 0;
 int hw_surface = 0;
-int64_t pts = 0;
 int max_b_frames = ctx->max_b_frames < 0 ? 0 : ctx->max_b_frames;
-if (!ctx->encoder){
-av_frame_free(&frame);
-return AVERROR(EINVAL);
-}
-ret = ff_encode_get_frame(avctx, frame);
-if(ret < 0){
-if(ret != AVERROR_EOF){
-av_frame_free(&frame);
-if(ret == AVERROR(EAGAIN)){
-if(ctx->submitted_frame <= ctx->encoded_frame + max_b_frames + 1) // too soon to poll
-return ret;
-}
-}
-}
-if(ret != AVERROR(EAGAIN)){
-if (!frame->buf[0]) { // submit drain
-if (!ctx->eof) { // submit drain one time only
-if(!ctx->delayed_drain) {
-res = ctx->encoder->pVtbl->Drain(ctx->encoder);
-if (res == AMF_INPUT_FULL) {
-ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in receive loop
-} else {
-if (res == AMF_OK) {
-ctx->eof = 1; // drain started
-}
-AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Drain() failed with error %d\n", res);
-}
-}
-}
-} else { // submit frame
-// prepare surface from frame
+// prepare surface from frame
 switch (frame->format) {
 #if CONFIG_D3D11VA
 case AV_PIX_FMT_D3D11:
 {
 static const GUID AMFTextureArrayIndexGUID = { 0x28115527, 0xe7c3, 0x4b66, { 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } };
 ID3D11Texture2D *texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
 int index = (intptr_t)frame->data[1]; // index is a slice in texture array is - set to tell AMF which slice to use
 av_assert0(frame->hw_frames_ctx && avctx->hw_frames_ctx &&
 frame->hw_frames_ctx->data == avctx->hw_frames_ctx->data);
 texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
 res = amf_device_ctx->context->pVtbl->CreateSurfaceFromDX11Native(amf_device_ctx->context, texture, &surface, NULL); // wrap to AMF surface
 AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX11Native() failed with error %d\n", res);
 hw_surface = 1;
 }
 break;
 #endif
 #if CONFIG_DXVA2
 case AV_PIX_FMT_DXVA2_VLD:
 {
 IDirect3DSurface9 *texture = (IDirect3DSurface9 *)frame->data[3]; // actual texture
 res = amf_device_ctx->context->pVtbl->CreateSurfaceFromDX9Native(amf_device_ctx->context, texture, &surface, NULL); // wrap to AMF surface
 AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR(ENOMEM), "CreateSurfaceFromDX9Native() failed with error %d\n", res);
 hw_surface = 1;
 }
 break;
 #endif
 case AV_PIX_FMT_AMF_SURFACE:
 {
 surface = (AMFSurface*)frame->data[0];
@@ -467,14 +527,12 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
 }
 break;
 }
 if (hw_surface) {
-amf_store_attached_frame_ref(frame, surface);
+amf_store_attached_frame_ref(ctx, frame, surface);
 ctx->hwsurfaces_in_queue++;
 // input HW surfaces can be vertically aligned by 16; tell AMF the real size
 surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height);
 }
 // HDR10 metadata
 if (frame->color_trc == AVCOL_TRC_SMPTE2084) {
 AMFBuffer * hdrmeta_buffer = NULL;
@@ -491,14 +549,14 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
 AMF_ASSIGN_PROPERTY_INTERFACE(res, ctx->encoder, AMF_VIDEO_ENCODER_AV1_INPUT_HDR_METADATA, hdrmeta_buffer); break;
 }
 res = amf_set_property_buffer(surface, L"av_frame_hdrmeta", hdrmeta_buffer);
-AMF_RETURN_IF_FALSE(avctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SetProperty failed for \"av_frame_hdrmeta\" with error %d\n", res);
 }
 hdrmeta_buffer->pVtbl->Release(hdrmeta_buffer);
 }
 }
 surface->pVtbl->SetPts(surface, frame->pts);
-AMF_ASSIGN_PROPERTY_INT64(res, surface, PTS_PROP, frame->pts);
+AMF_ASSIGN_PROPERTY_INT64(res, surface, ctx->pts_property_name, frame->pts);
 switch (avctx->codec->id) {
 case AV_CODEC_ID_H264:
@@ -550,26 +608,89 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
 default:
 break;
 }
-pts = frame->pts;
 // submit surface
 res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
-av_frame_free(&frame);
 if (res == AMF_INPUT_FULL) { // handle full queue
 //store surface for later submission
-input_full = 1;
+*surface_resubmit = surface;
 } else {
 surface->pVtbl->Release(surface);
 AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d\n", res);
 ctx->submitted_frame++;
-ret = av_fifo_write(ctx->timestamp_list, &pts, 1);
+ret = av_fifo_write(ctx->timestamp_list, &frame->pts, 1);
 if (ret < 0)
 return ret;
 if(ctx->submitted_frame <= ctx->encoded_frame + max_b_frames + 1)
 return AVERROR(EAGAIN); // if frame just submiited - don't poll or wait
 }
+return 0;
+}
+
+static int amf_submit_frame_locked(AVCodecContext *avctx, AVFrame *frame, AMFSurface **surface_resubmit)
+{
+int ret;
+int locked = amf_lock_context(avctx);
+if(locked != AMF_OK)
+av_log(avctx, AV_LOG_WARNING, "amf_lock_context() failed with %d - should not happen\n", locked);
+ret = amf_submit_frame(avctx, frame, surface_resubmit);
+if(locked == AMF_OK)
+amf_unlock_context(avctx);
+return ret;
+}
+
+int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
+{
+AMFEncoderContext *ctx = avctx->priv_data;
+AVHWDeviceContext *hw_device_ctx = (AVHWDeviceContext*)ctx->device_ctx_ref->data;
+AVAMFDeviceContext *amf_device_ctx = (AVAMFDeviceContext *)hw_device_ctx->hwctx;
+AMFSurface *surface = NULL;
+AMF_RESULT res;
+int ret;
+AMF_RESULT res_query;
+AMFData *data = NULL;
+AVFrame *frame = av_frame_alloc();
+int block_and_wait;
+int64_t pts = 0;
+int max_b_frames = ctx->max_b_frames < 0 ? 0 : ctx->max_b_frames;
+if (!ctx->encoder){
+av_frame_free(&frame);
+return AVERROR(EINVAL);
+}
+ret = ff_encode_get_frame(avctx, frame);
+if(ret < 0){
+if(ret != AVERROR_EOF){
+av_frame_free(&frame);
+if(ret == AVERROR(EAGAIN)){
+if(ctx->submitted_frame <= ctx->encoded_frame + max_b_frames + 1) // too soon to poll
+return ret;
+}
+}
+}
+if(ret != AVERROR(EAGAIN)){
+if (!frame->buf[0]) { // submit drain
+if (!ctx->eof) { // submit drain one time only
+if(!ctx->delayed_drain) {
+res = ctx->encoder->pVtbl->Drain(ctx->encoder);
+if (res == AMF_INPUT_FULL) {
+ctx->delayed_drain = 1; // input queue is full: resubmit Drain() in receive loop
+} else {
+if (res == AMF_OK) {
+ctx->eof = 1; // drain started
+}
+AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "Drain() failed with error %d\n", res);
+}
+}
+}
+} else { // submit frame
+ret = amf_submit_frame_locked(avctx, frame, &surface);
+if(ret < 0){
+av_frame_free(&frame);
+return ret;
+}
+pts = frame->pts;
 }
 }
 av_frame_free(&frame);
@@ -585,7 +706,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
 AMFGuid guid = IID_AMFBuffer();
 data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
 ret = amf_copy_buffer(avctx, avpkt, buffer);
-if (amf_release_attached_frame_ref(buffer) == AMF_OK) {
+if (amf_release_attached_frame_ref(ctx, buffer) == AMF_OK) {
 ctx->hwsurfaces_in_queue--;
 }
 ctx->encoded_frame++;
@@ -604,7 +725,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
 av_log(avctx, AV_LOG_WARNING, "Data acquired but delayed drain submission got AMF_INPUT_FULL- should not happen\n");
 }
 }
-} else if (ctx->delayed_drain || (ctx->eof && res_query != AMF_EOF) || (ctx->hwsurfaces_in_queue >= ctx->hwsurfaces_in_queue_max) || input_full) {
+} else if (ctx->delayed_drain || (ctx->eof && res_query != AMF_EOF) || (ctx->hwsurfaces_in_queue >= ctx->hwsurfaces_in_queue_max) || surface) {
 block_and_wait = 1;
 // Only sleep if the driver doesn't support waiting in QueryOutput()
 // or if we already have output data so we will skip calling it.
@@ -619,7 +740,7 @@ int ff_amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
 } else if (data == NULL) {
 ret = AVERROR(EAGAIN);
 } else {
-if(input_full) {
+if(surface) {
 // resubmit surface
 res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
 surface->pVtbl->Release(surface);


@@ -46,6 +46,8 @@ typedef struct AMFEncoderContext {
 AMFComponent *encoder; ///< AMF encoder object
 amf_bool eof; ///< flag indicating EOF happened
 AMF_SURFACE_FORMAT format; ///< AMF surface format
+wchar_t *pts_property_name;
+wchar_t *av_frame_property_name;
 int hwsurfaces_in_queue;
 int hwsurfaces_in_queue_max;


@@ -37,6 +37,7 @@ typedef struct AVAMFDeviceContext {
 int64_t version; ///< version of AMF runtime
 AMFContext *context;
+AMF_MEMORY_TYPE memory_type;
 } AVAMFDeviceContext;
 enum AMF_SURFACE_FORMAT av_av_to_amf_format(enum AVPixelFormat fmt);