FFHWAccel: add buffer_ref argument to start_frame
This commit adds a reference to the packet buffer as an argument to start_frame and adapts all existing callers. Asynchronous hardware accelerators can then keep a reference to the packet data instead of copying it.
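At the FFHWAccel level (see the hwaccel_internal.h hunk below), the callback changes from:

int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);

to:

int (*start_frame)(AVCodecContext *avctx, const AVBufferRef *buf_ref,
                   const uint8_t *buf, uint32_t buf_size);

Callers that have no packet-backed AVBufferRef for the data they pass (several decoders below) pass NULL for buf_ref.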
@@ -1389,7 +1389,8 @@ static int av1_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
     s->cur_frame.temporal_id = header->temporal_id;
 
     if (avctx->hwaccel && s->cur_frame.f) {
-        ret = FF_HW_CALL(avctx, start_frame, unit->data, unit->data_size);
+        ret = FF_HW_CALL(avctx, start_frame, s->pkt->buf,
+                         unit->data, unit->data_size);
         if (ret < 0) {
             av_log(avctx, AV_LOG_ERROR, "HW accel start frame fail.\n");
             goto end;
@@ -45,7 +45,10 @@ typedef struct AV1DecodePictureContext {
     unsigned bitstream_size;
 } AV1DecodePictureContext;
 
-static int d3d12va_av1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
+static int d3d12va_av1_start_frame(AVCodecContext *avctx,
+                                   av_unused const AVBufferRef *buffer_ref,
+                                   av_unused const uint8_t *buffer,
+                                   av_unused uint32_t size)
 {
     const AV1DecContext *h = avctx->priv_data;
     AV1DecodePictureContext *ctx_pic = h->cur_frame.hwaccel_picture_private;

@@ -50,8 +50,9 @@ static void fill_slice_short(DXVA_Slice_H264_Short *slice,
 }
 
 static int d3d12va_h264_start_frame(AVCodecContext *avctx,
-                                    av_unused const uint8_t *buffer,
-                                    av_unused uint32_t size)
+                                    av_unused const AVBufferRef *buffer_ref,
+                                    av_unused const uint8_t *buffer,
+                                    av_unused uint32_t size)
 {
     const H264Context *h = avctx->priv_data;
     H264DecodePictureContext *ctx_pic = h->cur_pic_ptr->hwaccel_picture_private;

@@ -49,7 +49,10 @@ static void fill_slice_short(DXVA_Slice_HEVC_Short *slice, unsigned position, un
     slice->wBadSliceChopping = 0;
 }
 
-static int d3d12va_hevc_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
+static int d3d12va_hevc_start_frame(AVCodecContext *avctx,
+                                    av_unused const AVBufferRef *buffer_ref,
+                                    av_unused const uint8_t *buffer,
+                                    av_unused uint32_t size)
 {
     const HEVCContext *h = avctx->priv_data;
     D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx);

@@ -40,7 +40,10 @@ typedef struct D3D12DecodePictureContext {
     unsigned bitstream_size;
 } D3D12DecodePictureContext;
 
-static int d3d12va_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
+static int d3d12va_mpeg2_start_frame(AVCodecContext *avctx,
+                                     av_unused const AVBufferRef *buffer_ref,
+                                     av_unused const uint8_t *buffer,
+                                     av_unused uint32_t size)
 {
     const MpegEncContext *s = avctx->priv_data;
     D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx);

@@ -41,7 +41,10 @@ typedef struct D3D12DecodePictureContext {
     unsigned bitstream_size;
 } D3D12DecodePictureContext;
 
-static int d3d12va_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
+static int d3d12va_vc1_start_frame(AVCodecContext *avctx,
+                                   av_unused const AVBufferRef *buffer_ref,
+                                   av_unused const uint8_t *buffer,
+                                   av_unused uint32_t size)
 {
     const VC1Context *v = avctx->priv_data;
     D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx);

@@ -45,7 +45,10 @@ static void fill_slice_short(DXVA_Slice_VPx_Short *slice, unsigned position, uns
     slice->wBadSliceChopping = 0;
 }
 
-static int d3d12va_vp9_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
+static int d3d12va_vp9_start_frame(AVCodecContext *avctx,
+                                   av_unused const AVBufferRef *buffer_ref,
+                                   av_unused const uint8_t *buffer,
+                                   av_unused uint32_t size)
 {
     const VP9SharedContext *h = avctx->priv_data;
     D3D12VADecodeContext *ctx = D3D12VA_DECODE_CONTEXT(avctx);
@@ -272,6 +272,7 @@ int ff_dxva2_av1_fill_picture_parameters(const AVCodecContext *avctx, AVDXVACont
 }
 
 static int dxva2_av1_start_frame(AVCodecContext *avctx,
+                                 av_unused const AVBufferRef *buffer_ref,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t size)
 {

@@ -444,6 +444,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
 
 
 static int dxva2_h264_start_frame(AVCodecContext *avctx,
+                                  av_unused const AVBufferRef *buffer_ref,
                                   av_unused const uint8_t *buffer,
                                   av_unused uint32_t size)
 {

@@ -363,6 +363,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
 
 
 static int dxva2_hevc_start_frame(AVCodecContext *avctx,
+                                  av_unused const AVBufferRef *buffer_ref,
                                   av_unused const uint8_t *buffer,
                                   av_unused uint32_t size)
 {

@@ -257,6 +257,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
 }
 
 static int dxva2_mpeg2_start_frame(AVCodecContext *avctx,
+                                   av_unused const AVBufferRef *buffer_ref,
                                    av_unused const uint8_t *buffer,
                                    av_unused uint32_t size)
 {

@@ -315,6 +315,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
 }
 
 static int dxva2_vc1_start_frame(AVCodecContext *avctx,
+                                 av_unused const AVBufferRef *buffer_ref,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t size)
 {

@@ -254,6 +254,7 @@ static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
 
 
 static int dxva2_vp9_start_frame(AVCodecContext *avctx,
+                                 av_unused const AVBufferRef *buffer_ref,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t size)
 {
@@ -742,7 +742,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *rframe,
 
     /* Start */
     if (hwaccel) {
-        ret = hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
+        ret = hwaccel->start_frame(avctx, avpkt->buf, avpkt->data, avpkt->size);
         if (ret < 0)
             return ret;
     }

@@ -561,7 +561,7 @@ retry:
         ff_thread_finish_setup(avctx);
 
     if (avctx->hwaccel) {
-        ret = FF_HW_CALL(avctx, start_frame,
+        ret = FF_HW_CALL(avctx, start_frame, NULL,
                          s->gb.buffer, s->gb.buffer_end - s->gb.buffer);
         if (ret < 0 )
             return ret;
@@ -587,7 +587,8 @@ static void debug_green_metadata(const H264SEIGreenMetaData *gm, void *logctx)
     }
 }
 
-static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
+static int decode_nal_units(H264Context *h, AVBufferRef *buf_ref,
+                            const uint8_t *buf, int buf_size)
 {
     AVCodecContext *const avctx = h->avctx;
     int nals_needed = 0; ///< number of NALs that need decoding before the next frame thread starts

@@ -668,7 +669,8 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
             }
 
             if (h->avctx->hwaccel &&
-                (ret = FF_HW_CALL(h->avctx, start_frame, buf, buf_size)) < 0)
+                (ret = FF_HW_CALL(h->avctx, start_frame, buf_ref,
+                                  buf, buf_size)) < 0)
                 goto end;
         }
 

@@ -1053,7 +1055,7 @@ static int h264_decode_frame(AVCodecContext *avctx, AVFrame *pict,
                              avctx->err_recognition, avctx);
     }
 
-    buf_index = decode_nal_units(h, buf, buf_size);
+    buf_index = decode_nal_units(h, avpkt->buf, buf, buf_size);
     if (buf_index < 0)
         return AVERROR_INVALIDDATA;
 
@@ -3401,7 +3401,10 @@ static int hevc_frame_start(HEVCContext *s, HEVCLayerContext *l,
         goto fail;
 
     if (s->avctx->hwaccel) {
-        ret = FF_HW_CALL(s->avctx, start_frame, NULL, 0);
+        AVCodecInternal *avci = s->avctx->internal;
+        AVPacket *avpkt = avci->in_pkt;
+        ret = FF_HW_CALL(s->avctx, start_frame,
+                         avpkt->buf, NULL, 0);
         if (ret < 0)
             goto fail;
     }
@@ -52,11 +52,13 @@ typedef struct FFHWAccel {
      * Otherwise, this means the whole frame is available at this point.
      *
      * @param avctx the codec context
+     * @param buf_ref the frame data buffer reference (optional)
      * @param buf the frame data buffer base
      * @param buf_size the size of the frame in bytes
      * @return zero if successful, a negative value otherwise
      */
-    int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+    int (*start_frame)(AVCodecContext *avctx, const AVBufferRef *buf_ref,
+                       const uint8_t *buf, uint32_t buf_size);
 
     /**
      * Callback for parameter data (SPS/PPS/VPS etc).
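For illustration only, a minimal sketch (not part of this commit) of how an asynchronous accelerator might use the new buf_ref argument to keep the packet data alive instead of copying it. The MyHWAccelContext type and my_hwaccel_start_frame name are invented for this example, and it assumes the accelerator state lives in avctx->internal->hwaccel_priv_data, as existing accelerators do.

/* Illustrative sketch only -- hypothetical accelerator, not part of this commit. */
#include "libavutil/buffer.h"
#include "avcodec.h"
#include "internal.h"

typedef struct MyHWAccelContext {
    AVBufferRef   *bitstream_ref;  /* keeps the packet data alive */
    const uint8_t *bitstream;
    uint32_t       bitstream_size;
} MyHWAccelContext;

static int my_hwaccel_start_frame(AVCodecContext *avctx,
                                  const AVBufferRef *buffer_ref,
                                  const uint8_t *buffer, uint32_t size)
{
    MyHWAccelContext *ctx = avctx->internal->hwaccel_priv_data;

    /* Drop whatever the previous frame referenced. */
    av_buffer_unref(&ctx->bitstream_ref);
    ctx->bitstream      = NULL;
    ctx->bitstream_size = 0;

    if (buffer_ref) {
        /* Reference the packet buffer instead of copying its contents;
         * the data stays valid until the asynchronous decode completes. */
        ctx->bitstream_ref = av_buffer_ref(buffer_ref);
        if (!ctx->bitstream_ref)
            return AVERROR(ENOMEM);
        ctx->bitstream      = buffer;
        ctx->bitstream_size = size;
    }
    /* When buffer_ref is NULL the accelerator still has to copy the data
     * in decode_slice(), exactly as before this change. */
    return 0;
}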
@@ -808,7 +808,7 @@ int ff_mjpeg_decode_sof(MJpegDecodeContext *s)
         if (!s->hwaccel_picture_private)
             return AVERROR(ENOMEM);
 
-        ret = hwaccel->start_frame(s->avctx, s->raw_image_buffer,
+        ret = hwaccel->start_frame(s->avctx, NULL, s->raw_image_buffer,
                                    s->raw_image_buffer_size);
         if (ret < 0)
             return ret;

@@ -1361,7 +1361,7 @@ static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
     }
 
     if (avctx->hwaccel) {
-        if ((ret = FF_HW_CALL(avctx, start_frame, buf, buf_size)) < 0)
+        if ((ret = FF_HW_CALL(avctx, start_frame, NULL, buf, buf_size)) < 0)
             return ret;
     } else if (s->codec_tag == MKTAG('V', 'C', 'R', '2')) {
         // Exchange UV
@@ -39,7 +39,9 @@ static int get_bit_depth_from_seq(const AV1RawSequenceHeader *seq)
         return 8;
 }
 
-static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+static int nvdec_av1_start_frame(AVCodecContext *avctx,
+                                 const AVBufferRef *buffer_ref,
+                                 const uint8_t *buffer, uint32_t size)
 {
     const AV1DecContext *s = avctx->priv_data;
     const AV1RawSequenceHeader *seq = s->raw_seq;

@@ -47,6 +47,7 @@ static void dpb_add(const H264Context *h, CUVIDH264DPBENTRY *dst, const H264Pict
 }
 
 static int nvdec_h264_start_frame(AVCodecContext *avctx,
+                                  const AVBufferRef *buffer_ref,
                                   const uint8_t *buffer, uint32_t size)
 {
     const H264Context *h = avctx->priv_data;

@@ -70,6 +70,7 @@ static void fill_scaling_lists(CUVIDHEVCPICPARAMS *ppc, const HEVCContext *s)
 }
 
 static int nvdec_hevc_start_frame(AVCodecContext *avctx,
+                                  const AVBufferRef *buffer_ref,
                                   const uint8_t *buffer, uint32_t size)
 {
     const HEVCContext *s = avctx->priv_data;

@@ -27,7 +27,9 @@
 #include "decode.h"
 #include "hwaccel_internal.h"
 
-static int nvdec_mjpeg_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+static int nvdec_mjpeg_start_frame(AVCodecContext *avctx,
+                                   const AVBufferRef *buffer_ref,
+                                   const uint8_t *buffer, uint32_t size)
 {
     MJpegDecodeContext *s = avctx->priv_data;
 

@@ -30,7 +30,9 @@
 #include "nvdec.h"
 #include "decode.h"
 
-static int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+static int nvdec_mpeg12_start_frame(AVCodecContext *avctx,
+                                    const AVBufferRef *buffer_ref,
+                                    const uint8_t *buffer, uint32_t size)
 {
     MpegEncContext *s = avctx->priv_data;
 

@@ -28,7 +28,9 @@
 #include "decode.h"
 #include "hwaccel_internal.h"
 
-static int nvdec_mpeg4_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+static int nvdec_mpeg4_start_frame(AVCodecContext *avctx,
+                                   const AVBufferRef *buffer_ref,
+                                   const uint8_t *buffer, uint32_t size)
 {
     Mpeg4DecContext *m = avctx->priv_data;
     MpegEncContext *s = &m->m;

@@ -29,7 +29,9 @@
 #include "decode.h"
 #include "vc1.h"
 
-static int nvdec_vc1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+static int nvdec_vc1_start_frame(AVCodecContext *avctx,
+                                 const AVBufferRef *buffer_ref,
+                                 const uint8_t *buffer, uint32_t size)
 {
     VC1Context *v = avctx->priv_data;
     MpegEncContext *s = &v->s;

@@ -32,7 +32,9 @@ static unsigned char safe_get_ref_idx(VP8Frame *frame)
     return frame ? ff_nvdec_get_ref_idx(frame->tf.f) : 255;
 }
 
-static int nvdec_vp8_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+static int nvdec_vp8_start_frame(AVCodecContext *avctx,
+                                 const AVBufferRef *buffer_ref,
+                                 const uint8_t *buffer, uint32_t size)
 {
     VP8Context *h = avctx->priv_data;
 

@@ -29,7 +29,9 @@
 #include "internal.h"
 #include "vp9shared.h"
 
-static int nvdec_vp9_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
+static int nvdec_vp9_start_frame(AVCodecContext *avctx,
+                                 const AVBufferRef *buffer_ref,
+                                 const uint8_t *buffer, uint32_t size)
 {
     VP9SharedContext *h = avctx->priv_data;
     const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
@@ -793,7 +793,7 @@ static int decode_frame(AVCodecContext *avctx, AVFrame *frame,
 
     if (HWACCEL_MAX && avctx->hwaccel) {
         const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
-        ret = hwaccel->start_frame(avctx, NULL, 0);
+        ret = hwaccel->start_frame(avctx, avpkt->buf, avpkt->data, avpkt->size);
         if (ret < 0)
             return ret;
         ret = hwaccel->decode_slice(avctx, avpkt->data, avpkt->size);
@@ -108,6 +108,7 @@ static av_cold int vaapi_av1_decode_uninit(AVCodecContext *avctx)
 
 
 static int vaapi_av1_start_frame(AVCodecContext *avctx,
+                                 av_unused const AVBufferRef *buffer_ref,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t size)
 {

@@ -232,6 +232,7 @@ static void fill_vaapi_plain_pred_weight_table(const H264Context *h,
 
 /** Initialize and start decoding a frame with VA API. */
 static int vaapi_h264_start_frame(AVCodecContext *avctx,
+                                  av_unused const AVBufferRef *buffer_ref,
                                   av_unused const uint8_t *buffer,
                                   av_unused uint32_t size)
 {

@@ -122,6 +122,7 @@ static void fill_vaapi_reference_frames(const HEVCContext *h, const HEVCLayerCon
 }
 
 static int vaapi_hevc_start_frame(AVCodecContext *avctx,
+                                  av_unused const AVBufferRef *buffer_ref,
                                   av_unused const uint8_t *buffer,
                                   av_unused uint32_t size)
 {

@@ -24,6 +24,7 @@
 #include "mjpegdec.h"
 
 static int vaapi_mjpeg_start_frame(AVCodecContext *avctx,
+                                   av_unused const AVBufferRef *buffer_ref,
                                    av_unused const uint8_t *buffer,
                                    av_unused uint32_t size)
 {

@@ -39,7 +39,10 @@ static inline int mpeg2_get_is_frame_start(const MpegEncContext *s)
     return s->first_field || s->picture_structure == PICT_FRAME;
 }
 
-static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
+static int vaapi_mpeg2_start_frame(AVCodecContext *avctx,
+                                   av_unused const AVBufferRef *buffer_ref,
+                                   av_unused const uint8_t *buffer,
+                                   av_unused uint32_t size)
 {
     const MpegEncContext *s = avctx->priv_data;
     VAAPIDecodePicture *pic = s->cur_pic.ptr->hwaccel_picture_private;

@@ -45,7 +45,10 @@ static int mpeg4_get_intra_dc_vlc_thr(Mpeg4DecContext *s)
         return 0;
 }
 
-static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
+static int vaapi_mpeg4_start_frame(AVCodecContext *avctx,
+                                   av_unused const AVBufferRef *buffer_ref,
+                                   av_unused const uint8_t *buffer,
+                                   av_unused uint32_t size)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;
     MpegEncContext *s = &ctx->m;

@@ -249,7 +249,10 @@ static inline void vc1_pack_bitplanes(uint8_t *bitplane, int n, const uint8_t *f
         bitplane[bitplane_index] = (bitplane[bitplane_index] << 4) | v;
 }
 
-static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
+static int vaapi_vc1_start_frame(AVCodecContext *avctx,
+                                 av_unused const AVBufferRef *buffer_ref,
+                                 av_unused const uint8_t *buffer,
+                                 av_unused uint32_t size)
 {
     const VC1Context *v = avctx->priv_data;
     const MpegEncContext *s = &v->s;

@@ -32,6 +32,7 @@ static VASurfaceID vaapi_vp8_surface_id(VP8Frame *vf)
 }
 
 static int vaapi_vp8_start_frame(AVCodecContext *avctx,
+                                 av_unused const AVBufferRef *buffer_ref,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t size)
 {

@@ -35,6 +35,7 @@ static VASurfaceID vaapi_vp9_surface_id(const VP9Frame *vf)
 }
 
 static int vaapi_vp9_start_frame(AVCodecContext *avctx,
+                                 av_unused const AVBufferRef *buffer_ref,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t size)
 {

@@ -80,6 +80,7 @@ static void fill_vaapi_reference_frames(const VVCFrameContext *h, VAPictureParam
 }
 
 static int vaapi_vvc_start_frame(AVCodecContext *avctx,
+                                 av_unused const AVBufferRef *buffer_ref,
                                  av_unused const uint8_t *buffer,
                                  av_unused uint32_t size)
 {
@@ -1087,7 +1087,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
         if (v->field_mode && buf_start_second_field) {
             // decode first field
             s->picture_structure = PICT_BOTTOM_FIELD - v->tff;
-            ret = hwaccel->start_frame(avctx, buf_start,
+            ret = hwaccel->start_frame(avctx, avpkt->buf, buf_start,
                                        buf_start_second_field - buf_start);
             if (ret < 0)
                 goto err;

@@ -1142,7 +1142,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
             }
             v->s.cur_pic.ptr->f->pict_type = v->s.pict_type;
 
-            ret = hwaccel->start_frame(avctx, buf_start_second_field,
+            ret = hwaccel->start_frame(avctx, avpkt->buf, buf_start_second_field,
                                        (buf + buf_size) - buf_start_second_field);
             if (ret < 0)
                 goto err;

@@ -1185,7 +1185,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, AVFrame *pict,
                 goto err;
         } else {
             s->picture_structure = PICT_FRAME;
-            ret = hwaccel->start_frame(avctx, buf_start,
+            ret = hwaccel->start_frame(avctx, avpkt->buf, buf_start,
                                        (buf + buf_size) - buf_start);
             if (ret < 0)
                 goto err;
@@ -41,7 +41,8 @@ static int get_bit_depth_from_seq(const AV1RawSequenceHeader *seq)
 }
 
 static int vdpau_av1_start_frame(AVCodecContext *avctx,
-                                 const uint8_t *buffer, uint32_t size)
+                                 const AVBufferRef *buffer_ref,
+                                 const uint8_t *buffer, uint32_t size)
 {
     AV1DecContext *s = avctx->priv_data;
     const AV1RawSequenceHeader *seq = s->raw_seq;

@@ -118,6 +118,7 @@ static void vdpau_h264_set_reference_frames(AVCodecContext *avctx)
 }
 
 static int vdpau_h264_start_frame(AVCodecContext *avctx,
+                                  const AVBufferRef *buffer_ref,
                                   const uint8_t *buffer, uint32_t size)
 {
     H264Context * const h = avctx->priv_data;

@@ -32,6 +32,7 @@
 
 
 static int vdpau_hevc_start_frame(AVCodecContext *avctx,
+                                  const AVBufferRef *buffer_ref,
                                   const uint8_t *buffer, uint32_t size)
 {
     HEVCContext *h = avctx->priv_data;

@@ -32,6 +32,7 @@
 #include "vdpau_internal.h"
 
 static int vdpau_mpeg_start_frame(AVCodecContext *avctx,
+                                  const AVBufferRef *buffer_ref,
                                   const uint8_t *buffer, uint32_t size)
 {
     MpegEncContext * const s = avctx->priv_data;

@@ -30,6 +30,7 @@
 #include "vdpau_internal.h"
 
 static int vdpau_mpeg4_start_frame(AVCodecContext *avctx,
+                                   const AVBufferRef *buffer_ref,
                                    const uint8_t *buffer, uint32_t size)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;

@@ -32,6 +32,7 @@
 #include "vdpau_internal.h"
 
 static int vdpau_vc1_start_frame(AVCodecContext *avctx,
+                                 const AVBufferRef *buffer_ref,
                                  const uint8_t *buffer, uint32_t size)
 {
     VC1Context * const v = avctx->priv_data;

@@ -29,7 +29,8 @@
 #include "vdpau_internal.h"
 
 static int vdpau_vp9_start_frame(AVCodecContext *avctx,
-                                 const uint8_t *buffer, uint32_t size)
+                                 const AVBufferRef *buffer_ref,
+                                 const uint8_t *buffer, uint32_t size)
 {
     VP9Context *s = avctx->priv_data;
     VP9SharedContext *h = &(s->s);
@@ -415,6 +415,7 @@ CFDataRef ff_videotoolbox_hvcc_extradata_create(AVCodecContext *avctx)
 }
 
 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
+                                     const AVBufferRef *buffer_ref,
                                      const uint8_t *buffer,
                                      uint32_t size)
 {

@@ -1084,6 +1085,7 @@ static int videotoolbox_h264_end_frame(AVCodecContext *avctx)
 }
 
 static int videotoolbox_hevc_start_frame(AVCodecContext *avctx,
+                                         const AVBufferRef *buffer_ref,
                                          const uint8_t *buffer,
                                          uint32_t size)
 {

@@ -1127,6 +1129,7 @@ static int videotoolbox_hevc_end_frame(AVCodecContext *avctx)
 }
 
 static int videotoolbox_mpeg_start_frame(AVCodecContext *avctx,
+                                         const AVBufferRef *buffer_ref,
                                          const uint8_t *buffer,
                                          uint32_t size)
 {

@@ -1151,8 +1154,9 @@ static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
 }
 
 static int videotoolbox_prores_start_frame(AVCodecContext *avctx,
-                                           const uint8_t *buffer,
-                                           uint32_t size)
+                                           const AVBufferRef *buffer_ref,
+                                           const uint8_t *buffer,
+                                           uint32_t size)
 {
     return 0;
 }

@@ -63,6 +63,7 @@ CFDataRef ff_videotoolbox_av1c_extradata_create(AVCodecContext *avctx)
 
 
 static int videotoolbox_av1_start_frame(AVCodecContext *avctx,
+                                        const AVBufferRef *buffer_ref,
                                         const uint8_t *buffer,
                                         uint32_t size)
 {

@@ -104,6 +104,7 @@ CFDataRef ff_videotoolbox_vpcc_extradata_create(AVCodecContext *avctx)
 }
 
 static int videotoolbox_vp9_start_frame(AVCodecContext *avctx,
+                                        const AVBufferRef *buffer_ref,
                                         const uint8_t *buffer,
                                         uint32_t size)
 {
@@ -2729,7 +2729,7 @@ int vp78_decode_frame(AVCodecContext *avctx, AVFrame *rframe, int *got_frame,
 
     if (!is_vp7 && avctx->hwaccel) {
         const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
-        ret = hwaccel->start_frame(avctx, avpkt->data, avpkt->size);
+        ret = hwaccel->start_frame(avctx, avpkt->buf, avpkt->data, avpkt->size);
         if (ret < 0)
             goto err;
 

@@ -1618,7 +1618,7 @@ static int vp9_decode_frame(AVCodecContext *avctx, AVFrame *frame,
 
     if (avctx->hwaccel) {
         const FFHWAccel *hwaccel = ffhwaccel(avctx->hwaccel);
-        ret = hwaccel->start_frame(avctx, NULL, 0);
+        ret = hwaccel->start_frame(avctx, pkt->buf, pkt->data, pkt->size);
         if (ret < 0)
             return ret;
         ret = hwaccel->decode_slice(avctx, pkt->data, pkt->size);
@@ -61,6 +61,7 @@ int ff_videotoolbox_buffer_append(VTContext *vtctx,
                                   uint32_t size);
 int ff_videotoolbox_uninit(AVCodecContext *avctx);
 int ff_videotoolbox_h264_start_frame(AVCodecContext *avctx,
+                                     const AVBufferRef *buffer_ref,
                                      const uint8_t *buffer,
                                      uint32_t size);
 int ff_videotoolbox_h264_decode_slice(AVCodecContext *avctx,
@@ -245,6 +245,7 @@ static int vk_av1_create_params(AVCodecContext *avctx, AVBufferRef **buf,
 }
 
 static int vk_av1_start_frame(AVCodecContext *avctx,
+                              av_unused const AVBufferRef *buffer_ref,
                              av_unused const uint8_t *buffer,
                              av_unused uint32_t size)
 {

@@ -359,6 +359,7 @@ static int vk_h264_create_params(AVCodecContext *avctx, AVBufferRef **buf)
 }
 
 static int vk_h264_start_frame(AVCodecContext *avctx,
+                               av_unused const AVBufferRef *buffer_ref,
                               av_unused const uint8_t *buffer,
                               av_unused uint32_t size)
 {

@@ -709,6 +709,7 @@ static int vk_hevc_create_params(AVCodecContext *avctx, AVBufferRef **buf)
 }
 
 static int vk_hevc_start_frame(AVCodecContext *avctx,
+                               av_unused const AVBufferRef *buffer_ref,
                               av_unused const uint8_t *buffer,
                              av_unused uint32_t size)
 {
@@ -831,7 +831,8 @@ static int frame_setup(VVCFrameContext *fc, VVCContext *s)
     return 0;
 }
 
-static int decode_slice(VVCContext *s, VVCFrameContext *fc, const H2645NAL *nal, const CodedBitstreamUnit *unit)
+static int decode_slice(VVCContext *s, VVCFrameContext *fc, AVBufferRef *buf_ref,
+                        const H2645NAL *nal, const CodedBitstreamUnit *unit)
 {
     int ret;
     SliceContext *sc;

@@ -860,7 +861,7 @@ static int decode_slice(VVCContext *s, VVCFrameContext *fc, const H2645NAL *nal,
 
     if (s->avctx->hwaccel) {
         if (is_first_slice) {
-            ret = FF_HW_CALL(s->avctx, start_frame, NULL, 0);
+            ret = FF_HW_CALL(s->avctx, start_frame, buf_ref, NULL, 0);
             if (ret < 0)
                 return ret;
         }

@@ -876,7 +877,8 @@ static int decode_slice(VVCContext *s, VVCFrameContext *fc, const H2645NAL *nal,
     return 0;
 }
 
-static int decode_nal_unit(VVCContext *s, VVCFrameContext *fc, const H2645NAL *nal, const CodedBitstreamUnit *unit)
+static int decode_nal_unit(VVCContext *s, VVCFrameContext *fc, AVBufferRef *buf_ref,
+                           const H2645NAL *nal, const CodedBitstreamUnit *unit)
 {
     int ret;
 

@@ -902,7 +904,7 @@ static int decode_nal_unit(VVCContext *s, VVCFrameContext *fc, const H2645NAL *n
     case VVC_IDR_N_LP:
     case VVC_CRA_NUT:
     case VVC_GDR_NUT:
-        ret = decode_slice(s, fc, nal, unit);
+        ret = decode_slice(s, fc, buf_ref, nal, unit);
         if (ret < 0)
             return ret;
         break;

@@ -940,7 +942,7 @@ static int decode_nal_units(VVCContext *s, VVCFrameContext *fc, AVPacket *avpkt)
         if (unit->type == VVC_EOB_NUT || unit->type == VVC_EOS_NUT) {
             s->last_eos = 1;
         } else {
-            ret = decode_nal_unit(s, fc, nal, unit);
+            ret = decode_nal_unit(s, fc, avpkt->buf, nal, unit);
             if (ret < 0) {
                 av_log(s->avctx, AV_LOG_WARNING,
                        "Error parsing NAL unit #%d.\n", i);