Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2025-01-24 13:56:33 +02:00)
Merge commit '7b917041184874e7d7cba4450813de7e0bb28a33'
* commit '7b917041184874e7d7cba4450813de7e0bb28a33':
  lavc: Drop deprecated VDPAU codec capability

Merged-by: James Almer <jamrial@gmail.com>
commit c68a3ab96e
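The change is mechanical: every hunk below deletes an FF_API_CAP_VDPAU block and leaves only the generic hwaccel check. As a rough sketch of what callers are left with (illustrative only, not part of this commit; the helper name decode_uses_hwaccel is hypothetical):

#include <stdbool.h>
#include <libavcodec/avcodec.h>

/* Hypothetical helper: with AV_CODEC_CAP_HWACCEL_VDPAU gone, the remaining
 * way to detect hardware-accelerated decoding on a context is the generic
 * hwaccel field (or an attached hw_frames_ctx), as in the hunks below. */
static bool decode_uses_hwaccel(const AVCodecContext *avctx)
{
    return avctx->hwaccel != NULL || avctx->hw_frames_ctx != NULL;
}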
@@ -1031,13 +1031,6 @@ typedef struct RcOverride{
  */
 #define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6)
 
-#if FF_API_CAP_VDPAU
-/**
- * Codec can export data for HW decoding (VDPAU).
- */
-#define AV_CODEC_CAP_HWACCEL_VDPAU (1 << 7)
-#endif
-
 /**
  * Codec can output multiple frames per AVPacket
  * Normally demuxers return one frame at a time, demuxers which do not do
@@ -1222,12 +1215,7 @@ typedef struct RcOverride{
  * This can be used to prevent truncation of the last audio samples.
  */
 #define CODEC_CAP_SMALL_LAST_FRAME AV_CODEC_CAP_SMALL_LAST_FRAME
-#if FF_API_CAP_VDPAU
-/**
- * Codec can export data for HW decoding (VDPAU).
- */
-#define CODEC_CAP_HWACCEL_VDPAU AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
 /**
  * Codec can output multiple frames per AVPacket
  * Normally demuxers return one frame at a time, demuxers which do not do
@@ -1214,10 +1214,6 @@ int ff_get_format(AVCodecContext *avctx, const enum AVPixelFormat *fmt)
 
         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
             break;
-#if FF_API_CAP_VDPAU
-        if (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
-            break;
-#endif
 
         if (avctx->hw_frames_ctx) {
             AVHWFramesContext *hw_frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
@@ -814,9 +814,6 @@ void ff_er_frame_start(ERContext *s)
 static int er_supported(ERContext *s)
 {
     if(s->avctx->hwaccel && s->avctx->hwaccel->decode_slice ||
-#if FF_API_CAP_VDPAU
-       s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU ||
-#endif
        !s->cur_pic.f ||
        s->cur_pic.field_picture
       )
@@ -603,13 +603,6 @@ retry:
     if (!s->divx_packed)
         ff_thread_finish_setup(avctx);
 
-#if FF_API_CAP_VDPAU
-    if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)) {
-        ff_vdpau_mpeg4_decode_picture(avctx->priv_data, s->gb.buffer, s->gb.buffer_end - s->gb.buffer);
-        goto frame_end;
-    }
-#endif
-
     if (avctx->hwaccel) {
         ret = avctx->hwaccel->start_frame(avctx, s->gb.buffer,
                                           s->gb.buffer_end - s->gb.buffer);
@@ -152,12 +152,6 @@ int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
     int err = 0;
     h->mb_y = 0;
 
-#if FF_API_CAP_VDPAU
-    if (CONFIG_H264_VDPAU_DECODER &&
-        h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-        ff_vdpau_h264_set_reference_frames(h);
-#endif
-
     if (in_setup || !(avctx->active_thread_type & FF_THREAD_FRAME)) {
         if (!h->droppable) {
             err = ff_h264_execute_ref_pic_marking(h);
@@ -175,12 +169,6 @@ int ff_h264_field_end(H264Context *h, H264SliceContext *sl, int in_setup)
                    "hardware accelerator failed to decode picture\n");
     }
 
-#if FF_API_CAP_VDPAU
-    if (CONFIG_H264_VDPAU_DECODER &&
-        h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-        ff_vdpau_h264_picture_complete(h);
-#endif
-
     if (!in_setup && !h->droppable)
         ff_thread_report_progress(&h->cur_pic_ptr->tf, INT_MAX,
                                   h->picture_structure == PICT_BOTTOM_FIELD);
@@ -497,11 +497,7 @@ static int h264_frame_start(H264Context *h)
 
     if ((ret = alloc_picture(h, pic)) < 0)
         return ret;
-    if(!h->frame_recovered && !h->avctx->hwaccel
-#if FF_API_CAP_VDPAU
-       && !(h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-#endif
-       )
+    if(!h->frame_recovered && !h->avctx->hwaccel)
         ff_color_frame(pic->f, c);
 
     h->cur_pic_ptr = pic;
@@ -939,17 +935,6 @@ static int h264_slice_header_init(H264Context *h)
         goto fail;
     }
 
-#if FF_API_CAP_VDPAU
-    if (h->avctx->codec &&
-        h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU &&
-        (sps->bit_depth_luma != 8 || sps->chroma_format_idc > 1)) {
-        av_log(h->avctx, AV_LOG_ERROR,
-               "VDPAU decoding does not support video colorspace.\n");
-        ret = AVERROR_INVALIDDATA;
-        goto fail;
-    }
-#endif
-
     if (sps->bit_depth_luma < 8 || sps->bit_depth_luma > 14 ||
         sps->bit_depth_luma == 11 || sps->bit_depth_luma == 13
     ) {
@@ -2738,11 +2723,7 @@ int ff_h264_execute_decode_slices(H264Context *h)
 
     h->slice_ctx[0].next_slice_idx = INT_MAX;
 
-    if (h->avctx->hwaccel || context_count < 1
-#if FF_API_CAP_VDPAU
-        || h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-        )
+    if (h->avctx->hwaccel || context_count < 1)
         return 0;
 
     av_assert0(context_count && h->slice_ctx[context_count - 1].mb_y < h->mb_height);
@@ -527,10 +527,6 @@ static void flush_dpb(AVCodecContext *avctx)
     h->context_initialized = 0;
 }
 
-#if FF_API_CAP_VDPAU
-static const uint8_t start_code[] = { 0x00, 0x00, 0x01 };
-#endif
-
 static int get_last_needed_nal(H264Context *h)
 {
     int nals_needed = 0;
@@ -688,11 +684,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
             if (h->avctx->hwaccel &&
                 (ret = h->avctx->hwaccel->start_frame(h->avctx, buf, buf_size)) < 0)
                 goto end;
-#if FF_API_CAP_VDPAU
-            if (CONFIG_H264_VDPAU_DECODER &&
-                h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU)
-                ff_vdpau_h264_picture_start(h);
-#endif
         }
 
         max_slice_ctx = avctx->hwaccel ? 1 : h->nb_slice_ctx;
@@ -701,18 +692,6 @@ static int decode_nal_units(H264Context *h, const uint8_t *buf, int buf_size)
                 ret = avctx->hwaccel->decode_slice(avctx, nal->raw_data, nal->raw_size);
                 h->nb_slice_ctx_queued = 0;
             } else
-#if FF_API_CAP_VDPAU
-            if (CONFIG_H264_VDPAU_DECODER &&
-                h->avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU) {
-                ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
-                                        start_code,
-                                        sizeof(start_code));
-                ff_vdpau_add_data_chunk(h->cur_pic_ptr->f->data[0],
-                                        nal->raw_data,
-                                        nal->raw_size);
-                ret = 0;
-            } else
-#endif
                 ret = ff_h264_execute_decode_slices(h);
             if (ret < 0 && (h->avctx->err_recognition & AV_EF_EXPLODE))
                 goto end;
@@ -58,11 +58,7 @@ int ff_mpeg_framesize_alloc(AVCodecContext *avctx, MotionEstContext *me,
 {
     int alloc_size = FFALIGN(FFABS(linesize) + 64, 32);
 
-    if (avctx->hwaccel
-#if FF_API_CAP_VDPAU
-        || avctx->codec->capabilities & AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-        )
+    if (avctx->hwaccel)
         return 0;
 
     if (linesize < 24) {
@@ -1311,11 +1311,7 @@ int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
             return -1;
         }
 
-        if (!avctx->hwaccel
-#if FF_API_CAP_VDPAU
-            && !(avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
-#endif
-            ) {
+        if (!avctx->hwaccel) {
             for(i=0; i<avctx->height; i++)
                 memset(s->last_picture_ptr->f->data[0] + s->last_picture_ptr->f->linesize[0]*i,
                        0x80, avctx->width);
@@ -1661,11 +1657,7 @@ void ff_print_debug_info2(AVCodecContext *avctx, AVFrame *pict, uint8_t *mbskip_
     }
 
     /* TODO: export all the following to make them accessible for users (and filters) */
-    if (avctx->hwaccel || !mbtype_table
-#if FF_API_CAP_VDPAU
-        || (avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU)
-#endif
-        )
+    if (avctx->hwaccel || !mbtype_table)
         return;
 
 
@@ -657,15 +657,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         return buf_size;
     }
 
-#if FF_API_CAP_VDPAU
-    if (s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) {
-        if (v->profile < PROFILE_ADVANCED)
-            avctx->pix_fmt = AV_PIX_FMT_VDPAU_WMV3;
-        else
-            avctx->pix_fmt = AV_PIX_FMT_VDPAU_VC1;
-    }
-#endif
-
     //for advanced profile we may need to parse and unescape data
     if (avctx->codec_id == AV_CODEC_ID_VC1 || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
         int buf_size2 = 0;
@@ -684,21 +675,13 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 if (size <= 0) continue;
                 switch (AV_RB32(start)) {
                 case VC1_CODE_FRAME:
-                    if (avctx->hwaccel
-#if FF_API_CAP_VDPAU
-                        || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-                        )
+                    if (avctx->hwaccel)
                         buf_start = start;
                     buf_size2 = vc1_unescape_buffer(start + 4, size, buf2);
                     break;
                 case VC1_CODE_FIELD: {
                     int buf_size3;
-                    if (avctx->hwaccel
-#if FF_API_CAP_VDPAU
-                        || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-                        )
+                    if (avctx->hwaccel)
                         buf_start_second_field = start;
                     tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
                     if (!tmp) {
@@ -764,11 +747,7 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                     ret = AVERROR_INVALIDDATA;
                     goto err;
                 } else { // found field marker, unescape second field
-                    if (avctx->hwaccel
-#if FF_API_CAP_VDPAU
-                        || s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU
-#endif
-                        )
+                    if (avctx->hwaccel)
                         buf_start_second_field = divider;
                     tmp = av_realloc_array(slices, sizeof(*slices), (n_slices+1));
                     if (!tmp) {
@@ -917,17 +896,6 @@ static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
         s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
 
-#if FF_API_CAP_VDPAU
-    if ((CONFIG_VC1_VDPAU_DECODER)
-        &&s->avctx->codec->capabilities&AV_CODEC_CAP_HWACCEL_VDPAU) {
-        if (v->field_mode && buf_start_second_field) {
-            ff_vdpau_vc1_decode_picture(s, buf_start, buf_start_second_field - buf_start);
-            ff_vdpau_vc1_decode_picture(s, buf_start_second_field, (buf + buf_size) - buf_start_second_field);
-        } else {
-            ff_vdpau_vc1_decode_picture(s, buf_start, (buf + buf_size) - buf_start);
-        }
-    } else
-#endif
     if (avctx->hwaccel) {
         s->mb_y = 0;
         if (v->field_mode && buf_start_second_field) {
@@ -214,40 +214,6 @@ attribute_deprecated
 int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile);
 #endif
 
-#if FF_API_CAP_VDPAU
-/** @brief The videoSurface is used for rendering. */
-#define FF_VDPAU_STATE_USED_FOR_RENDER 1
-
-/**
- * @brief The videoSurface is needed for reference/prediction.
- * The codec manipulates this.
- */
-#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2
-
-/**
- * @brief This structure is used as a callback between the FFmpeg
- * decoder (vd_) and presentation (vo_) module.
- * This is used for defining a video frame containing surface,
- * picture parameter, bitstream information etc which are passed
- * between the FFmpeg decoder and its clients.
- */
-struct vdpau_render_state {
-    VdpVideoSurface surface; ///< Used as rendered surface, never changed.
-
-    int state; ///< Holds FF_VDPAU_STATE_* values.
-
-    /** picture parameter information for all supported codecs */
-    union AVVDPAUPictureInfo info;
-
-    /** Describe size/location of the compressed video data.
-        Set to 0 when freeing bitstream_buffers. */
-    int bitstream_buffers_allocated;
-    int bitstream_buffers_used;
-    /** The user is responsible for freeing this buffer using av_freep(). */
-    VdpBitstreamBuffer *bitstream_buffers;
-};
-#endif
-
 /* @}*/
 
 #endif /* AVCODEC_VDPAU_H */
@@ -57,9 +57,6 @@
 #ifndef FF_API_LOWRES
 #define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 59)
 #endif
-#ifndef FF_API_CAP_VDPAU
-#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 58)
-#endif
 #ifndef FF_API_BUFS_VDPAU
 #define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 58)
 #endif