
Add VDPAU hardware accelerated decoding for MPEG-4 ASP which can be used by video players.

Original patch by NVIDIA Corporation.

Originally committed as revision 20502 to svn://svn.ffmpeg.org/ffmpeg/trunk
Authored by NVIDIA Corporation on 2009-11-10 18:52:39 +00:00, committed by Carl Eugen Hoyos
parent 00962abaa0
commit 70e0c871eb
8 changed files with 86 additions and 0 deletions
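
For context, here is a minimal sketch of how a video player might drive the new decoder through the public libavcodec API of this revision. It is not part of the commit; error handling is omitted, and my_get_buffer is a hypothetical callback the application must provide so that every AVFrame's data[0] points to a struct vdpau_render_state, as players already do for the existing H.264/VC-1/MPEG-1/2 VDPAU decoders.

#include "libavcodec/avcodec.h"

/* Hypothetical application callback, defined elsewhere by the player. */
int my_get_buffer(struct AVCodecContext *avctx, AVFrame *pic);

static AVCodecContext *open_mpeg4_vdpau(void)
{
    AVCodec *codec;
    AVCodecContext *avctx;

    avcodec_register_all();
    codec = avcodec_find_decoder_by_name("mpeg4_vdpau");
    if (!codec)
        return NULL;                        /* decoder not compiled in */

    avctx = avcodec_alloc_context();
    avctx->get_buffer = my_get_buffer;      /* frames carry vdpau_render_state */
    if (avcodec_open(avctx, codec) < 0) {
        av_free(avctx);
        return NULL;
    }
    /* Decoded frames use PIX_FMT_VDPAU_MPEG4: data[0] holds the bitstream and
     * picture info to be passed to VdpDecoderRender, not raw pixels. */
    return avctx;
}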

configure

@@ -1131,6 +1131,8 @@ mpeg2_vaapi_hwaccel_deps="va_va_h"
mpeg2_vaapi_hwaccel_select="vaapi mpeg2video_decoder"
mpeg4_vaapi_hwaccel_deps="va_va_h"
mpeg4_vaapi_hwaccel_select="vaapi mpeg4_decoder"
mpeg4_vdpau_decoder_deps="vdpau_vdpau_h vdpau_vdpau_x11_h"
mpeg4_vdpau_decoder_select="vdpau mpeg4_decoder"
mpeg_xvmc_decoder_deps="X11_extensions_XvMClib_h"
mpeg_xvmc_decoder_select="mpegvideo_decoder"
msmpeg4v1_encoder_select="h263_encoder"

libavcodec/allcodecs.c

@@ -123,6 +123,7 @@ void avcodec_register_all(void)
    REGISTER_ENCDEC  (MPEG1VIDEO, mpeg1video);
    REGISTER_ENCDEC  (MPEG2VIDEO, mpeg2video);
    REGISTER_ENCDEC  (MPEG4, mpeg4);
    REGISTER_DECODER (MPEG4_VDPAU, mpeg4_vdpau);
    REGISTER_DECODER (MPEGVIDEO, mpegvideo);
    REGISTER_DECODER (MPEG_VDPAU, mpeg_vdpau);
    REGISTER_DECODER (MPEG1_VDPAU, mpeg1_vdpau);
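
For reference, REGISTER_DECODER is a thin conditional wrapper, so the new entry is only registered when configure has enabled mpeg4_vdpau_decoder. A rough sketch of what the added line expands to in this era's allcodecs.c (paraphrased, not a verbatim expansion):

/* REGISTER_DECODER(MPEG4_VDPAU, mpeg4_vdpau) becomes approximately: */
{
    extern AVCodec mpeg4_vdpau_decoder;
    if (CONFIG_MPEG4_VDPAU_DECODER)
        avcodec_register(&mpeg4_vdpau_decoder);
}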

libavcodec/h263dec.c

@@ -32,6 +32,7 @@
#include "h263_parser.h"
#include "mpeg4video_parser.h"
#include "msmpeg4.h"
#include "vdpau_internal.h"

//#define DEBUG
//#define PRINT_FRAME_TIME
@@ -621,6 +622,11 @@ retry:
    if(MPV_frame_start(s, avctx) < 0)
        return -1;

    if (CONFIG_MPEG4_VDPAU_DECODER && (s->avctx->codec->capabilities & CODEC_CAP_HWACCEL_VDPAU)) {
        ff_vdpau_mpeg4_decode_picture(s, buf, buf_size);
        goto frame_end;
    }

    if (avctx->hwaccel) {
        if (avctx->hwaccel->start_frame(avctx, buf, buf_size) < 0)
            return -1;
@@ -695,6 +701,7 @@ retry:
intrax8_decoded:
    ff_er_frame_end(s);

frame_end:
    if (avctx->hwaccel) {
        if (avctx->hwaccel->end_frame(avctx) < 0)
            return -1;
@@ -835,3 +842,19 @@ AVCodec flv_decoder = {
    .long_name= NULL_IF_CONFIG_SMALL("Flash Video (FLV) / Sorenson Spark / Sorenson H.263"),
    .pix_fmts= ff_pixfmt_list_420,
};

#if CONFIG_MPEG4_VDPAU_DECODER
AVCodec mpeg4_vdpau_decoder = {
    "mpeg4_vdpau",
    CODEC_TYPE_VIDEO,
    CODEC_ID_MPEG4,
    sizeof(MpegEncContext),
    ff_h263_decode_init,
    NULL,
    ff_h263_decode_end,
    ff_h263_decode_frame,
    CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY | CODEC_CAP_HWACCEL_VDPAU,
    .long_name= NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
    .pix_fmts= (const enum PixelFormat[]){PIX_FMT_VDPAU_MPEG4, PIX_FMT_NONE},
};
#endif
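
The positional initializers above follow the AVCodec field order of this libavcodec version. For readability only (not part of the commit), the same entry written with designated initializers:

AVCodec mpeg4_vdpau_decoder = {
    .name           = "mpeg4_vdpau",
    .type           = CODEC_TYPE_VIDEO,
    .id             = CODEC_ID_MPEG4,
    .priv_data_size = sizeof(MpegEncContext),
    .init           = ff_h263_decode_init,
    .encode         = NULL,
    .close          = ff_h263_decode_end,
    .decode         = ff_h263_decode_frame,
    .capabilities   = CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_DELAY |
                      CODEC_CAP_HWACCEL_VDPAU,
    .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 (VDPAU)"),
    .pix_fmts       = (const enum PixelFormat[]){PIX_FMT_VDPAU_MPEG4, PIX_FMT_NONE},
};

The decoder reuses the existing software H.263/MPEG-4 init, close and decode entry points; aside from its name, only the CODEC_CAP_HWACCEL_VDPAU capability and the VDPAU-specific pixel format distinguish it from the plain mpeg4 decoder entry.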

libavcodec/imgconvert.c

@@ -376,6 +376,11 @@ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
        .is_hwaccel = 1,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_VDPAU_MPEG4] = {
        .name = "vdpau_mpeg4",
        .is_hwaccel = 1,
        .x_chroma_shift = 1, .y_chroma_shift = 1,
    },
    [PIX_FMT_UYYVYY411] = {
        .name = "uyyvyy411",
        .nb_channels = 1,

libavcodec/vdpau.c

@@ -305,4 +305,54 @@ void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
    render->bitstream_buffers_used = 0;
}

void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                   int buf_size)
{
    struct vdpau_render_state *render, *last, *next;
    int i;

    if (!s->current_picture_ptr) return;

    render = (struct vdpau_render_state *)s->current_picture_ptr->data[0];
    assert(render);

    /* fill VdpPictureInfoMPEG4Part2 struct */
    render->info.mpeg4.vop_time_increment_resolution = s->avctx->time_base.den;
    render->info.mpeg4.vop_coding_type               = 0;
    render->info.mpeg4.vop_fcode_forward             = s->f_code;
    render->info.mpeg4.vop_fcode_backward            = s->b_code;
    render->info.mpeg4.resync_marker_disable         = !s->resync_marker;
    render->info.mpeg4.interlaced                    = !s->progressive_sequence;
    render->info.mpeg4.quant_type                    = s->mpeg_quant;
    render->info.mpeg4.quarter_sample                = s->quarter_sample;
    render->info.mpeg4.short_video_header            = s->avctx->codec->id == CODEC_ID_H263;
    render->info.mpeg4.rounding_control              = s->no_rounding;
    render->info.mpeg4.alternate_vertical_scan_flag  = s->alternate_scan;
    render->info.mpeg4.top_field_first               = s->top_field_first;
    for (i = 0; i < 64; ++i) {
        render->info.mpeg4.intra_quantizer_matrix[i]     = s->intra_matrix[i];
        render->info.mpeg4.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
    }
    render->info.mpeg4.forward_reference             = VDP_INVALID_HANDLE;
    render->info.mpeg4.backward_reference            = VDP_INVALID_HANDLE;

    switch (s->pict_type) {
    case FF_B_TYPE:
        next = (struct vdpau_render_state *)s->next_picture.data[0];
        assert(next);
        render->info.mpeg4.backward_reference = next->surface;
        render->info.mpeg4.vop_coding_type    = 2;
        // no break here, going to set forward prediction
    case FF_P_TYPE:
        last = (struct vdpau_render_state *)s->last_picture.data[0];
        assert(last);
        render->info.mpeg4.forward_reference  = last->surface;
    }

    ff_vdpau_add_data_chunk(s, buf, buf_size);

    ff_draw_horiz_band(s, 0, s->avctx->height);
    render->bitstream_buffers_used = 0;
}
/* @}*/
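
On the player side, the filled-in render state is typically consumed from the draw_horiz_band callback that the ff_draw_horiz_band() call above triggers. A hedged sketch of that consumer follows: the VdpDecoder object and the vdp_decoder_render function pointer are created by the application beforehand (e.g. via VdpDecoderCreate and VdpGetProcAddress) and are assumptions here, as is the render_mpeg4_frame helper itself.

#include <vdpau/vdpau.h>
#include "libavcodec/avcodec.h"
#include "libavcodec/vdpau.h"

static void render_mpeg4_frame(VdpDecoderRender *vdp_decoder_render,
                               VdpDecoder decoder,
                               const AVFrame *frame)
{
    struct vdpau_render_state *render =
        (struct vdpau_render_state *)frame->data[0];

    /* Submit the picture info and the bitstream chunk(s) that
     * ff_vdpau_mpeg4_decode_picture() accumulated in the render state. */
    vdp_decoder_render(decoder,
                       render->surface,
                       (VdpPictureInfo const *)&render->info,
                       render->bitstream_buffers_used,
                       render->bitstream_buffers);
}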

libavcodec/vdpau.h

@@ -73,6 +73,7 @@ struct vdpau_render_state {
        VdpPictureInfoH264       h264;
        VdpPictureInfoMPEG1Or2   mpeg;
        VdpPictureInfoVC1        vc1;
        VdpPictureInfoMPEG4Part2 mpeg4;
    } info;

    /** Describe size/location of the compressed video data.

libavcodec/vdpau_internal.h

@@ -39,4 +39,7 @@ void ff_vdpau_h264_picture_complete(MpegEncContext *s);
void ff_vdpau_vc1_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                 int buf_size);

void ff_vdpau_mpeg4_decode_picture(MpegEncContext *s, const uint8_t *buf,
                                   int buf_size);

#endif /* AVCODEC_VDPAU_INTERNAL_H */

libavutil/pixfmt.h

@@ -123,6 +123,7 @@ enum PixelFormat {
    PIX_FMT_YUV422P16BE,  ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
    PIX_FMT_YUV444P16LE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
    PIX_FMT_YUV444P16BE,  ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
    PIX_FMT_VDPAU_MPEG4,  ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
    PIX_FMT_NB,           ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
};