Add VDPAU hardware accelerated decoding for MPEG1 and MPEG2 which can
be used by video players.

Original patch by NVIDIA corporation.

Originally committed as revision 16628 to svn://svn.ffmpeg.org/ffmpeg/trunk
parent 3700d80028
commit d37edddc09
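
As background for the changes below (not part of the commit): a minimal sketch of how a player built against the libavcodec API of this period might use the new decoder. It selects "mpegvideo_vdpau" by name, decodes an MPEG-1/2 elementary stream, and reads the vdpau_render_state that the decoder leaves in data[0]. The function name decode_mpeg_with_vdpau, the single-packet call, and the vdp_decoder_render hand-off shown in the final comment are illustrative assumptions; real players also override get_buffer() so that data[0] really points at one of their own render-state objects (see the sketch at the end of this page).

/* Hypothetical player-side sketch, assuming the old (2009-era) libavcodec API.
 * Error handling and VDPAU device/decoder setup are omitted. */
#include <libavcodec/avcodec.h>
#include <libavcodec/vdpau.h>

static void decode_mpeg_with_vdpau(const uint8_t *pkt, int pkt_size)
{
    avcodec_register_all();

    /* Pick the decoder registered by this commit, by name. */
    AVCodec        *codec = avcodec_find_decoder_by_name("mpegvideo_vdpau");
    AVCodecContext *avctx = avcodec_alloc_context();
    AVFrame        *frame = avcodec_alloc_frame();
    int got_picture = 0;

    /* A real player also installs avctx->get_buffer/release_buffer here
     * (see the buffer-callback sketch at the end of this page). */
    avcodec_open(avctx, codec);
    avcodec_decode_video(avctx, frame, &got_picture, pkt, pkt_size);

    if (got_picture) {
        /* No pixels are returned: data[0] points to a vdpau_render_state
         * carrying the picture info and the buffered slice data. */
        struct vdpau_render_state *render =
            (struct vdpau_render_state *)frame->data[0];

        /* The player now submits this to the hardware through its own
         * vdp_decoder_render function pointer, roughly:
         *   vdp_decoder_render(vdp_decoder, render->surface,
         *                      (VdpPictureInfo const *)&render->info,
         *                      render->bitstream_buffers_used,
         *                      render->bitstream_buffers);
         */
        (void)render;
    }
}

In a real player this sits inside a demux/read loop, and a single call may not yet return a picture because of frame reordering.
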
configure
@@ -993,6 +993,7 @@ mjpeg_encoder_select="aandct"
 mpeg1video_encoder_select="aandct"
 mpeg2video_encoder_select="aandct"
 mpeg4_encoder_select="aandct"
+mpeg_vdpau_decoder_deps="vdpau"
 mpeg_xvmc_decoder_deps="xvmc X11_extensions_XvMClib_h"
 msmpeg4v1_encoder_select="aandct"
 msmpeg4v2_encoder_select="aandct"
@@ -133,6 +133,7 @@ OBJS-$(CONFIG_MP3ON4_DECODER) += mpegaudiodec.o mpegaudiodecheader.o mp
 OBJS-$(CONFIG_MPC7_DECODER) += mpc7.o mpc.o mpegaudiodec.o mpegaudiodecheader.o mpegaudio.o mpegaudiodata.o
 OBJS-$(CONFIG_MPC8_DECODER) += mpc8.o mpc.o mpegaudiodec.o mpegaudiodecheader.o mpegaudio.o mpegaudiodata.o
 OBJS-$(CONFIG_MDEC_DECODER) += mdec.o mpeg12.o mpeg12data.o mpegvideo.o error_resilience.o
+OBJS-$(CONFIG_MPEG_VDPAU_DECODER) += vdpauvideo.o mpeg12.o mpeg12data.o mpegvideo.o error_resilience.o
 OBJS-$(CONFIG_MPEGVIDEO_DECODER) += mpeg12.o mpeg12data.o mpegvideo.o error_resilience.o
 OBJS-$(CONFIG_MPEG1VIDEO_DECODER) += mpeg12.o mpeg12data.o mpegvideo.o error_resilience.o
 OBJS-$(CONFIG_MPEG1VIDEO_ENCODER) += mpeg12enc.o mpeg12data.o mpegvideo_enc.o motion_est.o ratecontrol.o mpeg12.o mpeg12data.o mpegvideo.o error_resilience.o
@@ -109,6 +109,7 @@ void avcodec_register_all(void)
     REGISTER_ENCDEC (MPEG2VIDEO, mpeg2video);
     REGISTER_ENCDEC (MPEG4, mpeg4);
     REGISTER_DECODER (MPEGVIDEO, mpegvideo);
+    REGISTER_DECODER (MPEG_VDPAU, mpeg_vdpau);
     REGISTER_ENCDEC (MSMPEG4V1, msmpeg4v1);
     REGISTER_ENCDEC (MSMPEG4V2, msmpeg4v2);
     REGISTER_ENCDEC (MSMPEG4V3, msmpeg4v3);
@@ -267,6 +267,12 @@ static const PixFmtInfo pix_fmt_info[PIX_FMT_NB] = {
     [PIX_FMT_XVMC_MPEG2_IDCT] = {
         .name = "xvmcidct",
     },
+    [PIX_FMT_VDPAU_MPEG1] = {
+        .name = "vdpau_mpeg1",
+    },
+    [PIX_FMT_VDPAU_MPEG2] = {
+        .name = "vdpau_mpeg2",
+    },
     [PIX_FMT_VDPAU_H264] = {
         .name = "vdpau_h264",
     },
@@ -34,6 +34,7 @@
 #include "mpeg12data.h"
 #include "mpeg12decdata.h"
 #include "bytestream.h"
+#include "vdpau_internal.h"

 //#undef NDEBUG
 //#include <assert.h>
@@ -1218,7 +1219,12 @@ static enum PixelFormat mpeg_get_pixelformat(AVCodecContext *avctx){

     if(avctx->xvmc_acceleration)
         return avctx->get_format(avctx,pixfmt_xvmc_mpg2_420);
-    else{
+    else if(avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU){
+        if(avctx->codec_id == CODEC_ID_MPEG1VIDEO)
+            return PIX_FMT_VDPAU_MPEG1;
+        else
+            return PIX_FMT_VDPAU_MPEG2;
+    }else{
         if(s->chroma_format < 2)
             return PIX_FMT_YUV420P;
         else if(s->chroma_format == 2)
@@ -1307,7 +1313,8 @@ static int mpeg_decode_postinit(AVCodecContext *avctx){

     avctx->pix_fmt = mpeg_get_pixelformat(avctx);
     //until then pix_fmt may be changed right after codec init
-    if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT )
+    if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ||
+        s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU )
         if( avctx->idct_algo == FF_IDCT_AUTO )
             avctx->idct_algo = FF_IDCT_SIMPLE;

@@ -2076,7 +2083,8 @@ static int vcr2_init_sequence(AVCodecContext *avctx)

     avctx->pix_fmt = mpeg_get_pixelformat(avctx);

-    if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT )
+    if( avctx->pix_fmt == PIX_FMT_XVMC_MPEG2_IDCT ||
+        s->avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU )
         if( avctx->idct_algo == FF_IDCT_AUTO )
             avctx->idct_algo = FF_IDCT_SIMPLE;

@@ -2304,6 +2312,10 @@ static int decode_chunks(AVCodecContext *avctx,
                 for(i=0; i<s->slice_count; i++)
                     s2->error_count += s2->thread_context[i]->error_count;
             }
+
+            if (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU)
+                ff_vdpau_mpeg_picture_complete(s2, buf, buf_size, s->slice_count);
+
             if (slice_end(avctx, picture)) {
                 if(s2->last_picture_ptr || s2->low_delay) //FIXME merge with the stuff in mpeg_decode_slice
                     *data_size = sizeof(AVPicture);
@@ -2389,6 +2401,11 @@ static int decode_chunks(AVCodecContext *avctx,
                     return -1;
                 }

+                if (avctx->codec->capabilities&CODEC_CAP_HWACCEL_VDPAU) {
+                    s->slice_count++;
+                    break;
+                }
+
                 if(avctx->thread_count > 1){
                     int threshold= (s2->mb_height*s->slice_count + avctx->thread_count/2) / avctx->thread_count;
                     if(threshold <= mb_y){
@@ -2508,3 +2525,20 @@ AVCodec mpeg_xvmc_decoder = {
 };

 #endif
+
+#if CONFIG_MPEG_VDPAU_DECODER
+AVCodec mpeg_vdpau_decoder = {
+    "mpegvideo_vdpau",
+    CODEC_TYPE_VIDEO,
+    CODEC_ID_MPEG2VIDEO,
+    sizeof(Mpeg1Context),
+    mpeg_decode_init,
+    NULL,
+    mpeg_decode_end,
+    mpeg_decode_frame,
+    CODEC_CAP_DR1 | CODEC_CAP_TRUNCATED | CODEC_CAP_HWACCEL_VDPAU | CODEC_CAP_DELAY,
+    .flush= ff_mpeg_flush,
+    .long_name = NULL_IF_CONFIG_SMALL("MPEG-1/2 video (VDPAU acceleration)"),
+};
+#endif
+
@@ -70,6 +70,7 @@ struct vdpau_render_state {

    /** picture parameter information for all supported codecs */
    union VdpPictureInfo {
+       VdpPictureInfoMPEG1Or2 mpeg;
        VdpPictureInfoH264     h264;
    } info;

@@ -29,6 +29,10 @@

 void ff_vdpau_add_data_chunk(MpegEncContext *s, const uint8_t *buf,
                              int buf_size);
+
+void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
+                                    int buf_size, int slice_count);
+
 void ff_vdpau_h264_set_reference_frames(MpegEncContext *s);
 void ff_vdpau_h264_picture_complete(MpegEncContext *s);

@@ -177,4 +177,59 @@ void ff_vdpau_h264_picture_complete(MpegEncContext *s)
     render->bitstream_buffers_used = 0;
 }

+void ff_vdpau_mpeg_picture_complete(MpegEncContext *s, const uint8_t *buf,
+                                    int buf_size, int slice_count)
+{
+    struct vdpau_render_state * render, * last, * next;
+    int i;
+
+    render = (struct vdpau_render_state*)s->current_picture_ptr->data[0];
+    assert(render);
+
+    /* fill VdpPictureInfoMPEG1Or2 struct */
+    render->info.mpeg.picture_structure          = s->picture_structure;
+    render->info.mpeg.picture_coding_type        = s->pict_type;
+    render->info.mpeg.intra_dc_precision         = s->intra_dc_precision;
+    render->info.mpeg.frame_pred_frame_dct       = s->frame_pred_frame_dct;
+    render->info.mpeg.concealment_motion_vectors = s->concealment_motion_vectors;
+    render->info.mpeg.intra_vlc_format           = s->intra_vlc_format;
+    render->info.mpeg.alternate_scan             = s->alternate_scan;
+    render->info.mpeg.q_scale_type               = s->q_scale_type;
+    render->info.mpeg.top_field_first            = s->top_field_first;
+    render->info.mpeg.full_pel_forward_vector    = s->full_pel[0]; // MPEG-1 only. Set 0 for MPEG-2
+    render->info.mpeg.full_pel_backward_vector   = s->full_pel[1]; // MPEG-1 only. Set 0 for MPEG-2
+    render->info.mpeg.f_code[0][0]               = s->mpeg_f_code[0][0]; // For MPEG-1 fill both horiz. & vert.
+    render->info.mpeg.f_code[0][1]               = s->mpeg_f_code[0][1];
+    render->info.mpeg.f_code[1][0]               = s->mpeg_f_code[1][0];
+    render->info.mpeg.f_code[1][1]               = s->mpeg_f_code[1][1];
+    for (i = 0; i < 64; ++i) {
+        render->info.mpeg.intra_quantizer_matrix[i]     = s->intra_matrix[i];
+        render->info.mpeg.non_intra_quantizer_matrix[i] = s->inter_matrix[i];
+    }
+
+    render->info.mpeg.forward_reference  = VDP_INVALID_HANDLE;
+    render->info.mpeg.backward_reference = VDP_INVALID_HANDLE;
+
+    switch(s->pict_type){
+    case FF_B_TYPE:
+        next = (struct vdpau_render_state*)s->next_picture.data[0];
+        assert(next);
+        render->info.mpeg.backward_reference = next->surface;
+        // no return here, going to set forward prediction
+    case FF_P_TYPE:
+        last = (struct vdpau_render_state*)s->last_picture.data[0];
+        if (!last) // FIXME: Does this test make sense?
+            last = render; // predict second field from the first
+        render->info.mpeg.forward_reference = last->surface;
+    }
+
+    ff_vdpau_add_data_chunk(s, buf, buf_size);
+
+    render->info.mpeg.slice_count = slice_count;
+
+    if (slice_count)
+        ff_draw_horiz_band(s, 0, s->avctx->height);
+    render->bitstream_buffers_used = 0;
+}
+
 /* @}*/
@@ -122,6 +122,8 @@ enum PixelFormat {
     PIX_FMT_YUVJ440P, ///< Planar YUV 4:4:0 full scale (jpeg)
     PIX_FMT_YUVA420P, ///< Planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
     PIX_FMT_VDPAU_H264,///< H264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    PIX_FMT_VDPAU_MPEG1,///< MPEG1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+    PIX_FMT_VDPAU_MPEG2,///< MPEG2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
     PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
 };

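The PIX_FMT_VDPAU_* documentation above states that data[0] carries a vdpau_render_state rather than pixel data, which implies the player owns those structures. Below is a hypothetical sketch (not part of this commit) of the get_buffer()/release_buffer() overrides a player might install for that purpose; the names vdpau_get_buffer, vdpau_release_buffer, render_pool, and next_render are invented here, and the fixed four-entry pool plus the assumption that recycled entries are no longer referenced are simplifications.

/* Hypothetical player-side buffer callbacks, assuming the 2009-era API.
 * The VdpVideoSurface handles inside render_pool are created by the
 * player's own VDPAU setup, which is omitted here. */
#include <libavcodec/avcodec.h>
#include <libavcodec/vdpau.h>

static struct vdpau_render_state render_pool[4];
static int next_render;

static int vdpau_get_buffer(AVCodecContext *avctx, AVFrame *pic)
{
    /* Round-robin reuse; a real player must first check that the entry is
     * no longer needed as a forward/backward reference. */
    struct vdpau_render_state *render = &render_pool[next_render];
    next_render = (next_render + 1) % 4;

    /* Hand the decoder our render state instead of pixel planes. */
    pic->data[0]     = (uint8_t *)render;
    pic->data[1]     = pic->data[2] = NULL;
    pic->linesize[0] = pic->linesize[1] = pic->linesize[2] = 0;
    pic->type        = FF_BUFFER_TYPE_USER;
    return 0;
}

static void vdpau_release_buffer(AVCodecContext *avctx, AVFrame *pic)
{
    /* The render state and its surface remain owned by the player. */
    pic->data[0] = NULL;
}

These would be installed with avctx->get_buffer = vdpau_get_buffer; and avctx->release_buffer = vdpau_release_buffer; before opening the decoder, as mentioned in the sketch near the top of this page.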