Windows Media Image decoder (WMVP/WVP2)
Signed-off-by: Anton Khirnov <anton@khirnov.net>
parent 3be5a94351
commit 45ecda8554

Changelog
@@ -41,6 +41,7 @@ easier to use. The changes are:
       '-preset <presetname>'.
     * -intra option was removed, it's equivalent to -g 0.
 - XMV demuxer
+- Windows Media Image decoder
 
 
 version 0.7:

configure
@@ -1354,6 +1354,7 @@ vc1_dxva2_hwaccel_deps="dxva2api_h DXVA_PictureParameters_wDecodedPictureIndex"
 vc1_dxva2_hwaccel_select="dxva2 vc1_decoder"
 vc1_vaapi_hwaccel_select="vaapi vc1_decoder"
 vc1_vdpau_decoder_select="vdpau vc1_decoder"
+vc1image_decoder_select="vc1_decoder"
 vorbis_decoder_select="mdct"
 vorbis_encoder_select="mdct"
 vp6_decoder_select="huffman"
@@ -1374,6 +1375,7 @@ wmv3_decoder_select="vc1_decoder"
 wmv3_dxva2_hwaccel_select="vc1_dxva2_hwaccel"
 wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel"
 wmv3_vdpau_decoder_select="vc1_vdpau_decoder"
+wmv3image_decoder_select="wmv3_decoder"
 zlib_decoder_select="zlib"
 zlib_encoder_select="zlib"
 zmbv_decoder_select="zlib"

doc/general.texi
@@ -518,6 +518,7 @@ following image formats are supported:
 @item VMware Screen Codec / VMware Video @tab @tab X
 @tab Codec used in videos captured by VMware.
 @item Westwood Studios VQA (Vector Quantized Animation) video @tab @tab X
+@item Windows Media Image @tab @tab X
 @item Windows Media Video 7 @tab X @tab X
 @item Windows Media Video 8 @tab X @tab X
 @item Windows Media Video 9 @tab @tab X

libavcodec/allcodecs.c
@@ -203,6 +203,7 @@ void avcodec_register_all(void)
     REGISTER_DECODER (VB, vb);
     REGISTER_DECODER (VC1, vc1);
     REGISTER_DECODER (VC1_VDPAU, vc1_vdpau);
+    REGISTER_DECODER (VC1IMAGE, vc1image);
     REGISTER_DECODER (VCR1, vcr1);
     REGISTER_DECODER (VMDVIDEO, vmdvideo);
     REGISTER_DECODER (VMNC, vmnc);
@@ -217,6 +218,7 @@ void avcodec_register_all(void)
     REGISTER_ENCDEC (WMV2, wmv2);
     REGISTER_DECODER (WMV3, wmv3);
     REGISTER_DECODER (WMV3_VDPAU, wmv3_vdpau);
+    REGISTER_DECODER (WMV3IMAGE, wmv3image);
     REGISTER_DECODER (WNV1, wnv1);
     REGISTER_DECODER (XAN_WC3, xan_wc3);
     REGISTER_DECODER (XAN_WC4, xan_wc4);

libavcodec/avcodec.h
@@ -208,6 +208,8 @@ enum CodecID {
     CODEC_ID_PRORES,
     CODEC_ID_JV,
     CODEC_ID_DFA,
+    CODEC_ID_WMV3IMAGE,
+    CODEC_ID_VC1IMAGE,
 
     /* various PCM "codecs" */
     CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs
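
Note (not part of the patch): the two new IDs map straight onto the two Windows Media Image fourccs, WMVP for CODEC_ID_WMV3IMAGE and WVP2 for CODEC_ID_VC1IMAGE, exactly as the libavformat/riff.c hunk at the end of this commit records. A minimal sketch of that mapping; the helper name is made up for illustration and it assumes this tree's headers are in the include path:

    #include "libavcodec/avcodec.h"   /* enum CodecID; MKTAG comes in via libavutil */

    /* hypothetical helper mirroring the ff_codec_bmp_tags entries below */
    static enum CodecID wm_image_codec_id(unsigned int fourcc)
    {
        switch (fourcc) {
        case MKTAG('W', 'M', 'V', 'P'): return CODEC_ID_WMV3IMAGE; /* WMV9 Image    */
        case MKTAG('W', 'V', 'P', '2'): return CODEC_ID_VC1IMAGE;  /* WMV9 Image v2 */
        default:                        return CODEC_ID_NONE;
        }
    }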

libavcodec/h263dec.c
@@ -91,6 +91,8 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
         break;
     case CODEC_ID_VC1:
     case CODEC_ID_WMV3:
+    case CODEC_ID_VC1IMAGE:
+    case CODEC_ID_WMV3IMAGE:
         s->h263_pred = 1;
         s->msmpeg4_version=6;
         avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;

libavcodec/mpegvideo.c
@@ -208,7 +208,12 @@ void ff_copy_picture(Picture *dst, Picture *src){
  */
 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
 {
-    ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
+    /* Windows Media Image codecs allocate internal buffers with different
+       dimensions; ignore user defined callbacks for these */
+    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
+        ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
+    else
+        avcodec_default_release_buffer(s->avctx, (AVFrame*)pic);
     av_freep(&pic->f.hwaccel_picture_private);
 }
 
@@ -230,7 +235,10 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
         }
     }
 
-    r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
+    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
+        r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
+    else
+        r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic);
 
     if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",

libavcodec/vc1.c
@@ -314,9 +314,6 @@ int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitConte
                    "Old interlaced mode is not supported\n");
             return -1;
         }
-        if (v->res_sprite) {
-            av_log(avctx, AV_LOG_ERROR, "WMVP is not fully supported\n");
-        }
     }
 
     // (fps-2)/4 (->30)

libavcodec/vc1.h
@@ -311,6 +311,9 @@ typedef struct VC1Context{
     //@{
     int new_sprite;
     int two_sprites;
+    AVFrame sprite_output_frame;
+    int output_width, output_height, sprite_width, sprite_height;
+    uint8_t* sr_rows[2][2];      ///< Sprite resizer line cache
     //@}
 
     int p_frame_skipped;

libavcodec/vc1dec.c
@@ -3278,116 +3278,279 @@ static void vc1_decode_blocks(VC1Context *v)
     }
 }
 
-static inline float get_float_val(GetBitContext* gb)
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+
+typedef struct {
+    /**
+     * Transform coefficients for both sprites in 16.16 fixed point format,
+     * in the order they appear in the bitstream:
+     * x scale
+     * rotation 1 (unused)
+     * x offset
+     * rotation 2 (unused)
+     * y scale
+     * y offset
+     * alpha
+     */
+    int coefs[2][7];
+
+    int effect_type, effect_flag;
+    int effect_pcount1, effect_pcount2; ///< amount of effect parameters stored in effect_params
+    int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
+} SpriteData;
+
+static inline int get_fp_val(GetBitContext* gb)
 {
-    return (float)get_bits_long(gb, 30) / (1<<15) - (1<<14);
+    return (get_bits_long(gb, 30) - (1<<29)) << 1;
 }
 
-static void vc1_sprite_parse_transform(VC1Context *v, GetBitContext* gb, float c[7])
+static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
 {
-    c[1] = c[3] = 0.0f;
+    c[1] = c[3] = 0;
 
     switch (get_bits(gb, 2)) {
     case 0:
-        c[0] = 1.0f;
-        c[2] = get_float_val(gb);
-        c[4] = 1.0f;
+        c[0] = 1<<16;
+        c[2] = get_fp_val(gb);
+        c[4] = 1<<16;
         break;
     case 1:
-        c[0] = c[4] = get_float_val(gb);
-        c[2] = get_float_val(gb);
+        c[0] = c[4] = get_fp_val(gb);
+        c[2] = get_fp_val(gb);
         break;
     case 2:
-        c[0] = get_float_val(gb);
-        c[2] = get_float_val(gb);
-        c[4] = get_float_val(gb);
+        c[0] = get_fp_val(gb);
+        c[2] = get_fp_val(gb);
+        c[4] = get_fp_val(gb);
         break;
     case 3:
-        av_log_ask_for_sample(v->s.avctx, NULL);
-        c[0] = get_float_val(gb);
-        c[1] = get_float_val(gb);
-        c[2] = get_float_val(gb);
-        c[3] = get_float_val(gb);
-        c[4] = get_float_val(gb);
+        c[0] = get_fp_val(gb);
+        c[1] = get_fp_val(gb);
+        c[2] = get_fp_val(gb);
+        c[3] = get_fp_val(gb);
+        c[4] = get_fp_val(gb);
         break;
     }
-    c[5] = get_float_val(gb);
+    c[5] = get_fp_val(gb);
     if (get_bits1(gb))
-        c[6] = get_float_val(gb);
+        c[6] = get_fp_val(gb);
     else
-        c[6] = 1.0f;
+        c[6] = 1<<16;
 }
 
-static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb)
+static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
 {
-    int effect_type, effect_flag, effect_pcount1, effect_pcount2, i;
-    float effect_params1[14], effect_params2[10];
-
-    float coefs[2][7];
-    vc1_sprite_parse_transform(v, gb, coefs[0]);
-    av_log(v->s.avctx, AV_LOG_DEBUG, "S1:");
-    for (i = 0; i < 7; i++)
-        av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", coefs[0][i]);
-    av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
-
-    if (v->two_sprites) {
-        vc1_sprite_parse_transform(v, gb, coefs[1]);
-        av_log(v->s.avctx, AV_LOG_DEBUG, "S2:");
+    AVCodecContext *avctx = v->s.avctx;
+    int sprite, i;
+
+    for (sprite = 0; sprite <= v->two_sprites; sprite++) {
+        vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
+        if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
+            av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
+        av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
         for (i = 0; i < 7; i++)
-            av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", coefs[1][i]);
-        av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
+            av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
+                   sd->coefs[sprite][i] / (1<<16),
+                   (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1<<16));
+        av_log(avctx, AV_LOG_DEBUG, "\n");
     }
 
     skip_bits(gb, 2);
-    if (effect_type = get_bits_long(gb, 30)){
-        switch (effect_pcount1 = get_bits(gb, 4)) {
-        case 2:
-            effect_params1[0] = get_float_val(gb);
-            effect_params1[1] = get_float_val(gb);
-            break;
+    if (sd->effect_type = get_bits_long(gb, 30)) {
+        switch (sd->effect_pcount1 = get_bits(gb, 4)) {
         case 7:
-            vc1_sprite_parse_transform(v, gb, effect_params1);
+            vc1_sprite_parse_transform(gb, sd->effect_params1);
             break;
         case 14:
-            vc1_sprite_parse_transform(v, gb, effect_params1);
-            vc1_sprite_parse_transform(v, gb, &effect_params1[7]);
+            vc1_sprite_parse_transform(gb, sd->effect_params1);
+            vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
             break;
         default:
-            av_log_ask_for_sample(v->s.avctx, NULL);
-            return;
+            for (i = 0; i < sd->effect_pcount1; i++)
+                sd->effect_params1[i] = get_fp_val(gb);
         }
-        if (effect_type != 13 || effect_params1[0] != coefs[0][6]) {
+        if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
             // effect 13 is simple alpha blending and matches the opacity above
-            av_log(v->s.avctx, AV_LOG_DEBUG, "Effect: %d; params: ", effect_type);
-            for (i = 0; i < effect_pcount1; i++)
-                av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", effect_params1[i]);
-            av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
+            av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
+            for (i = 0; i < sd->effect_pcount1; i++)
+                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
+                       sd->effect_params1[i] / (1<<16),
+                       (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1<<16));
+            av_log(avctx, AV_LOG_DEBUG, "\n");
         }
 
-        effect_pcount2 = get_bits(gb, 16);
-        if (effect_pcount2 > 10) {
-            av_log(v->s.avctx, AV_LOG_ERROR, "Too many effect parameters\n");
+        sd->effect_pcount2 = get_bits(gb, 16);
+        if (sd->effect_pcount2 > 10) {
+            av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
             return;
-        } else if (effect_pcount2) {
-            i = 0;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "Effect params 2: ");
-            while (i < effect_pcount2){
-                effect_params2[i] = get_float_val(gb);
-                av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", effect_params2[i]);
-                i++;
+        } else if (sd->effect_pcount2) {
+            i = -1;
+            av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
+            while (++i < sd->effect_pcount2){
+                sd->effect_params2[i] = get_fp_val(gb);
+                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
+                       sd->effect_params2[i] / (1<<16),
+                       (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1<<16));
             }
-            av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
+            av_log(avctx, AV_LOG_DEBUG, "\n");
         }
     }
-    if (effect_flag = get_bits1(gb))
-        av_log(v->s.avctx, AV_LOG_DEBUG, "Effect flag set\n");
+    if (sd->effect_flag = get_bits1(gb))
+        av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
 
     if (get_bits_count(gb) >= gb->size_in_bits +
-            (v->s.avctx->codec_id == CODEC_ID_WMV3 ? 64 : 0))
-        av_log(v->s.avctx, AV_LOG_ERROR, "Buffer overrun\n");
+            (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
+        av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
     if (get_bits_count(gb) < gb->size_in_bits - 8)
-        av_log(v->s.avctx, AV_LOG_WARNING, "Buffer not fully read\n");
+        av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
 }
 
+static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
+{
+    int i, plane, row, sprite;
+    int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
+    uint8_t* src_h[2][2];
+    int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
+    int ysub[2];
+    MpegEncContext *s = &v->s;
+
+    for (i = 0; i < 2; i++) {
+        xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
+        xadv[i] = sd->coefs[i][0];
+        if (xadv[i] != 1<<16 || (v->sprite_width<<16) - (v->output_width<<16) - xoff[i])
+            xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
+
+        yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
+        yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height<<16) - yoff[i]) / v->output_height);
+    }
+    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
+
+    for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
+        int width = v->output_width>>!!plane;
+
+        for (row = 0; row < v->output_height>>!!plane; row++) {
+            uint8_t *dst = v->sprite_output_frame.data[plane] +
+                           v->sprite_output_frame.linesize[plane] * row;
+
+            for (sprite = 0; sprite <= v->two_sprites; sprite++) {
+                uint8_t *iplane = s->current_picture.f.data[plane];
+                int iline = s->current_picture.f.linesize[plane];
+                int ycoord = yoff[sprite] + yadv[sprite]*row;
+                int yline = ycoord>>16;
+                ysub[sprite] = ycoord&0xFFFF;
+                if (sprite) {
+                    iplane = s->last_picture.f.data[plane];
+                    iline = s->last_picture.f.linesize[plane];
+                }
+                if (!(xoff[sprite]&0xFFFF) && xadv[sprite] == 1<<16) {
+                        src_h[sprite][0] = iplane+(xoff[sprite]>>16)+ yline *iline;
+                    if (ysub[sprite])
+                        src_h[sprite][1] = iplane+(xoff[sprite]>>16)+(yline+1)*iline;
+                } else {
+                    if (sr_cache[sprite][0] != yline) {
+                        if (sr_cache[sprite][1] == yline) {
+                            FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
+                            FFSWAP(int, sr_cache[sprite][0], sr_cache[sprite][1]);
+                        } else {
+                            v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane+yline*iline, xoff[sprite], xadv[sprite], width);
+                            sr_cache[sprite][0] = yline;
+                        }
+                    }
+                    if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
+                        v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane+(yline+1)*iline, xoff[sprite], xadv[sprite], width);
+                        sr_cache[sprite][1] = yline + 1;
+                    }
+                    src_h[sprite][0] = v->sr_rows[sprite][0];
+                    src_h[sprite][1] = v->sr_rows[sprite][1];
+                }
+            }
+
+            if (!v->two_sprites) {
+                if (ysub[0]) {
+                    v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
+                } else {
+                    memcpy(dst, src_h[0][0], width);
+                }
+            } else {
+                if (ysub[0] && ysub[1]) {
+                    v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
+                                                       src_h[1][0], src_h[1][1], ysub[1], alpha, width);
+                } else if (ysub[0]) {
+                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
+                                                       src_h[1][0], alpha, width);
+                } else if (ysub[1]) {
+                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
+                                                       src_h[0][0], (1<<16)-1-alpha, width);
+                } else {
+                    v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
+                }
+            }
+        }
+
+        if (!plane) {
+            for (i = 0; i < 2; i++) {
+                xoff[i] >>= 1;
+                yoff[i] >>= 1;
+            }
+        }
+
+    }
+}
+
+
+static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
+{
+    MpegEncContext *s = &v->s;
+    AVCodecContext *avctx = s->avctx;
+    SpriteData sd;
+
+    vc1_parse_sprites(v, gb, &sd);
+
+    if (!s->current_picture.f.data[0]) {
+        av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
+        return -1;
+    }
+
+    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
+        av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
+        v->two_sprites = 0;
+    }
+
+    if (v->sprite_output_frame.data[0])
+        avctx->release_buffer(avctx, &v->sprite_output_frame);
+
+    v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
+    v->sprite_output_frame.reference = 0;
+    if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return -1;
+    }
+
+    vc1_draw_sprites(v, &sd);
+
+    return 0;
+}
+
+static void vc1_sprite_flush(AVCodecContext *avctx)
+{
+    VC1Context *v = avctx->priv_data;
+    MpegEncContext *s = &v->s;
+    AVFrame *f = &s->current_picture.f;
+    int plane, i;
+
+    /* Windows Media Image codecs have a convergence interval of two keyframes.
+       Since we can't enforce it, clear to black the missing sprite. This is
+       wrong but it looks better than doing nothing. */
+
+    if (f->data[0])
+        for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
+            for (i = 0; i < v->sprite_height>>!!plane; i++)
+                memset(f->data[plane]+i*f->linesize[plane],
+                       plane ? 128 : 0, f->linesize[plane]);
+}
+
+#endif
+
 /** Initialize a VC1/WMV3 decoder
  * @todo TODO: Handle VC-1 IDUs (Transport level?)
  * @todo TODO: Decypher remaining bits in extra_data
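
Aside (not from the patch): all sprite transform and effect parameters above are 16.16 fixed point, so 1.0 is stored as 1<<16 = 65536, and get_fp_val() turns the raw 30-bit field into that form by subtracting the 1<<29 bias and doubling. A self-contained sketch of the same arithmetic and of the "%d.%.3d" formatting used in the av_log() calls; fp_from_field() is a made-up stand-in that takes the field as a plain integer instead of reading it with get_bits_long():

    #include <stdio.h>
    #include <stdlib.h>

    /* same mapping as get_fp_val(): bias by 1<<29, scale to 16.16 */
    static int fp_from_field(unsigned raw30)
    {
        return ((int)raw30 - (1 << 29)) << 1;
    }

    int main(void)
    {
        unsigned raw = (1u << 29) + (1u << 15);   /* field value encoding 1.0 */
        int fp = fp_from_field(raw);

        /* integer/fractional split as printed by vc1_parse_sprites() */
        printf("%d -> %d.%.3d\n", fp, fp / (1 << 16),
               (abs(fp) & 0xFFFF) * 1000 / (1 << 16));   /* prints: 65536 -> 1.000 */
        return 0;
    }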

@@ -3399,6 +3562,10 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
     GetBitContext gb;
     int i, cur_width, cur_height;
 
+    /* save the container output size for WMImage */
+    v->output_width = avctx->width;
+    v->output_height = avctx->height;
+
     if (!avctx->extradata_size || !avctx->extradata) return -1;
     if (!(avctx->flags & CODEC_FLAG_GRAY))
         avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
@@ -3420,7 +3587,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
 
     cur_width = avctx->coded_width = avctx->width;
     cur_height = avctx->coded_height = avctx->height;
-    if (avctx->codec_id == CODEC_ID_WMV3)
+    if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE)
     {
         int count = 0;
 
@@ -3562,6 +3729,25 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
     }
 
     ff_intrax8_common_init(&v->x8,s);
+
+    if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
+        for (i = 0; i < 4; i++)
+            if (!(v->sr_rows[i>>1][i%2] = av_malloc(v->output_width))) return -1;
+
+        s->low_delay = 1;
+
+        v->sprite_width = avctx->coded_width;
+        v->sprite_height = avctx->coded_height;
+
+        avctx->coded_width = avctx->width = v->output_width;
+        avctx->coded_height = avctx->height = v->output_height;
+
+        // prevent 16.16 overflows
+        if (v->sprite_width > 1<<14 ||
+            v->sprite_height > 1<<14 ||
+            v->output_width > 1<<14 ||
+            v->output_height > 1<<14) return -1;
+    }
     return 0;
 }
 
@@ -3614,7 +3800,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
     }
 
     //for advanced profile we may need to parse and unescape data
-    if (avctx->codec_id == CODEC_ID_VC1) {
+    if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
         int buf_size2 = 0;
         buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
 
@@ -3679,8 +3865,19 @@ static int vc1_decode_frame(AVCodecContext *avctx,
     if (v->res_sprite) {
         v->new_sprite = !get_bits1(&s->gb);
         v->two_sprites = get_bits1(&s->gb);
-        if (!v->new_sprite)
-            goto end;
+        /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
+           we're using the sprite compositor. These are intentionally kept separate
+           so you can get the raw sprites by using the wmv3 decoder for WMVP or
+           the vc1 one for WVP2 */
+        if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
+            if (v->new_sprite) {
+                // switch AVCodecContext parameters to those of the sprites
+                avctx->width = avctx->coded_width = v->sprite_width;
+                avctx->height = avctx->coded_height = v->sprite_height;
+            } else {
+                goto image;
+            }
+        }
     }
 
     // do parse frame header
@@ -3694,8 +3891,10 @@ static int vc1_decode_frame(AVCodecContext *avctx,
         }
     }
 
-    if (v->res_sprite && s->pict_type!=AV_PICTURE_TYPE_I) {
-        av_log(v->s.avctx, AV_LOG_WARNING, "Sprite decoder: expected I-frame\n");
+    if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
+        && s->pict_type!=AV_PICTURE_TYPE_I) {
+        av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
+        goto err;
     }
 
     // for skipping the frame
@@ -3758,6 +3957,19 @@ static int vc1_decode_frame(AVCodecContext *avctx,
 
 assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
 assert(s->current_picture.f.pict_type == s->pict_type);
+
+    if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
+image:
+        avctx->width = avctx->coded_width = v->output_width;
+        avctx->height = avctx->coded_height = v->output_height;
+        if (avctx->skip_frame >= AVDISCARD_NONREF) goto end;
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+        if (vc1_decode_sprites(v, &s->gb)) goto err;
+#endif
+        *pict = v->sprite_output_frame;
+        *data_size = sizeof(AVFrame);
+    } else {
+
     if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
         *pict= *(AVFrame*)s->current_picture_ptr;
     } else if (s->last_picture_ptr != NULL) {
@@ -3769,9 +3981,9 @@ assert(s->current_picture.f.pict_type == s->pict_type);
         ff_print_debug_info(s, pict);
     }
 
+    }
+
 end:
-    if (v->res_sprite)
-        vc1_parse_sprites(v, &s->gb);
     av_free(buf2);
     for (i = 0; i < n_slices; i++)
         av_free(slices[i].buf);
@@ -3793,7 +4005,13 @@ err:
 static av_cold int vc1_decode_end(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
+    int i;
 
+    if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
+        && v->sprite_output_frame.data[0])
+        avctx->release_buffer(avctx, &v->sprite_output_frame);
+    for (i = 0; i < 4; i++)
+        av_freep(&v->sr_rows[i>>1][i%2]);
     av_freep(&v->hrd_rate);
     av_freep(&v->hrd_buffer);
     MPV_common_end(&v->s);
@@ -3880,3 +4098,35 @@ AVCodec ff_vc1_vdpau_decoder = {
     .profiles       = NULL_IF_CONFIG_SMALL(profiles)
 };
 #endif
+
+#if CONFIG_WMV3IMAGE_DECODER
+AVCodec ff_wmv3image_decoder = {
+    .name           = "wmv3image",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_WMV3IMAGE,
+    .priv_data_size = sizeof(VC1Context),
+    .init           = vc1_decode_init,
+    .close          = vc1_decode_end,
+    .decode         = vc1_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
+    .flush          = vc1_sprite_flush,
+    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
+    .pix_fmts       = ff_pixfmt_list_420
+};
+#endif
+
+#if CONFIG_VC1IMAGE_DECODER
+AVCodec ff_vc1image_decoder = {
+    .name           = "vc1image",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_VC1IMAGE,
+    .priv_data_size = sizeof(VC1Context),
+    .init           = vc1_decode_init,
+    .close          = vc1_decode_end,
+    .decode         = vc1_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
+    .flush          = vc1_sprite_flush,
+    .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
+    .pix_fmts       = ff_pixfmt_list_420
+};
+#endif
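
Usage note (not part of the commit): once a build enables these decoders, they are reachable through the ordinary libavcodec lookup, either by the IDs above or by the "wmv3image"/"vc1image" names. A minimal sketch against the lavc 53 API of this tree; the wrapper function is purely illustrative:

    #include "libavcodec/avcodec.h"

    AVCodec *find_wm_image_decoder(int wvp2)
    {
        avcodec_register_all();
        /* or: avcodec_find_decoder_by_name(wvp2 ? "vc1image" : "wmv3image") */
        return avcodec_find_decoder(wvp2 ? CODEC_ID_VC1IMAGE : CODEC_ID_WMV3IMAGE);
    }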

libavcodec/vc1dsp.c
@@ -713,6 +713,66 @@ static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*a
     }
 }
 
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+
+static void sprite_h_c(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
+{
+    while (count--) {
+        int a = src[(offset >> 16)    ];
+        int b = src[(offset >> 16) + 1];
+        *dst++ = a + ((b - a) * (offset&0xFFFF) >> 16);
+        offset += advance;
+    }
+}
+
+static av_always_inline void sprite_v_template(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                               int two_sprites, const uint8_t *src2a, const uint8_t *src2b, int offset2,
+                                               int alpha, int scaled, int width)
+{
+    int a1, b1, a2, b2;
+    while (width--) {
+        a1 = *src1a++;
+        if (scaled) {
+            b1 = *src1b++;
+            a1 = a1 + ((b1 - a1) * offset1 >> 16);
+        }
+        if (two_sprites) {
+            a2 = *src2a++;
+            if (scaled > 1) {
+                b2 = *src2b++;
+                a2 = a2 + ((b2 - a2) * offset2 >> 16);
+            }
+            a1 = a1 + ((a2 - a1) * alpha >> 16);
+        }
+        *dst++ = a1;
+    }
+}
+
+static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
+{
+    sprite_v_template(dst, src1a, src1b, offset, 0, NULL, NULL, 0, 0, 1, width);
+}
+
+static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
+{
+    sprite_v_template(dst, src1a, NULL, 0, 1, src2a, NULL, 0, alpha, 0, width);
+}
+
+static void sprite_v_double_onescale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                       const uint8_t *src2a, int alpha, int width)
+{
+    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, NULL, 0, alpha, 1, width);
+}
+
+static void sprite_v_double_twoscale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                       const uint8_t *src2a, const uint8_t *src2b, int offset2,
+                                       int alpha, int width)
+{
+    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, src2b, offset2, alpha, 2, width);
+}
+
+#endif
+
 av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
     dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c;
     dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c;
@@ -770,6 +830,14 @@ av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
     dsp->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c;
     dsp->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c;
 
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+    dsp->sprite_h = sprite_h_c;
+    dsp->sprite_v_single = sprite_v_single_c;
+    dsp->sprite_v_double_noscale = sprite_v_double_noscale_c;
+    dsp->sprite_v_double_onescale = sprite_v_double_onescale_c;
+    dsp->sprite_v_double_twoscale = sprite_v_double_twoscale_c;
+#endif
+
     if (HAVE_ALTIVEC)
         ff_vc1dsp_init_altivec(dsp);
     if (HAVE_MMX)
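
Aside (not from the patch): sprite_h_c() above is a plain linear resampler driven by a 16.16 offset/advance pair, and the sprite_v_* wrappers blend one or two such rows vertically with an optional 16.16 alpha. A standalone restatement of the horizontal scaler with a tiny test, outside FFmpeg; note the source row needs one guard sample past the last pixel because the loop reads src[(offset >> 16) + 1]:

    #include <stdio.h>
    #include <stdint.h>

    /* same inner loop as sprite_h_c() */
    static void sprite_h(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
    {
        while (count--) {
            int a = src[ offset >> 16     ];
            int b = src[(offset >> 16) + 1];
            *dst++ = a + ((b - a) * (offset & 0xFFFF) >> 16);
            offset += advance;
        }
    }

    int main(void)
    {
        const uint8_t src[5] = { 0, 60, 120, 180, 180 };  /* 4 pixels + one guard sample */
        uint8_t dst[8];
        int i;

        sprite_h(dst, src, 0, 1 << 15, 8);   /* advance = 0.5 in 16.16: 2x upscale */
        for (i = 0; i < 8; i++)
            printf("%d ", dst[i]);           /* prints: 0 30 60 90 120 150 180 180 */
        printf("\n");
        return 0;
    }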

libavcodec/vc1dsp.h
@@ -60,6 +60,16 @@ typedef struct VC1DSPContext {
     /* This is really one func used in VC-1 decoding */
     h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3];
     h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3];
+
+    /* Windows Media Image functions */
+    void (*sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count);
+    void (*sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width);
+    void (*sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width);
+    void (*sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                     const uint8_t *src2a, int alpha, int width);
+    void (*sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                     const uint8_t *src2a, const uint8_t *src2b, int offset2,
+                                     int alpha, int width);
 } VC1DSPContext;
 
 void ff_vc1dsp_init(VC1DSPContext* c);

libavcodec/version.h
@@ -21,7 +21,7 @@
 #define AVCODEC_VERSION_H
 
 #define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR 8
+#define LIBAVCODEC_VERSION_MINOR 9
 #define LIBAVCODEC_VERSION_MICRO 0
 
 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

libavformat/riff.c
@@ -237,10 +237,10 @@ const AVCodecTag ff_codec_bmp_tags[] = {
     { CODEC_ID_QPEG, MKTAG('Q', '1', '.', '0') },
     { CODEC_ID_QPEG, MKTAG('Q', '1', '.', '1') },
     { CODEC_ID_WMV3, MKTAG('W', 'M', 'V', '3') },
-    { CODEC_ID_WMV3, MKTAG('W', 'M', 'V', 'P') },
+    { CODEC_ID_WMV3IMAGE, MKTAG('W', 'M', 'V', 'P') },
     { CODEC_ID_VC1, MKTAG('W', 'V', 'C', '1') },
     { CODEC_ID_VC1, MKTAG('W', 'M', 'V', 'A') },
-    { CODEC_ID_VC1, MKTAG('W', 'V', 'P', '2') },
+    { CODEC_ID_VC1IMAGE, MKTAG('W', 'V', 'P', '2') },
     { CODEC_ID_LOCO, MKTAG('L', 'O', 'C', 'O') },
     { CODEC_ID_WNV1, MKTAG('W', 'N', 'V', '1') },
     { CODEC_ID_AASC, MKTAG('A', 'A', 'S', 'C') },