Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2024-12-23 12:43:46 +02:00)

Commit d2264740e6 — VP3: K&R formatting cosmetics
Parent: 66d04c068a

Changed file: libavcodec/vp3.c (380 lines changed)
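The commit is purely cosmetic: in the hunks below, opening braces move onto the statement line, wrapped conditions keep their operators at the end of the line, long calls are split at argument boundaries, and multi-line // comments become /* ... */ block comments. As a rough, self-contained illustration of those conventions (the function, names, and values here are invented for the example and are not taken from vp3.c):

#include <stdio.h>

/* Hypothetical helper, written only to demonstrate the formatting style
 * this commit applies throughout the decoder. */
static int sum_clipped(const int *v, int n, int lo, int hi)
{
    int sum = 0;

    for (int i = 0; i < n; i++) {            /* K&R: brace on the same line */
        int x = v[i] < lo ? lo
                          : v[i] > hi ? hi : v[i];
        sum += x;
    }

    /* wrapped conditions keep the operator at the end of the line */
    if (sum > hi * n ||
        sum < lo * n)
        printf("sum %d outside [%d, %d]\n",  /* long calls are split at */
               sum, lo * n, hi * n);         /* argument boundaries     */

    return sum;
}

int main(void)
{
    int v[] = { 3, 18, -4, 7 };
    printf("%d\n", sum_clipped(v, 4, 0, 10));
    return 0;
}

The diff itself is presented below as a unified diff; it changes no behaviour, only whitespace, brace placement, line wrapping, and comment style.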
@@ -34,16 +34,17 @@
 #include <string.h>
 
 #include "libavutil/imgutils.h"
 
 #include "avcodec.h"
-#include "internal.h"
 #include "get_bits.h"
 #include "hpeldsp.h"
+#include "internal.h"
 #include "mathops.h"
+#include "thread.h"
 #include "videodsp.h"
 #include "vp3data.h"
 #include "vp3dsp.h"
 #include "xiph.h"
-#include "thread.h"
 
 #define FRAGMENT_PIXELS 8
 
@@ -77,8 +78,7 @@ typedef struct Vp3Fragment {
 #define MODE_COPY 8
 
 /* There are 6 preset schemes, plus a free-form scheme */
-static const int ModeAlphabet[6][CODING_MODE_COUNT] =
-{
+static const int ModeAlphabet[6][CODING_MODE_COUNT] = {
     /* scheme 1: Last motion vector dominates */
     { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
       MODE_INTER_PLUS_MV, MODE_INTER_NO_MV,
@@ -114,7 +114,6 @@ static const int ModeAlphabet[6][CODING_MODE_COUNT] =
     { MODE_INTER_LAST_MV, MODE_INTER_PRIOR_LAST,
       MODE_INTER_PLUS_MV, MODE_INTRA,
       MODE_GOLDEN_MV, MODE_INTER_FOURMV },
-
 };
 
 static const uint8_t hilbert_offset[16][2] = {
@@ -207,7 +206,8 @@ typedef struct Vp3DecodeContext {
 #define TOKEN_COEFF(coeff) (((coeff) << 2) + 2)
 
     /**
-     * number of blocks that contain DCT coefficients at the given level or higher
+     * number of blocks that contain DCT coefficients at
+     * the given level or higher
      */
     int num_coded_frags[3][64];
     int total_num_coded_frags;
@@ -307,7 +307,6 @@ static av_cold int vp3_decode_end(AVCodecContext *avctx)
     ff_free_vlc(&s->mode_code_vlc);
     ff_free_vlc(&s->motion_vector_vlc);
 
-
     return 0;
 }
 
@@ -324,8 +323,10 @@ static int init_block_mapping(Vp3DecodeContext *s)
     int x, y, i, j = 0;
 
     for (plane = 0; plane < 3; plane++) {
-        int sb_width = plane ? s->c_superblock_width : s->y_superblock_width;
-        int sb_height = plane ? s->c_superblock_height : s->y_superblock_height;
+        int sb_width  = plane ? s->c_superblock_width
+                              : s->y_superblock_width;
+        int sb_height = plane ? s->c_superblock_height
+                              : s->y_superblock_height;
         int frag_width = s->fragment_width[!!plane];
         int frag_height = s->fragment_height[!!plane];
 
@@ -336,7 +337,8 @@ static int init_block_mapping(Vp3DecodeContext *s)
                 y = 4 * sb_y + hilbert_offset[i][1];
 
                 if (x < frag_width && y < frag_height)
-                    s->superblock_fragments[j++] = s->fragment_start[plane] + y*frag_width + x;
+                    s->superblock_fragments[j++] = s->fragment_start[plane] +
+                                                   y * frag_width + x;
                 else
                     s->superblock_fragments[j++] = -1;
             }
@@ -367,10 +369,10 @@ static void init_dequantizer(Vp3DecodeContext *s, int qpi)
             bmi = s->qr_base[inter][plane][qri];
             bmj = s->qr_base[inter][plane][qri + 1];
             for (i = 0; i < 64; i++) {
-                int coeff= ( 2*(sum -s->qps[qpi])*s->base_matrix[bmi][i]
-                           - 2*(qistart-s->qps[qpi])*s->base_matrix[bmj][i]
-                           + s->qr_size[inter][plane][qri])
-                          / (2*s->qr_size[inter][plane][qri]);
+                int coeff = (2 * (sum - s->qps[qpi]) * s->base_matrix[bmi][i] -
+                             2 * (qistart - s->qps[qpi]) * s->base_matrix[bmj][i] +
+                             s->qr_size[inter][plane][qri]) /
+                            (2 * s->qr_size[inter][plane][qri]);
 
                 int qmin = 8 << (inter + !i);
                 int qscale = i ? ac_scale_factor : dc_scale_factor;
@@ -378,7 +380,8 @@ static void init_dequantizer(Vp3DecodeContext *s, int qpi)
                 s->qmat[qpi][inter][plane][s->idct_permutation[i]] =
                     av_clip((qscale * coeff) / 100 * 4, qmin, 4096);
             }
-            // all DC coefficients use the same quant so as not to interfere with DC prediction
+            /* all DC coefficients use the same quant so as not to interfere
+             * with DC prediction */
             s->qmat[qpi][inter][plane][0] = s->qmat[0][inter][plane][0];
         }
     }
@@ -421,7 +424,9 @@ static void init_loop_filter(Vp3DecodeContext *s)
  */
 static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
 {
-    int superblock_starts[3] = { 0, s->u_superblock_start, s->v_superblock_start };
+    int superblock_starts[3] = {
+        0, s->u_superblock_start, s->v_superblock_start
+    };
     int bit = 0;
     int current_superblock = 0;
     int current_run = 0;
@@ -433,9 +438,7 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
 
     if (s->keyframe) {
         memset(s->superblock_coding, SB_FULLY_CODED, s->superblock_count);
-
     } else {
-
         /* unpack the list of partially-coded superblocks */
         bit = get_bits1(gb) ^ 1;
         current_run = 0;
@@ -446,13 +449,14 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
             else
                 bit ^= 1;
 
-            current_run = get_vlc2(gb,
-                s->superblock_run_length_vlc.table, 6, 2) + 1;
+            current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
+                                   6, 2) + 1;
             if (current_run == 34)
                 current_run += get_bits(gb, 12);
 
             if (current_superblock + current_run > s->superblock_count) {
-                av_log(s->avctx, AV_LOG_ERROR, "Invalid partially coded superblock run length\n");
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "Invalid partially coded superblock run length\n");
                 return -1;
             }
 
@@ -472,22 +476,22 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
             bit = get_bits1(gb) ^ 1;
             current_run = 0;
 
-            while (superblocks_decoded < s->superblock_count - num_partial_superblocks
-                   && get_bits_left(gb) > 0) {
+            while (superblocks_decoded < s->superblock_count - num_partial_superblocks &&
+                   get_bits_left(gb) > 0) {
 
                 if (s->theora && current_run == MAXIMUM_LONG_BIT_RUN)
                     bit = get_bits1(gb);
                 else
                     bit ^= 1;
 
-                current_run = get_vlc2(gb,
-                    s->superblock_run_length_vlc.table, 6, 2) + 1;
+                current_run = get_vlc2(gb, s->superblock_run_length_vlc.table,
+                                       6, 2) + 1;
                 if (current_run == 34)
                     current_run += get_bits(gb, 12);
 
                 for (j = 0; j < current_run; current_superblock++) {
                     if (current_superblock >= s->superblock_count) {
-                        av_log(s->avctx, AV_LOG_ERROR, "Invalid fully coded superblock run length\n");
+                        av_log(s->avctx, AV_LOG_ERROR,
+                               "Invalid fully coded superblock run length\n");
                         return -1;
                     }
 
@@ -504,7 +508,6 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
     /* if there were partial blocks, initialize bitstream for
      * unpacking fragment codings */
     if (num_partial_superblocks) {
-
         current_run = 0;
         bit = get_bits1(gb);
         /* toggle the bit because as soon as the first run length is
@@ -520,27 +523,24 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
 
     for (plane = 0; plane < 3; plane++) {
         int sb_start = superblock_starts[plane];
-        int sb_end = sb_start + (plane ? s->c_superblock_count : s->y_superblock_count);
+        int sb_end   = sb_start + (plane ? s->c_superblock_count
+                                         : s->y_superblock_count);
         int num_coded_frags = 0;
 
         for (i = sb_start; i < sb_end && get_bits_left(gb) > 0; i++) {
-
             /* iterate through all 16 fragments in a superblock */
             for (j = 0; j < 16; j++) {
-
                 /* if the fragment is in bounds, check its coding status */
                 current_fragment = s->superblock_fragments[i * 16 + j];
                 if (current_fragment != -1) {
                     int coded = s->superblock_coding[i];
 
                     if (s->superblock_coding[i] == SB_PARTIALLY_CODED) {
-
                         /* fragment may or may not be coded; this is the case
                          * that cares about the fragment coding runs */
                         if (current_run-- == 0) {
                             bit ^= 1;
-                            current_run = get_vlc2(gb,
-                                s->fragment_run_length_vlc.table, 5, 2);
+                            current_run = get_vlc2(gb, s->fragment_run_length_vlc.table, 5, 2);
                         }
                         coded = bit;
                     }
@@ -564,7 +564,8 @@ static int unpack_superblocks(Vp3DecodeContext *s, GetBitContext *gb)
         for (i = 0; i < 64; i++)
             s->num_coded_frags[plane][i] = num_coded_frags;
         if (plane < 2)
-            s->coded_fragment_list[plane+1] = s->coded_fragment_list[plane] + num_coded_frags;
+            s->coded_fragment_list[plane + 1] = s->coded_fragment_list[plane] +
+                                                num_coded_frags;
     }
     return 0;
 }
@@ -587,9 +588,7 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
     if (s->keyframe) {
         for (i = 0; i < s->fragment_count; i++)
             s->all_fragments[i].coding_method = MODE_INTRA;
-
     } else {
-
         /* fetch the mode coding scheme for this frame */
         scheme = get_bits(gb, 3);
 
@@ -615,15 +614,18 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
                 int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
                 current_macroblock = mb_y * s->macroblock_width + mb_x;
 
-                if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height)
+                if (mb_x >= s->macroblock_width ||
+                    mb_y >= s->macroblock_height)
                     continue;
 
 #define BLOCK_X (2 * mb_x + (k & 1))
 #define BLOCK_Y (2 * mb_y + (k >> 1))
-                /* coding modes are only stored if the macroblock has at least one
-                 * luma block coded, otherwise it must be INTER_NO_MV */
+                /* coding modes are only stored if the macroblock has
+                 * at least one luma block coded, otherwise it must be
+                 * INTER_NO_MV */
                 for (k = 0; k < 4; k++) {
-                    current_fragment = BLOCK_Y*s->fragment_width[0] + BLOCK_X;
+                    current_fragment = BLOCK_Y *
+                                       s->fragment_width[0] + BLOCK_X;
                     if (s->all_fragments[current_fragment].coding_method != MODE_COPY)
                         break;
                 }
@@ -636,8 +638,7 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
                     if (scheme == 7)
                         coding_mode = get_bits(gb, 3);
                     else
-                        coding_mode = alphabet
-                            [get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
+                        coding_mode = alphabet[get_vlc2(gb, s->mode_code_vlc.table, 3, 3)];
 
                     s->macroblock_coding[current_macroblock] = coding_mode;
                     for (k = 0; k < 4; k++) {
@@ -653,17 +654,20 @@ static int unpack_modes(Vp3DecodeContext *s, GetBitContext *gb)
                         frag[s->fragment_start[2]].coding_method = coding_mode;
 
                     if (s->chroma_y_shift) {
-                        frag = s->all_fragments + mb_y*s->fragment_width[1] + mb_x;
+                        frag = s->all_fragments + mb_y *
+                                                  s->fragment_width[1] + mb_x;
                         SET_CHROMA_MODES
                     } else if (s->chroma_x_shift) {
-                        frag = s->all_fragments + 2*mb_y*s->fragment_width[1] + mb_x;
+                        frag = s->all_fragments +
+                               2 * mb_y * s->fragment_width[1] + mb_x;
                         for (k = 0; k < 2; k++) {
                             SET_CHROMA_MODES
                             frag += s->fragment_width[1];
                         }
                     } else {
                         for (k = 0; k < 4; k++) {
-                            frag = s->all_fragments + BLOCK_Y*s->fragment_width[1] + BLOCK_X;
+                            frag = s->all_fragments +
+                                   BLOCK_Y * s->fragment_width[1] + BLOCK_X;
                             SET_CHROMA_MODES
                         }
                     }
@@ -711,12 +715,12 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
                 int mb_y = 2 * sb_y + (((j >> 1) + j) & 1);
                 current_macroblock = mb_y * s->macroblock_width + mb_x;
 
-                if (mb_x >= s->macroblock_width || mb_y >= s->macroblock_height ||
-                    (s->macroblock_coding[current_macroblock] == MODE_COPY))
+                if (mb_x >= s->macroblock_width ||
+                    mb_y >= s->macroblock_height ||
+                    s->macroblock_coding[current_macroblock] == MODE_COPY)
                     continue;
 
                 switch (s->macroblock_coding[current_macroblock]) {
-
                 case MODE_INTER_PLUS_MV:
                 case MODE_GOLDEN_MV:
                     /* all 6 fragments use the same motion vector */
@@ -729,8 +733,7 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
                     }
 
                     /* vector maintenance, only on MODE_INTER_PLUS_MV */
-                    if (s->macroblock_coding[current_macroblock] ==
-                        MODE_INTER_PLUS_MV) {
+                    if (s->macroblock_coding[current_macroblock] == MODE_INTER_PLUS_MV) {
                         prior_last_motion_x = last_motion_x;
                         prior_last_motion_y = last_motion_y;
                         last_motion_x = motion_x[0];
@@ -810,8 +813,10 @@ static int unpack_vectors(Vp3DecodeContext *s, GetBitContext *gb)
 
             if (s->chroma_y_shift) {
                 if (s->macroblock_coding[current_macroblock] == MODE_INTER_FOURMV) {
-                    motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] + motion_x[2] + motion_x[3], 2);
-                    motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] + motion_y[2] + motion_y[3], 2);
+                    motion_x[0] = RSHIFT(motion_x[0] + motion_x[1] +
+                                         motion_x[2] + motion_x[3], 2);
+                    motion_y[0] = RSHIFT(motion_y[0] + motion_y[1] +
+                                         motion_y[2] + motion_y[3], 2);
                 }
                 motion_x[0] = (motion_x[0] >> 1) | (motion_x[0] & 1);
                 motion_y[0] = (motion_y[0] >> 1) | (motion_y[0] & 1);
@@ -931,13 +936,16 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
     VLC_TYPE(*vlc_table)[2] = table->table;
 
     if (num_coeffs < 0)
-        av_log(s->avctx, AV_LOG_ERROR, "Invalid number of coefficents at level %d\n", coeff_index);
+        av_log(s->avctx, AV_LOG_ERROR,
+               "Invalid number of coefficents at level %d\n", coeff_index);
 
     if (eob_run > num_coeffs) {
-        coeff_i = blocks_ended = num_coeffs;
+        coeff_i      =
+        blocks_ended = num_coeffs;
         eob_run -= num_coeffs;
     } else {
-        coeff_i = blocks_ended = eob_run;
+        coeff_i      =
+        blocks_ended = eob_run;
         eob_run = 0;
     }
 
@@ -991,8 +999,9 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
             }
 
             if (coeff_index + zero_run > 64) {
-                av_log(s->avctx, AV_LOG_DEBUG, "Invalid zero run of %d with"
-                       " %d coeffs left\n", zero_run, 64-coeff_index);
+                av_log(s->avctx, AV_LOG_DEBUG,
+                       "Invalid zero run of %d with %d coeffs left\n",
+                       zero_run, 64 - coeff_index);
                 zero_run = 64 - coeff_index;
             }
 
@@ -1002,8 +1011,7 @@ static int unpack_vlcs(Vp3DecodeContext *s, GetBitContext *gb,
                 s->num_coded_frags[plane][i]--;
                 coeff_i++;
             } else {
-                av_log(s->avctx, AV_LOG_ERROR,
-                       "Invalid token %d\n", token);
+                av_log(s->avctx, AV_LOG_ERROR, "Invalid token %d\n", token);
                 return -1;
             }
         }
@@ -1071,8 +1079,7 @@ static int unpack_dct_coeffs(Vp3DecodeContext *s, GetBitContext *gb)
         return residual_eob_run;
 
     /* reverse prediction of the C-plane DC coefficients */
-    if (!(s->avctx->flags & CODEC_FLAG_GRAY))
-    {
+    if (!(s->avctx->flags & CODEC_FLAG_GRAY)) {
         reverse_dc_prediction(s, s->fragment_start[1],
                               s->fragment_width[1], s->fragment_height[1]);
         reverse_dc_prediction(s, s->fragment_start[2],
@@ -1135,7 +1142,6 @@ static void reverse_dc_prediction(Vp3DecodeContext *s,
                                   int fragment_width,
                                   int fragment_height)
 {
-
 #define PUL 8
 #define PU 4
 #define PUR 2
@@ -1202,18 +1208,21 @@ static void reverse_dc_prediction(Vp3DecodeContext *s,
 
         int transform = 0;
 
-        vul = vu = vur = vl = 0;
-        last_dc[0] = last_dc[1] = last_dc[2] = 0;
+        vul        =
+        vu         =
+        vur        =
+        vl         = 0;
+        last_dc[0] =
+        last_dc[1] =
+        last_dc[2] = 0;
 
         /* for each fragment row... */
         for (y = 0; y < fragment_height; y++) {
-
             /* for each fragment in a row... */
             for (x = 0; x < fragment_width; x++, i++) {
-
                 /* reverse prediction if this block was coded */
                 if (s->all_fragments[i].coding_method != MODE_COPY) {
-
                     current_frame_type =
                         compatible_frame[s->all_fragments[i].coding_method];
 
@@ -1244,12 +1253,10 @@ static void reverse_dc_prediction(Vp3DecodeContext *s,
                     }
 
                     if (transform == 0) {
-
                         /* if there were no fragments to predict from, use last
                          * DC saved */
                         predicted_dc = last_dc[current_frame_type];
                     } else {
-
                         /* apply the appropriate predictor transform */
                         predicted_dc =
                             (predictor_transform[transform][0] * vul) +
@@ -1280,7 +1287,8 @@ static void reverse_dc_prediction(Vp3DecodeContext *s,
     }
 }
 
-static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int yend)
+static void apply_loop_filter(Vp3DecodeContext *s, int plane,
+                              int ystart, int yend)
 {
     int x, y;
     int *bounding_values = s->bounding_values_array + 127;
@@ -1290,18 +1298,17 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye
     int fragment = s->fragment_start[plane] + ystart * width;
     ptrdiff_t stride = s->current_frame.f->linesize[plane];
     uint8_t *plane_data = s->current_frame.f->data[plane];
-    if (!s->flipped_image) stride = -stride;
+    if (!s->flipped_image)
+        stride = -stride;
     plane_data += s->data_offset[plane] + 8 * ystart * stride;
 
     for (y = ystart; y < yend; y++) {
-
         for (x = 0; x < width; x++) {
             /* This code basically just deblocks on the edges of coded blocks.
              * However, it has to be much more complicated because of the
             * braindamaged deblock ordering used in VP3/Theora. Order matters
             * because some pixels get filtered twice. */
-            if( s->all_fragments[fragment].coding_method != MODE_COPY )
-            {
+            if (s->all_fragments[fragment].coding_method != MODE_COPY) {
                 /* do not perform left edge filter for left columns frags */
                 if (x > 0) {
                     s->vp3dsp.h_loop_filter(
@@ -1358,7 +1365,7 @@ static inline int vp3_dequant(Vp3DecodeContext *s, Vp3Fragment *frag,
     int token = *s->dct_tokens[plane][i];
     switch (token & 3) {
     case 0: // EOB
-        if (--token < 4) // 0-3 are token types, so the EOB run must now be 0
+        if (--token < 4) // 0-3 are token types so the EOB run must now be 0
             s->dct_tokens[plane][i]++;
         else
             *s->dct_tokens[plane][i] = token & ~3;
@@ -1400,10 +1407,13 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
     if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
         int y_flipped = s->flipped_image ? s->avctx->height - y : y;
 
-        // At the end of the frame, report INT_MAX instead of the height of the frame.
-        // This makes the other threads' ff_thread_await_progress() calls cheaper, because
-        // they don't have to clip their values.
-        ff_thread_report_progress(&s->current_frame, y_flipped==s->avctx->height ? INT_MAX : y_flipped-1, 0);
+        /* At the end of the frame, report INT_MAX instead of the height of
+         * the frame. This makes the other threads' ff_thread_await_progress()
+         * calls cheaper, because they don't have to clip their values. */
+        ff_thread_report_progress(&s->current_frame,
+                                  y_flipped == s->avctx->height ? INT_MAX
+                                                                : y_flipped - 1,
+                                  0);
     }
 
     if (s->avctx->draw_horiz_band == NULL)
@@ -1413,9 +1423,8 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
     s->last_slice_end = y;
     y -= h;
 
-    if (!s->flipped_image) {
+    if (!s->flipped_image)
         y = s->avctx->height - y - h;
-    }
 
     cy = y >> s->chroma_y_shift;
     offset[0] = s->current_frame.f->linesize[0] * y;
@@ -1432,7 +1441,8 @@ static void vp3_draw_horiz_band(Vp3DecodeContext *s, int y)
  * Wait for the reference frame of the current fragment.
  * The progress value is in luma pixel rows.
  */
-static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment, int motion_y, int y)
+static void await_reference_row(Vp3DecodeContext *s, Vp3Fragment *fragment,
+                                int motion_y, int y)
 {
     ThreadFrame *ref_frame;
     int ref_row;
@@ -1467,9 +1477,12 @@ static void render_slice(Vp3DecodeContext *s, int slice)
         return;
 
     for (plane = 0; plane < 3; plane++) {
-        uint8_t *output_plane = s->current_frame.f->data [plane] + s->data_offset[plane];
-        uint8_t * last_plane = s-> last_frame.f->data [plane] + s->data_offset[plane];
-        uint8_t *golden_plane = s-> golden_frame.f->data [plane] + s->data_offset[plane];
+        uint8_t *output_plane = s->current_frame.f->data[plane] +
+                                s->data_offset[plane];
+        uint8_t *last_plane   = s->last_frame.f->data[plane] +
+                                s->data_offset[plane];
+        uint8_t *golden_plane = s->golden_frame.f->data[plane] +
+                                s->data_offset[plane];
         ptrdiff_t stride = s->current_frame.f->linesize[plane];
         int plane_width = s->width >> (plane && s->chroma_x_shift);
         int plane_height = s->height >> (plane && s->chroma_y_shift);
@@ -1477,23 +1490,25 @@ static void render_slice(Vp3DecodeContext *s, int slice)
 
         int sb_x, sb_y = slice << (!plane && s->chroma_y_shift);
         int slice_height = sb_y + 1 + (!plane && s->chroma_y_shift);
-        int slice_width = plane ? s->c_superblock_width : s->y_superblock_width;
+        int slice_width = plane ? s->c_superblock_width
+                                : s->y_superblock_width;
 
         int fragment_width = s->fragment_width[!!plane];
         int fragment_height = s->fragment_height[!!plane];
         int fragment_start = s->fragment_start[plane];
-        int do_await = !plane && HAVE_THREADS && (s->avctx->active_thread_type&FF_THREAD_FRAME);
 
-        if (!s->flipped_image) stride = -stride;
+        int do_await = !plane && HAVE_THREADS &&
+                       (s->avctx->active_thread_type & FF_THREAD_FRAME);
+
+        if (!s->flipped_image)
+            stride = -stride;
         if (CONFIG_GRAY && plane && (s->avctx->flags & CODEC_FLAG_GRAY))
             continue;
 
         /* for each superblock row in the slice (both of them)... */
         for (; sb_y < slice_height; sb_y++) {
-
             /* for each superblock in a row... */
             for (sb_x = 0; sb_x < slice_width; sb_x++) {
-
                 /* for each block in a superblock... */
                 for (j = 0; j < 16; j++) {
                     x = 4 * sb_x + hilbert_offset[j][0];
@@ -1508,8 +1523,11 @@ static void render_slice(Vp3DecodeContext *s, int slice)
 
                     first_pixel = 8 * y * stride + 8 * x;
 
-                    if (do_await && s->all_fragments[i].coding_method != MODE_INTRA)
-                        await_reference_row(s, &s->all_fragments[i], motion_val[fragment][1], (16*y) >> s->chroma_y_shift);
+                    if (do_await &&
+                        s->all_fragments[i].coding_method != MODE_INTRA)
+                        await_reference_row(s, &s->all_fragments[i],
+                                            motion_val[fragment][1],
+                                            (16 * y) >> s->chroma_y_shift);
 
                     /* transform if this block was coded */
                     if (s->all_fragments[i].coding_method != MODE_COPY) {
@@ -1539,9 +1557,12 @@ static void render_slice(Vp3DecodeContext *s, int slice)
                             motion_halfpel_index |= (motion_y & 0x01) << 1;
                             motion_source += ((motion_y >> 1) * stride);
 
-                            if(src_x<0 || src_y<0 || src_x + 9 >= plane_width || src_y + 9 >= plane_height){
+                            if (src_x < 0 || src_y < 0 ||
+                                src_x + 9 >= plane_width ||
+                                src_y + 9 >= plane_height) {
                                 uint8_t *temp = s->edge_emu_buffer;
-                                if(stride<0) temp -= 8*stride;
+                                if (stride < 0)
+                                    temp -= 8 * stride;
 
                                 s->vdsp.emulated_edge_mc(temp, motion_source,
                                                          stride, stride,
@@ -1552,22 +1573,22 @@ static void render_slice(Vp3DecodeContext *s, int slice)
                                 }
                             }
 
-
                         /* first, take care of copying a block from either the
                          * previous or the golden frame */
                         if (s->all_fragments[i].coding_method != MODE_INTRA) {
-                            /* Note, it is possible to implement all MC cases with
-                               put_no_rnd_pixels_l2 which would look more like the
-                               VP3 source but this would be slower as
-                               put_no_rnd_pixels_tab is better optimzed */
+                            /* Note, it is possible to implement all MC cases
+                             * with put_no_rnd_pixels_l2 which would look more
+                             * like the VP3 source but this would be slower as
+                             * put_no_rnd_pixels_tab is better optimzed */
                             if (motion_halfpel_index != 3) {
                                 s->hdsp.put_no_rnd_pixels_tab[1][motion_halfpel_index](
                                     output_plane + first_pixel,
                                     motion_source, stride, 8);
                             } else {
-                                int d= (motion_x ^ motion_y)>>31; // d is 0 if motion_x and _y have the same sign, else -1
-                                s->vp3dsp.put_no_rnd_pixels_l2(
-                                    output_plane + first_pixel,
+                                /* d is 0 if motion_x and _y have the same sign,
+                                 * else -1 */
+                                int d = (motion_x ^ motion_y) >> 31;
+                                s->vp3dsp.put_no_rnd_pixels_l2(output_plane + first_pixel,
                                     motion_source - d,
                                     motion_source + stride + 1 + d,
                                     stride, 8);
@@ -1578,41 +1599,41 @@ static void render_slice(Vp3DecodeContext *s, int slice)
 
                         if (s->all_fragments[i].coding_method == MODE_INTRA) {
                             int index;
-                            index = vp3_dequant(s, s->all_fragments + i, plane, 0, block);
+                            index = vp3_dequant(s, s->all_fragments + i,
+                                                plane, 0, block);
                             if (index > 63)
                                 continue;
-                            s->vp3dsp.idct_put(
-                                output_plane + first_pixel,
+                            s->vp3dsp.idct_put(output_plane + first_pixel,
                                 stride,
                                 block);
                         } else {
-                            int index = vp3_dequant(s, s->all_fragments + i, plane, 1, block);
+                            int index = vp3_dequant(s, s->all_fragments + i,
+                                                    plane, 1, block);
                             if (index > 63)
                                 continue;
                             if (index > 0) {
-                                s->vp3dsp.idct_add(
-                                    output_plane + first_pixel,
+                                s->vp3dsp.idct_add(output_plane + first_pixel,
                                     stride,
                                     block);
                             } else {
-                                s->vp3dsp.idct_dc_add(output_plane + first_pixel, stride, block);
+                                s->vp3dsp.idct_dc_add(output_plane + first_pixel,
+                                                      stride, block);
                             }
                         }
                     } else {
 
                         /* copy directly from the previous frame */
                         s->hdsp.put_pixels_tab[1][0](
                             output_plane + first_pixel,
                             last_plane + first_pixel,
                             stride, 8);
-
                     }
                 }
             }
 
         // Filter up to the last row in the superblock row
         if (!s->skip_loop_filter)
-            apply_loop_filter(s, plane, 4*sb_y - !!sb_y, FFMIN(4*sb_y+3, fragment_height-1));
+            apply_loop_filter(s, plane, 4 * sb_y - !!sb_y,
+                              FFMIN(4 * sb_y + 3, fragment_height - 1));
         }
     }
 
@@ -1624,7 +1645,8 @@ static void render_slice(Vp3DecodeContext *s, int slice)
      * dispatch (slice - 1);
      */
 
-    vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) -16, s->height-16));
+    vp3_draw_horiz_band(s, FFMIN((32 << s->chroma_y_shift) * (slice + 1) - 16,
+                                 s->height - 16));
 }
 
 /// Allocate tables for per-frame data in Vp3DecodeContext
@@ -1638,8 +1660,11 @@ static av_cold int allocate_tables(AVCodecContext *avctx)
 
     s->superblock_coding = av_malloc(s->superblock_count);
     s->all_fragments = av_malloc(s->fragment_count * sizeof(Vp3Fragment));
+
     s->coded_fragment_list[0] = av_malloc(s->fragment_count * sizeof(int));
-    s->dct_tokens_base = av_malloc(64*s->fragment_count * sizeof(*s->dct_tokens_base));
+
+    s->dct_tokens_base = av_malloc(64 * s->fragment_count *
+                                   sizeof(*s->dct_tokens_base));
     s->motion_val[0] = av_malloc(y_fragment_count * sizeof(*s->motion_val[0]));
     s->motion_val[1] = av_malloc(c_fragment_count * sizeof(*s->motion_val[1]));
 
@@ -1647,8 +1672,9 @@ static av_cold int allocate_tables(AVCodecContext *avctx)
     s->superblock_fragments = av_malloc(s->superblock_count * 16 * sizeof(int));
     s->macroblock_coding = av_malloc(s->macroblock_count + 1);
 
-    if (!s->superblock_coding || !s->all_fragments || !s->dct_tokens_base ||
-        !s->coded_fragment_list[0] || !s->superblock_fragments || !s->macroblock_coding ||
+    if (!s->superblock_coding || !s->all_fragments ||
+        !s->dct_tokens_base || !s->coded_fragment_list[0] ||
+        !s->superblock_fragments || !s->macroblock_coding ||
         !s->motion_val[0] || !s->motion_val[1]) {
         vp3_decode_end(avctx);
         return -1;
@@ -1750,8 +1776,7 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
     s->fragment_start[1] = y_fragment_count;
     s->fragment_start[2] = y_fragment_count + c_fragment_count;
 
-    if (!s->theora_tables)
-    {
+    if (!s->theora_tables) {
         for (i = 0; i < 64; i++) {
             s->coded_dc_scale_factor[i] = vp31_dc_scale_factor[i];
             s->coded_ac_scale_factor[i] = vp31_ac_scale_factor[i];
@@ -1772,7 +1797,6 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
 
     /* init VLC tables */
     for (i = 0; i < 16; i++) {
-
         /* DC histograms */
         init_vlc(&s->dc_vlc[i], 11, 32,
                  &dc_bias[i][0][1], 4, 2,
@@ -1799,7 +1823,6 @@ static av_cold int vp3_decode_init(AVCodecContext *avctx)
                      &ac_bias_3[i][0][0], 4, 2, 0);
         }
     } else {
-
         for (i = 0; i < 16; i++) {
             /* DC histograms */
             if (init_vlc(&s->dc_vlc[i], 11, 32,
@@ -1862,7 +1885,6 @@ static int update_frames(AVCodecContext *avctx)
     Vp3DecodeContext *s = avctx->priv_data;
     int ret = 0;
 
-
     /* shuffle frames (last = current) */
     ff_thread_release_buffer(avctx, &s->last_frame);
     ret = ff_thread_ref_frame(&s->last_frame, &s->current_frame);
@@ -1902,11 +1924,12 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *
     Vp3DecodeContext *s = dst->priv_data, *s1 = src->priv_data;
     int qps_changed = 0, i, err;
 
-#define copy_fields(to, from, start_field, end_field) memcpy(&to->start_field, &from->start_field, (char*)&to->end_field - (char*)&to->start_field)
+#define copy_fields(to, from, start_field, end_field)          \
+    memcpy(&to->start_field, &from->start_field,                \
+           (char *) &to->end_field - (char *) &to->start_field)
 
-    if (!s1->current_frame.f->data[0]
-        ||s->width != s1->width
-        ||s->height!= s1->height) {
+    if (!s1->current_frame.f->data[0] ||
+        s->width != s1->width || s->height != s1->height) {
         if (s != s1)
             ref_frames(s, s1);
         return -1;
@@ -1922,8 +1945,10 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *
             return err;
         y_fragment_count = s->fragment_width[0] * s->fragment_height[0];
         c_fragment_count = s->fragment_width[1] * s->fragment_height[1];
-        memcpy(s->motion_val[0], s1->motion_val[0], y_fragment_count * sizeof(*s->motion_val[0]));
-        memcpy(s->motion_val[1], s1->motion_val[1], c_fragment_count * sizeof(*s->motion_val[1]));
+        memcpy(s->motion_val[0], s1->motion_val[0],
+               y_fragment_count * sizeof(*s->motion_val[0]));
+        memcpy(s->motion_val[1], s1->motion_val[1],
+               c_fragment_count * sizeof(*s->motion_val[1]));
     }
 
     // copy previous frame data
@@ -1941,7 +1966,8 @@ static int vp3_update_thread_context(AVCodecContext *dst, const AVCodecContext *
     }
 
     if (s->qps[0] != s1->qps[0])
-        memcpy(&s->bounding_values_array, &s1->bounding_values_array, sizeof(s->bounding_values_array));
+        memcpy(&s->bounding_values_array, &s1->bounding_values_array,
+               sizeof(s->bounding_values_array));
 
     if (qps_changed)
         copy_fields(s, s1, qps, superblock_count);
@@ -1963,9 +1989,9 @@ static int vp3_decode_frame(AVCodecContext *avctx,
 
     init_get_bits(&gb, buf, buf_size * 8);
 
-    if (s->theora && get_bits1(&gb))
-    {
-        av_log(avctx, AV_LOG_ERROR, "Header packet passed to frame decoder, skipping\n");
+    if (s->theora && get_bits1(&gb)) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Header packet passed to frame decoder, skipping\n");
         return -1;
     }
 
@@ -1987,7 +2013,8 @@ static int vp3_decode_frame(AVCodecContext *avctx,
             s->keyframe ? "key" : "", avctx->frame_number + 1, s->qps[0]);
 
     s->skip_loop_filter = !s->filter_limit_values[s->qps[0]] ||
-        avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL : AVDISCARD_NONKEY);
+                          avctx->skip_loop_filter >= (s->keyframe ? AVDISCARD_ALL
+                                                                  : AVDISCARD_NONKEY);
 
     if (s->qps[0] != s->last_qps[0])
         init_loop_filter(s);
@@ -2001,7 +2028,8 @@ static int vp3_decode_frame(AVCodecContext *avctx,
     if (avctx->skip_frame >= AVDISCARD_NONKEY && !s->keyframe)
         return buf_size;
 
-    s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
+    s->current_frame.f->pict_type = s->keyframe ? AV_PICTURE_TYPE_I
+                                                : AV_PICTURE_TYPE_P;
     if (ff_thread_get_buffer(avctx, &s->current_frame, AV_GET_BUFFER_FLAG_REF) < 0) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
         goto error;
@@ -2011,34 +2039,36 @@ static int vp3_decode_frame(AVCodecContext *avctx,
     s->edge_emu_buffer = av_malloc(9 * FFABS(s->current_frame.f->linesize[0]));
 
     if (s->keyframe) {
-        if (!s->theora)
-        {
+        if (!s->theora) {
             skip_bits(&gb, 4); /* width code */
             skip_bits(&gb, 4); /* height code */
-            if (s->version)
-            {
+            if (s->version) {
                 s->version = get_bits(&gb, 5);
                 if (avctx->frame_number == 0)
-                    av_log(s->avctx, AV_LOG_DEBUG, "VP version: %d\n", s->version);
+                    av_log(s->avctx, AV_LOG_DEBUG,
+                           "VP version: %d\n", s->version);
             }
         }
-        if (s->version || s->theora)
-        {
+        if (s->version || s->theora) {
             if (get_bits1(&gb))
-                av_log(s->avctx, AV_LOG_ERROR, "Warning, unsupported keyframe coding type?!\n");
+                av_log(s->avctx, AV_LOG_ERROR,
+                       "Warning, unsupported keyframe coding type?!\n");
             skip_bits(&gb, 2); /* reserved? */
         }
     } else {
         if (!s->golden_frame.f->data[0]) {
-            av_log(s->avctx, AV_LOG_WARNING, "vp3: first frame not a keyframe\n");
+            av_log(s->avctx, AV_LOG_WARNING,
+                   "vp3: first frame not a keyframe\n");
 
             s->golden_frame.f->pict_type = AV_PICTURE_TYPE_I;
-            if (ff_thread_get_buffer(avctx, &s->golden_frame, AV_GET_BUFFER_FLAG_REF) < 0) {
+            if (ff_thread_get_buffer(avctx, &s->golden_frame,
+                                     AV_GET_BUFFER_FLAG_REF) < 0) {
                 av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
                 goto error;
             }
             ff_thread_release_buffer(avctx, &s->last_frame);
-            if ((ret = ff_thread_ref_frame(&s->last_frame, &s->golden_frame)) < 0)
+            if ((ret = ff_thread_ref_frame(&s->last_frame,
+                                           &s->golden_frame)) < 0)
                 goto error;
             ff_thread_report_progress(&s->last_frame, INT_MAX, 0);
         }
@@ -2124,8 +2154,7 @@ static int read_huffman_tree(AVCodecContext *avctx, GetBitContext *gb)
         s->huffman_table[s->hti][token][0] = s->hbits;
         s->huffman_table[s->hti][token][1] = s->huff_code_size;
         s->entries++;
-    }
-    else {
+    } else {
         if (s->huff_code_size >= 32) { /* overflow */
             av_log(avctx, AV_LOG_ERROR, "huffman tree overflow\n");
             return -1;
@@ -2176,16 +2205,18 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
     s->theora = get_bits_long(gb, 24);
     av_log(avctx, AV_LOG_DEBUG, "Theora bitstream version %X\n", s->theora);
 
-    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3 */
-    /* but previous versions have the image flipped relative to vp3 */
-    if (s->theora < 0x030200)
-    {
+    /* 3.2.0 aka alpha3 has the same frame orientation as original vp3
+     * but previous versions have the image flipped relative to vp3 */
+    if (s->theora < 0x030200) {
         s->flipped_image = 1;
-        av_log(avctx, AV_LOG_DEBUG, "Old (<alpha3) Theora bitstream, flipped image\n");
+        av_log(avctx, AV_LOG_DEBUG,
+               "Old (<alpha3) Theora bitstream, flipped image\n");
     }
 
-    visible_width = s->width = get_bits(gb, 16) << 4;
-    visible_height = s->height = get_bits(gb, 16) << 4;
+    visible_width  =
+    s->width       = get_bits(gb, 16) << 4;
+    visible_height =
+    s->height      = get_bits(gb, 16) << 4;
 
     if (s->theora >= 0x030200) {
         visible_width = get_bits_long(gb, 24);
@@ -2221,8 +2252,7 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
 
     skip_bits(gb, 6); /* quality hint */
 
-    if (s->theora >= 0x030200)
-    {
+    if (s->theora >= 0x030200) {
         skip_bits(gb, 5); /* keyframe frequency force */
         avctx->pix_fmt = theora_pix_fmts[get_bits(gb, 2)];
         skip_bits(gb, 3); /* reserved */
@@ -2230,20 +2260,20 @@ static int theora_decode_header(AVCodecContext *avctx, GetBitContext *gb)
 
 //    align_get_bits(gb);
 
-    if ( visible_width <= s->width && visible_width > s->width-16
-        && visible_height <= s->height && visible_height > s->height-16
-        && !offset_x && (offset_y == s->height - visible_height))
+    if (visible_width  <= s->width  && visible_width  > s->width  - 16 &&
+        visible_height <= s->height && visible_height > s->height - 16 &&
+        !offset_x && (offset_y == s->height - visible_height))
         ret = ff_set_dimensions(avctx, visible_width, visible_height);
     else
         ret = ff_set_dimensions(avctx, s->width, s->height);
     if (ret < 0)
         return ret;
 
-    if (colorspace == 1) {
+    if (colorspace == 1)
         avctx->color_primaries = AVCOL_PRI_BT470M;
-    } else if (colorspace == 2) {
+    else if (colorspace == 2)
         avctx->color_primaries = AVCOL_PRI_BT470BG;
-    }
 
     if (colorspace == 1 || colorspace == 2) {
         avctx->colorspace = AVCOL_SPC_BT470BG;
         avctx->color_trc = AVCOL_TRC_BT709;
@@ -2291,10 +2321,9 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
         return -1;
     }
 
-    for(n=0; n<matrices; n++){
+    for (n = 0; n < matrices; n++)
         for (i = 0; i < 64; i++)
             s->base_matrix[n][i] = get_bits(gb, 8);
-    }
 
     for (inter = 0; inter <= 1; inter++) {
         for (plane = 0; plane <= 2; plane++) {
@@ -2311,8 +2340,10 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
                     plj = (plane + 2) % 3;
                 }
                 s->qr_count[inter][plane] = s->qr_count[qtj][plj];
-                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj], sizeof(s->qr_size[0][0]));
-                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj], sizeof(s->qr_base[0][0]));
+                memcpy(s->qr_size[inter][plane], s->qr_size[qtj][plj],
+                       sizeof(s->qr_size[0][0]));
+                memcpy(s->qr_base[inter][plane], s->qr_base[qtj][plj],
+                       sizeof(s->qr_base[0][0]));
             } else {
                 int qri = 0;
                 int qi = 0;
@@ -2320,7 +2351,8 @@ static int theora_decode_tables(AVCodecContext *avctx, GetBitContext *gb)
                 for (;;) {
                     i = get_bits(gb, av_log2(matrices - 1) + 1);
                     if (i >= matrices) {
-                        av_log(avctx, AV_LOG_ERROR, "invalid base matrix index\n");
+                        av_log(avctx, AV_LOG_ERROR,
+                               "invalid base matrix index\n");
                         return -1;
                     }
                     s->qr_base[inter][plane][qri] = i;
@@ -2370,8 +2402,7 @@ static av_cold int theora_decode_init(AVCodecContext *avctx)
 
     s->theora = 1;
 
-    if (!avctx->extradata_size)
-    {
+    if (!avctx->extradata_size) {
         av_log(avctx, AV_LOG_ERROR, "Missing extradata!\n");
         return -1;
     }
@@ -2389,8 +2420,7 @@ static av_cold int theora_decode_init(AVCodecContext *avctx)
 
         ptype = get_bits(&gb, 8);
 
-        if (!(ptype & 0x80))
-        {
+        if (!(ptype & 0x80)) {
             av_log(avctx, AV_LOG_ERROR, "Invalid extradata!\n");
 //            return -1;
         }
@@ -2398,8 +2428,7 @@ static av_cold int theora_decode_init(AVCodecContext *avctx)
         // FIXME: Check for this as well.
         skip_bits_long(&gb, 6 * 8); /* "theora" */
 
-        switch(ptype)
-        {
+        switch (ptype) {
         case 0x80:
             theora_decode_header(avctx, &gb);
             break;
@@ -2412,11 +2441,14 @@ static av_cold int theora_decode_init(AVCodecContext *avctx)
            return -1;
            break;
        default:
            av_log(avctx, AV_LOG_ERROR,
                   "Unknown Theora config packet: %d\n", ptype & ~0x80);
            break;
        }
        if (ptype != 0x81 && 8 * header_len[i] != get_bits_count(&gb))
            av_log(avctx, AV_LOG_WARNING,
                   "%d bits left in packet %X\n",
                   8 * header_len[i] - get_bits_count(&gb), ptype);
        if (s->theora < 0x030200)
            break;
    }
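As an aside (not part of this change): the loop above walks the Theora header packets carried in extradata. Each header packet starts with a type byte whose high bit is set (0x80 info, 0x81 comment, and, by convention, 0x82 setup/tables), followed by the literal string "theora", which is what the skip_bits_long() call steps over. A minimal standalone validity check under that assumption; the helper name is made up for the example.

    #include <stdint.h>
    #include <string.h>

    /* Hypothetical check for one Theora header packet: one type byte with the
     * high bit set, then the 6-byte magic "theora". */
    static int check_theora_header(const uint8_t *pkt, int size, int *type)
    {
        if (size < 7 || !(pkt[0] & 0x80))
            return -1;                  /* not a header packet */
        if (memcmp(pkt + 1, "theora", 6))
            return -1;                  /* missing magic */
        *type = pkt[0];                 /* 0x80 info, 0x81 comment, 0x82 setup */
        return 0;
    }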
@@ -36,7 +36,9 @@ static int parse(AVCodecParserContext *s,
}

AVCodecParser ff_vp3_parser = {
    .codec_ids    = {
        AV_CODEC_ID_THEORA, AV_CODEC_ID_VP3,
        AV_CODEC_ID_VP6, AV_CODEC_ID_VP6F, AV_CODEC_ID_VP6A
    },
    .parser_parse = parse,
};
@@ -26,8 +26,8 @@
/* these coefficients dequantize intraframe Y plane coefficients
 * (note: same as JPEG) */
static const int16_t vp31_intra_y_dequant[64] = {
    16, 11, 10, 16, 24, 40, 51, 61,
    12, 12, 14, 19, 26, 58, 60, 55,
    14, 13, 16, 24, 40, 57, 69, 56,
    14, 17, 22, 29, 51, 87, 80, 62,
@@ -39,8 +39,8 @@ static const int16_t vp31_intra_y_dequant[64] =

/* these coefficients dequantize intraframe C plane coefficients
 * (note: same as JPEG) */
static const int16_t vp31_intra_c_dequant[64] = {
    17, 18, 24, 47, 99, 99, 99, 99,
    18, 21, 26, 66, 99, 99, 99, 99,
    24, 26, 56, 99, 99, 99, 99, 99,
    47, 66, 99, 99, 99, 99, 99, 99,
@@ -51,8 +51,8 @@ static const int16_t vp31_intra_c_dequant[64] =
};

/* these coefficients dequantize interframe coefficients (all planes) */
static const int16_t vp31_inter_dequant[64] = {
    16, 16, 16, 20, 24, 28, 32, 40,
    16, 16, 20, 24, 28, 32, 40, 48,
    16, 20, 24, 28, 32, 40, 48, 64,
    20, 24, 28, 32, 40, 48, 64, 64,
@@ -62,8 +62,8 @@ static const int16_t vp31_inter_dequant[64] =
    40, 48, 64, 64, 64, 96, 128, 128
};

static const int16_t vp31_dc_scale_factor[64] = {
    220, 200, 190, 180, 170, 170, 160, 160,
    150, 150, 140, 140, 130, 130, 120, 120,
    110, 110, 100, 100,  90,  90,  90,  80,
     80,  80,  70,  70,  70,  60,  60,  60,
@@ -73,8 +73,8 @@ static const int16_t vp31_dc_scale_factor[64] =
    20, 10, 10, 10, 10, 10, 10, 10
};

static const uint32_t vp31_ac_scale_factor[64] = {
    500, 450, 400, 370, 340, 310, 285, 265,
    245, 225, 210, 195, 185, 180, 170, 160,
    150, 145, 135, 130, 125, 115, 110, 107,
    100,  96,  93,  89,  85,  82,  75,  74,
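As an aside (not part of this change): these scale-factor tables feed the dequantizer. As I understand the VP3/Theora scheme, each entry of the active base matrix is multiplied by the DC or AC scale factor for the current quality index, divided by 100, and clamped to a valid range before it multiplies the decoded coefficient. The sketch below only shows the shape of that computation; the exact minimum quantizer and any extra fixed scaling are assumptions here, not taken from this page.

    #include <stdint.h>

    static int clampi(int v, int lo, int hi)
    {
        return v < lo ? lo : v > hi ? hi : v;
    }

    /* Rough sketch: derive one dequant entry and reconstruct one coefficient.
     * The /100 scaling and the [min_quant, 4096] clamp follow the usual
     * VP3/Theora scheme as I understand it; treat the bounds as assumptions. */
    static int16_t dequantize(int16_t qcoeff, uint8_t base_entry,
                              int scale_factor, int min_quant)
    {
        int q = clampi(base_entry * scale_factor / 100, min_quant, 4096);
        return (int16_t)(qcoeff * q);
    }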
@@ -84,8 +84,8 @@ static const uint32_t vp31_ac_scale_factor[64] =
    21, 19, 18, 17, 15, 13, 12, 10
};

static const uint8_t vp31_filter_limit_values[64] = {
    30, 25, 20, 20, 15, 15, 14, 14,
    13, 13, 12, 12, 11, 11, 10, 10,
     9,  9,  8,  8,  7,  7,  7,  7,
     6,  6,  6,  6,  5,  5,  5,  5,
@@ -113,7 +113,7 @@ static const uint16_t superblock_run_length_vlc_table[34][2] = {
    { 0x3EC, 10 }, { 0x3ED, 10 }, { 0x3EE, 10 }, { 0x3EF, 10 },

    { 0x3F, 6 } /* this last VLC is a special case for reading 12 more
                 * bits from stream and adding the value 34 */
};

static const uint16_t fragment_run_length_vlc_table[30][2] = {
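As an aside (not part of this change): the comment on the last entry describes an escape mechanism. When the 6-bit code 0x3F is decoded, the run length is not the table value itself; instead 12 further bits are read from the stream and 34 is added. A standalone sketch of that decode step; the bit-reader interface and the mapping of the non-escape symbols to runs are assumptions made for the example.

    #include <stdint.h>

    /* Hypothetical bit-reader interface used only for this example. */
    typedef struct BitReader BitReader;
    unsigned read_bits(BitReader *br, int n);   /* next n bits from the stream */
    int      read_vlc_symbol(BitReader *br);    /* decoded table index, 0..33 */

    /* Superblock run lengths: symbols 0..32 are assumed to map directly to
     * runs 1..33; symbol 33 is the escape entry { 0x3F, 6 } described in the
     * table comment: read 12 more bits and add 34. */
    static int read_superblock_run_length(BitReader *br)
    {
        int sym = read_vlc_symbol(br);
        if (sym == 33)
            return read_bits(br, 12) + 34;
        return sym + 1;
    }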
@@ -27,6 +27,7 @@
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "libavutil/intreadwrite.h"

#include "avcodec.h"
#include "dsputil.h"
#include "rnd_avg.h"
@@ -43,7 +44,8 @@

#define M(a, b) (((a) * (b)) >> 16)

static av_always_inline void idct(uint8_t *dst, int stride,
                                  int16_t *input, int type)
{
    int16_t *ip = input;

@@ -106,7 +108,6 @@ static av_always_inline void idct(uint8_t *dst, int stride, int16_t *input, int
        /* Check for non-zero values (bitwise or faster than ||) */
        if (ip[1] | ip[2] | ip[3] |
            ip[4] | ip[5] | ip[6] | ip[7]) {
            A = M(xC1S7, ip[1]) + M(xC7S1, ip[7]);
            B = M(xC7S1, ip[1]) - M(xC1S7, ip[7]);
            C = M(xC3S5, ip[3]) + M(xC5S3, ip[5]);
@@ -164,7 +165,6 @@ static av_always_inline void idct(uint8_t *dst, int stride, int16_t *input, int
            dst[5 * stride] = av_clip_uint8(dst[5 * stride] + ((Fd + Bdd) >> 4));
            dst[6 * stride] = av_clip_uint8(dst[6 * stride] + ((Fd - Bdd) >> 4));
        }
    } else {
        if (type == 1) {
            dst[0*stride] =
@@ -177,7 +177,7 @@ static av_always_inline void idct(uint8_t *dst, int stride, int16_t *input, int
            dst[7*stride] = av_clip_uint8(128 + ((xC4S4 * ip[0] + (IdctAdjustBeforeShift << 16)) >> 20));
        } else {
            if (ip[0]) {
                int v = (xC4S4 * ip[0] + (IdctAdjustBeforeShift << 16)) >> 20;
                dst[0 * stride] = av_clip_uint8(dst[0 * stride] + v);
                dst[1 * stride] = av_clip_uint8(dst[1 * stride] + v);
                dst[2 * stride] = av_clip_uint8(dst[2 * stride] + v);
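As an aside (not part of this change): when only the DC coefficient of a column is non-zero, the code above skips the full transform and adds one constant to all eight output pixels. The DC value is scaled by xC4S4 (the cos(pi/4) factor in 16.16 fixed point), biased by IdctAdjustBeforeShift, and shifted down. A standalone sketch of that shortcut; the two constant values are assumptions on my part (they are defined elsewhere in the file).

    #include <stdint.h>

    #define xC4S4                 46341  /* assumed: round(cos(pi/4) * 65536) */
    #define IdctAdjustBeforeShift 8      /* assumed rounding bias */

    static uint8_t clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* DC-only column: add the same reconstructed value to all 8 pixels. */
    static void idct_dc_only_col(uint8_t *dst, int stride, int dc)
    {
        int v = (xC4S4 * dc + (IdctAdjustBeforeShift << 16)) >> 20;
        for (int i = 0; i < 8; i++)
            dst[i * stride] = clip_uint8(dst[i * stride] + v);
    }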
@@ -236,10 +236,10 @@ static void vp3_v_loop_filter_c(uint8_t *first_pixel, int stride,
    const int nstride = -stride;

    for (end = first_pixel + 8; first_pixel < end; first_pixel++) {
        filter_value = (first_pixel[2 * nstride] - first_pixel[stride]) +
                       (first_pixel[0] - first_pixel[nstride]) * 3;

        filter_value = bounding_values[(filter_value + 4) >> 3];

        first_pixel[nstride] = av_clip_uint8(first_pixel[nstride] + filter_value);
        first_pixel[0]       = av_clip_uint8(first_pixel[0] - filter_value);
    }
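As an aside (not part of this change): the vertical and horizontal loop filters apply the same 1-D kernel across a block edge. A difference signal is formed from the four pixels straddling the edge, rounded and quantized by ((x + 4) >> 3), limited through the bounding_values[] table (built from the per-quantizer filter limit), and the result is added to one edge pixel and subtracted from the other. A standalone sketch of the kernel follows; the ramp used as a stand-in for the bounding table is only illustrative, not the table the decoder actually builds.

    #include <stdint.h>
    #include <stdlib.h>

    static uint8_t clip_uint8(int v)
    {
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }

    /* Stand-in for the bounding_values[] lookup: small corrections pass,
     * medium ones are ramped down, strong edges (likely real detail) are
     * left untouched.  Illustrative only. */
    static int bound(int v, int limit)
    {
        int a = abs(v);
        if (a <= limit)
            return v;
        if (a <= 2 * limit)
            return v > 0 ? 2 * limit - a : -(2 * limit - a);
        return 0;
    }

    /* One application of the kernel across an edge between pixels
     * p1 p0 | q0 q1, where p0 and q0 are adjacent to the edge. */
    static void filter_edge(uint8_t *p1, uint8_t *p0,
                            uint8_t *q0, uint8_t *q1, int limit)
    {
        int f = (*p1 - *q1) + 3 * (*q0 - *p0);
        f = bound((f + 4) >> 3, limit);
        *p0 = clip_uint8(*p0 + f);
        *q0 = clip_uint8(*q0 - f);
    }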
@@ -252,10 +252,10 @@ static void vp3_h_loop_filter_c(uint8_t *first_pixel, int stride,
    int filter_value;

    for (end = first_pixel + 8 * stride; first_pixel != end; first_pixel += stride) {
        filter_value = (first_pixel[-2] - first_pixel[1]) +
                       (first_pixel[ 0] - first_pixel[-1]) * 3;

        filter_value = bounding_values[(filter_value + 4) >> 3];

        first_pixel[-1] = av_clip_uint8(first_pixel[-1] + filter_value);
        first_pixel[ 0] = av_clip_uint8(first_pixel[ 0] - filter_value);
    }
|
Loading…
Reference in New Issue
Block a user