
Decoding to bgr15/16 from 16-bit input. Decoding of 24-bit input added; not yet finished, but at least the picture can be recognized

Originally committed as revision 3488 to svn://svn.ffmpeg.org/ffmpeg/trunk
Alex Beregszaszi 2004-09-21 17:37:28 +00:00
parent f970a8e682
commit be441a9eee
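
The renamed make_ydt15_entry()/make_cdt15_entry() helpers and the new 16-bit variants in the patch below differ only in where the replicated delta lands inside the packed pixel word. As a minimal standalone sketch (not code from this commit), assuming little-endian RGB555/RGB565 packing, the luma delta is spread across the three channels like this:

    #include <stdint.h>

    /* Illustration only: replicate one luma delta d into every colour
     * channel of a packed pixel, mirroring the shift amounts used by the
     * 15-bit and 16-bit predictor-table builders. */
    static uint32_t pack_y_delta_rgb555(int32_t d)
    {
        return d + (d << 5) + (d << 10);   /* channels at bits 0, 5, 10 */
    }

    static uint32_t pack_y_delta_rgb565(int32_t d)
    {
        return d + (d << 6) + (d << 11);   /* channels at bits 0, 6, 11 */
    }

The table builders in the patch then store two such pixels per 32-bit word (lo + (hi << 16)) and shift the result left by one, leaving the low bit free for the escape flag tested by the APPLY_*_PREDICTOR macros.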

libavcodec/truemotion1.c

@@ -24,7 +24,7 @@
* Mike Melanson (melanson@pcisys.net)
*
* The TrueMotion v1 decoder presently only decodes 16-bit TM1 data and
* outputs RGB555 data. 24-bit TM1 data is not supported yet.
* outputs RGB555 (or RGB565) data. 24-bit TM1 data is not supported yet.
*/
#include <stdio.h>
@@ -43,12 +43,12 @@ typedef struct TrueMotion1Context {
AVFrame frame;
AVFrame prev_frame;
unsigned char *buf;
uint8_t *buf;
int size;
unsigned char *mb_change_bits;
uint8_t *mb_change_bits;
int mb_change_bits_row_size;
unsigned char *index_stream;
uint8_t *index_stream;
int index_stream_size;
int flags;
@@ -56,6 +56,8 @@ typedef struct TrueMotion1Context {
uint32_t y_predictor_table[1024];
uint32_t c_predictor_table[1024];
uint32_t fat_y_predictor_table[1024];
uint32_t fat_c_predictor_table[1024];
int compression;
int block_type;
@@ -109,12 +111,12 @@ struct frame_header {
typedef struct comp_types {
int algorithm;
int block_width;
int block_height;
int block_width; // vres
int block_height; // hres
int block_type;
} comp_types;
/* { valid for metatype }, algorithm, num of deltas, horiz res, vert res */
/* { valid for metatype }, algorithm, num of deltas, vert res, horiz res */
static comp_types compression_types[17] = {
{ ALGO_NOP, 0, 0, 0 },
@@ -163,9 +165,9 @@ static void select_delta_tables(TrueMotion1Context *s, int delta_table_index)
}
#ifdef WORDS_BIGENDIAN
static int make_ydt_entry(int p2, int p1, int16_t *ydt)
static int make_ydt15_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt_entry(int p1, int p2, int16_t *ydt)
static int make_ydt15_entry(int p1, int p2, int16_t *ydt)
#endif
{
int lo, hi;
@@ -178,9 +180,9 @@ static int make_ydt_entry(int p1, int p2, int16_t *ydt)
}
#ifdef WORDS_BIGENDIAN
static int make_cdt_entry(int p2, int p1, int16_t *cdt)
static int make_cdt15_entry(int p2, int p1, int16_t *cdt)
#else
static int make_cdt_entry(int p1, int p2, int16_t *cdt)
static int make_cdt15_entry(int p1, int p2, int16_t *cdt)
#endif
{
int r, b, lo;
@@ -191,7 +193,62 @@ static int make_cdt_entry(int p1, int p2, int16_t *cdt)
return ((lo + (lo << 16)) << 1);
}
static void gen_vector_table(TrueMotion1Context *s, uint8_t *sel_vector_table)
#ifdef WORDS_BIGENDIAN
static int make_ydt16_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt16_entry(int p1, int p2, int16_t *ydt)
#endif
{
int lo, hi;
lo = ydt[p1];
lo += (lo << 6) + (lo << 11);
hi = ydt[p2];
hi += (hi << 6) + (hi << 11);
return ((lo + (hi << 16)) << 1);
}
#ifdef WORDS_BIGENDIAN
static int make_cdt16_entry(int p2, int p1, int16_t *cdt)
#else
static int make_cdt16_entry(int p1, int p2, int16_t *cdt)
#endif
{
int r, b, lo;
b = cdt[p2];
r = cdt[p1] << 11;
lo = b + r;
return ((lo + (lo << 16)) << 1);
}
#ifdef WORDS_BIGENDIAN
static int make_ydt24_entry(int p2, int p1, int16_t *ydt)
#else
static int make_ydt24_entry(int p1, int p2, int16_t *ydt)
#endif
{
int lo, hi;
lo = ydt[p1];
hi = ydt[p2];
return ((lo + (hi << 8)) << 1);
}
#ifdef WORDS_BIGENDIAN
static int make_cdt24_entry(int p2, int p1, int16_t *cdt)
#else
static int make_cdt24_entry(int p1, int p2, int16_t *cdt)
#endif
{
int r, b;
b = cdt[p2];
r = cdt[p1]<<16;
return ((b+r) << 1);
}
static void gen_vector_table15(TrueMotion1Context *s, uint8_t *sel_vector_table)
{
int len, i, j;
unsigned char delta_pair;
@@ -203,15 +260,63 @@ static void gen_vector_table(TrueMotion1Context *s, uint8_t *sel_vector_table)
{
delta_pair = *sel_vector_table++;
s->y_predictor_table[i+j] = 0xfffffffe &
make_ydt_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
make_ydt15_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
s->c_predictor_table[i+j] = 0xfffffffe &
make_cdt_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
make_cdt15_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
}
s->y_predictor_table[i+(j-1)] |= 1;
s->c_predictor_table[i+(j-1)] |= 1;
}
}
static void gen_vector_table16(TrueMotion1Context *s, uint8_t *sel_vector_table)
{
int len, i, j;
unsigned char delta_pair;
for (i = 0; i < 1024; i += 4)
{
len = *sel_vector_table++ / 2;
for (j = 0; j < len; j++)
{
delta_pair = *sel_vector_table++;
s->y_predictor_table[i+j] = 0xfffffffe &
make_ydt16_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
s->c_predictor_table[i+j] = 0xfffffffe &
make_cdt16_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
}
s->y_predictor_table[i+(j-1)] |= 1;
s->c_predictor_table[i+(j-1)] |= 1;
}
}
static void gen_vector_table24(TrueMotion1Context *s, uint8_t *sel_vector_table)
{
int len, i, j;
unsigned char delta_pair;
for (i = 0; i < 1024; i += 4)
{
len = *sel_vector_table++ / 2;
for (j = 0; j < len; j++)
{
delta_pair = *sel_vector_table++;
s->y_predictor_table[i+j] = 0xfffffffe &
make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->ydt);
s->c_predictor_table[i+j] = 0xfffffffe &
make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->cdt);
s->fat_y_predictor_table[i+j] = 0xfffffffe &
make_ydt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_ydt);
s->fat_c_predictor_table[i+j] = 0xfffffffe &
make_cdt24_entry(delta_pair >> 4, delta_pair & 0xf, s->fat_cdt);
}
s->y_predictor_table[i+(j-1)] |= 1;
s->c_predictor_table[i+(j-1)] |= 1;
s->fat_y_predictor_table[i+(j-1)] |= 1;
s->fat_c_predictor_table[i+(j-1)] |= 1;
}
}
/* Returns the number of bytes consumed from the bytestream. Returns -1 if
* there was an error while decoding the header */
static int truemotion1_decode_header(TrueMotion1Context *s)
@@ -229,14 +334,15 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
header.header_size = ((s->buf[0] >> 5) | (s->buf[0] << 3)) & 0x7f;
if (s->buf[0] < 0x10)
{
av_log(s->avctx, AV_LOG_ERROR, "invalid header size\n");
av_log(s->avctx, AV_LOG_ERROR, "invalid header size (%d)\n", s->buf[0]);
return -1;
}
/* unscramble the header bytes with a XOR operation */
memset(header_buffer, 0, 128);
for (i = 1; i < header.header_size; i++)
header_buffer[i - 1] = s->buf[i] ^ s->buf[i + 1];
header.compression = header_buffer[0];
header.deltaset = header_buffer[1];
header.vectable = header_buffer[2];
@@ -253,7 +359,7 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
{
if (header.header_type > 3)
{
av_log(s->avctx, AV_LOG_ERROR, "truemotion1: invalid header type\n");
av_log(s->avctx, AV_LOG_ERROR, "invalid header type (%d)\n", header.header_type);
return -1;
} else if ((header.header_type == 2) || (header.header_type == 3)) {
s->flags = header.flags;
@@ -265,6 +371,7 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
s->flags = FLAG_KEYFRAME;
if (s->flags & FLAG_SPRITE) {
av_log(s->avctx, AV_LOG_INFO, "SPRITE frame found, please report the sample to the developers\n");
s->w = header.width;
s->h = header.height;
s->x = header.xoffset;
@@ -274,7 +381,10 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
s->h = header.ysize;
if (header.header_type < 2) {
if ((s->w < 213) && (s->h >= 176))
{
s->flags |= FLAG_INTERPOLATED;
av_log(s->avctx, AV_LOG_INFO, "INTERPOLATION selected, please report the sample to the developers\n");
}
}
}
@@ -297,15 +407,22 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
return -1;
}
}
// FIXME: where to place this ?!?!
if (compression_types[header.compression].algorithm == ALGO_RGB24H)
s->avctx->pix_fmt = PIX_FMT_BGR24;
else
s->avctx->pix_fmt = PIX_FMT_RGB555; // RGB565 is supported as well
if ((header.deltaset != s->last_deltaset) || (header.vectable != s->last_vectable))
{
if (compression_types[header.compression].algorithm == ALGO_RGB24H)
{
av_log(s->avctx, AV_LOG_ERROR, "24bit compression not yet supported\n");
}
gen_vector_table24(s, sel_vector_table);
else
gen_vector_table(s, sel_vector_table);
if (s->avctx->pix_fmt == PIX_FMT_RGB555)
gen_vector_table15(s, sel_vector_table);
else
gen_vector_table16(s, sel_vector_table);
}
/* set up pointers to the other key data chunks */
@@ -327,6 +444,15 @@ static int truemotion1_decode_header(TrueMotion1Context *s)
s->block_height = compression_types[header.compression].block_height;
s->block_type = compression_types[header.compression].block_type;
if (s->avctx->debug & FF_DEBUG_PICT_INFO)
av_log(s->avctx, AV_LOG_INFO, "tables: %d / %d c:%d %dx%d t:%d %s%s%s%s\n",
s->last_deltaset, s->last_vectable, s->compression, s->block_width,
s->block_height, s->block_type,
s->flags & FLAG_KEYFRAME ? " KEY" : "",
s->flags & FLAG_INTERFRAME ? " INTER" : "",
s->flags & FLAG_SPRITE ? " SPRITE" : "",
s->flags & FLAG_INTERPOLATED ? " INTERPOL" : "");
return header.header_size;
}
@@ -336,7 +462,12 @@ static int truemotion1_decode_init(AVCodecContext *avctx)
s->avctx = avctx;
avctx->pix_fmt = PIX_FMT_RGB555;
// FIXME: it may change ?
// if (avctx->bits_per_sample == 24)
// avctx->pix_fmt = PIX_FMT_RGB24;
// else
// avctx->pix_fmt = PIX_FMT_RGB555;
avctx->has_b_frames = 0;
s->frame.data[0] = s->prev_frame.data[0] = NULL;
@@ -348,6 +479,32 @@ static int truemotion1_decode_init(AVCodecContext *avctx)
return 0;
}
/*
Block decoding order:
dxi: Y-Y
dxic: Y-C-Y
dxic2: Y-C-Y-C
hres,vres,i,i%vres (0 < i < 4)
2x2 0: 0 dxic2
2x2 1: 1 dxi
2x2 2: 0 dxic2
2x2 3: 1 dxi
2x4 0: 0 dxic2
2x4 1: 1 dxi
2x4 2: 2 dxi
2x4 3: 3 dxi
4x2 0: 0 dxic
4x2 1: 1 dxi
4x2 2: 0 dxic
4x2 3: 1 dxi
4x4 0: 0 dxic
4x4 1: 1 dxi
4x4 2: 2 dxi
4x4 3: 3 dxi
*/
#define GET_NEXT_INDEX() \
{\
if (index_stream_index >= s->index_stream_size) { \
@@ -374,6 +531,25 @@ static int truemotion1_decode_init(AVCodecContext *avctx)
} else \
index++;
#define APPLY_C_PREDICTOR_24() \
predictor_pair = s->c_predictor_table[index]; \
c_horiz_pred += (predictor_pair >> 1); \
if (predictor_pair & 1) { \
GET_NEXT_INDEX() \
if (!index) { \
GET_NEXT_INDEX() \
predictor_pair = s->fat_c_predictor_table[index]; \
c_horiz_pred += (predictor_pair >> 1); \
if (predictor_pair & 1) \
GET_NEXT_INDEX() \
else \
index++; \
} \
} else \
index++;
// c_last+coff = clast+c_horiz_pred;
#define APPLY_Y_PREDICTOR() \
predictor_pair = s->y_predictor_table[index]; \
horiz_pred += (predictor_pair >> 1); \
@@ -391,6 +567,23 @@ static int truemotion1_decode_init(AVCodecContext *avctx)
} else \
index++;
#define APPLY_Y_PREDICTOR_24() \
predictor_pair = s->y_predictor_table[index]; \
horiz_pred += (predictor_pair >> 1); \
if (predictor_pair & 1) { \
GET_NEXT_INDEX() \
if (!index) { \
GET_NEXT_INDEX() \
predictor_pair = s->fat_y_predictor_table[index]; \
horiz_pred += (predictor_pair >> 1); \
if (predictor_pair & 1) \
GET_NEXT_INDEX() \
else \
index++; \
} \
} else \
index++;
#define OUTPUT_PIXEL_PAIR() \
*current_pixel_pair = *vert_pred + horiz_pred; \
*vert_pred++ = *current_pixel_pair++; \
@@ -528,6 +721,149 @@ static void truemotion1_decode_16bit(TrueMotion1Context *s)
}
}
static void truemotion1_decode_24bit(TrueMotion1Context *s)
{
int y;
int pixels_left; /* remaining pixels on this line */
unsigned int predictor_pair;
unsigned int horiz_pred;
unsigned int c_horiz_pred;
unsigned int *vert_pred;
unsigned int *current_pixel_pair;
unsigned int *prev_pixel_pair;
unsigned char *current_line = s->frame.data[0];
unsigned char *prev_line = s->prev_frame.data[0];
int keyframe = s->flags & FLAG_KEYFRAME;
/* these variables are for managing the stream of macroblock change bits */
unsigned char *mb_change_bits = s->mb_change_bits;
unsigned char mb_change_byte;
unsigned char mb_change_byte_mask;
int mb_change_index;
/* these variables are for managing the main index stream */
int index_stream_index = 0; /* yes, the index into the index stream */
int index;
/* clean out the line buffer */
memset(s->vert_pred, 0, s->avctx->width * sizeof(unsigned short));
GET_NEXT_INDEX();
for (y = 0; y < s->avctx->height; y++) {
/* re-init variables for the next line iteration */
horiz_pred = c_horiz_pred = 0;
current_pixel_pair = (unsigned int *)current_line;
prev_pixel_pair = (unsigned int *)prev_line;
vert_pred = s->vert_pred;
mb_change_index = 0;
mb_change_byte = mb_change_bits[mb_change_index++];
mb_change_byte_mask = 0x01;
pixels_left = s->avctx->width;
while (pixels_left > 0) {
if (keyframe || ((mb_change_byte & mb_change_byte_mask) == 0)) {
switch (y & 3) {
case 0:
/* if macroblock width is 2, apply C-Y-C-Y; else
* apply C-Y-Y */
if (s->block_width == 2) {
APPLY_C_PREDICTOR_24();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
// OUTPUT_PIXEL_PAIR_24_C();
APPLY_C_PREDICTOR_24();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
// OUTPUT_PIXEL_PAIR_24_C();
} else {
APPLY_C_PREDICTOR_24();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
// OUTPUT_PIXEL_PAIR_24_C();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
// OUTPUT_PIXEL_PAIR_24_C();
}
break;
case 1:
case 3:
/* always apply 2 Y predictors on these iterations */
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
break;
case 2:
/* this iteration might be C-Y-C-Y, Y-Y, or C-Y-Y
* depending on the macroblock type */
if (s->block_type == BLOCK_2x2) {
APPLY_C_PREDICTOR_24();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
// OUTPUT_PIXEL_PAIR_24_C();
APPLY_C_PREDICTOR_24();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
// OUTPUT_PIXEL_PAIR_24_C();
} else if (s->block_type == BLOCK_4x2) {
APPLY_C_PREDICTOR_24();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
// OUTPUT_PIXEL_PAIR_24_C();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
// OUTPUT_PIXEL_PAIR_24_C();
} else {
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
APPLY_Y_PREDICTOR_24();
OUTPUT_PIXEL_PAIR();
}
break;
}
} else {
/* skip (copy) four pixels, but reassign the horizontal
* predictor */
*current_pixel_pair = *prev_pixel_pair++;
*vert_pred++ = *current_pixel_pair++;
*current_pixel_pair = *prev_pixel_pair++;
horiz_pred = *current_pixel_pair - *vert_pred;
// c_horiz_pred = *current_pixel_pair - *vert_pred;
*vert_pred++ = *current_pixel_pair++;
}
if (!keyframe) {
mb_change_byte_mask <<= 1;
/* next byte */
if (!mb_change_byte_mask) {
mb_change_byte = mb_change_bits[mb_change_index++];
mb_change_byte_mask = 0x01;
}
}
pixels_left -= 4;
}
/* next change row */
if (((y + 1) & 3) == 0)
mb_change_bits += s->mb_change_bits_row_size;
current_line += s->frame.linesize[0];
prev_line += s->prev_frame.linesize[0];
}
}
static int truemotion1_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
@@ -537,12 +873,6 @@ static int truemotion1_decode_frame(AVCodecContext *avctx,
s->buf = buf;
s->size = buf_size;
s->frame.reference = 1;
if (avctx->get_buffer(avctx, &s->frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "truemotion1: get_buffer() failed\n");
return -1;
}
/* no supplementary picture */
if (buf_size == 0)
return 0;
@@ -550,13 +880,19 @@ static int truemotion1_decode_frame(AVCodecContext *avctx,
if (truemotion1_decode_header(s) == -1)
return -1;
s->frame.reference = 1;
if (avctx->get_buffer(avctx, &s->frame) < 0) {
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
/* check for a do-nothing frame and copy the previous frame */
if (compression_types[s->compression].algorithm == ALGO_NOP)
{
memcpy(s->frame.data[0], s->prev_frame.data[0],
s->frame.linesize[0] * s->avctx->height);
} else if (compression_types[s->compression].algorithm == ALGO_RGB24H) {
av_log(s->avctx, AV_LOG_ERROR, "24bit compression not yet supported\n");
truemotion1_decode_24bit(s);
} else {
truemotion1_decode_16bit(s);
}