
lavc: APV decoder

Author: Mark Thompson
Date:   2025-04-19 19:10:34 +01:00
Parent: 324330a11e
Commit: 483cadf8d7

9 changed files with 890 additions and 1 deletion

configure

@@ -2935,6 +2935,7 @@ apng_decoder_select="inflate_wrapper"
apng_encoder_select="deflate_wrapper llvidencdsp"
aptx_encoder_select="audio_frame_queue"
aptx_hd_encoder_select="audio_frame_queue"
+apv_decoder_select="cbs_apv"
asv1_decoder_select="blockdsp bswapdsp idctdsp"
asv1_encoder_select="aandcttables bswapdsp fdctdsp pixblockdsp"
asv2_decoder_select="blockdsp bswapdsp idctdsp"

libavcodec/Makefile

@@ -244,6 +244,7 @@ OBJS-$(CONFIG_APTX_HD_DECODER) += aptxdec.o aptx.o
OBJS-$(CONFIG_APTX_HD_ENCODER) += aptxenc.o aptx.o
OBJS-$(CONFIG_APNG_DECODER) += png.o pngdec.o pngdsp.o
OBJS-$(CONFIG_APNG_ENCODER) += png.o pngenc.o
+OBJS-$(CONFIG_APV_DECODER) += apv_decode.o apv_entropy.o apv_dsp.o
OBJS-$(CONFIG_ARBC_DECODER) += arbc.o
OBJS-$(CONFIG_ARGO_DECODER) += argo.o
OBJS-$(CONFIG_SSA_DECODER) += assdec.o ass.o

libavcodec/allcodecs.c

@@ -47,6 +47,7 @@ extern const FFCodec ff_anm_decoder;
extern const FFCodec ff_ansi_decoder;
extern const FFCodec ff_apng_encoder;
extern const FFCodec ff_apng_decoder;
+extern const FFCodec ff_apv_decoder;
extern const FFCodec ff_arbc_decoder;
extern const FFCodec ff_argo_decoder;
extern const FFCodec ff_asv1_encoder;

libavcodec/apv_decode.c (new file)

@@ -0,0 +1,433 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/mastering_display_metadata.h"
#include "libavutil/mem_internal.h"
#include "libavutil/pixdesc.h"
#include "apv.h"
#include "apv_decode.h"
#include "apv_dsp.h"
#include "avcodec.h"
#include "cbs.h"
#include "cbs_apv.h"
#include "codec_internal.h"
#include "decode.h"
#include "internal.h"
#include "thread.h"
typedef struct APVDecodeContext {
CodedBitstreamContext *cbc;
APVDSPContext dsp;
CodedBitstreamFragment au;
APVDerivedTileInfo tile_info;
APVVLCLUT decode_lut;
AVFrame *output_frame;
uint8_t warned_additional_frames;
uint8_t warned_unknown_pbu_types;
} APVDecodeContext;
static const enum AVPixelFormat apv_format_table[5][5] = {
{ AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY10, AV_PIX_FMT_GRAY12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16 },
{ 0 }, // 4:2:0 is not valid.
{ AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_YUV422P16 },
{ AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_YUV444P16 },
{ AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_YUVA444P16 },
};
static int apv_decode_check_format(AVCodecContext *avctx,
const APVRawFrameHeader *header)
{
int err, bit_depth;
avctx->profile = header->frame_info.profile_idc;
avctx->level = header->frame_info.level_idc;
bit_depth = header->frame_info.bit_depth_minus8 + 8;
if (bit_depth < 8 || bit_depth > 16 || bit_depth % 2) {
avpriv_request_sample(avctx, "Bit depth %d", bit_depth);
return AVERROR_PATCHWELCOME;
}
avctx->pix_fmt =
apv_format_table[header->frame_info.chroma_format_idc][bit_depth - 4 >> 2];
err = ff_set_dimensions(avctx,
FFALIGN(header->frame_info.frame_width, 16),
FFALIGN(header->frame_info.frame_height, 16));
if (err < 0) {
// Unsupported frame size.
return err;
}
avctx->width = header->frame_info.frame_width;
avctx->height = header->frame_info.frame_height;
avctx->sample_aspect_ratio = (AVRational){ 1, 1 };
avctx->color_primaries = header->color_primaries;
avctx->color_trc = header->transfer_characteristics;
avctx->colorspace = header->matrix_coefficients;
avctx->color_range = header->full_range_flag ? AVCOL_RANGE_JPEG
: AVCOL_RANGE_MPEG;
avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;
avctx->refs = 0;
avctx->has_b_frames = 0;
return 0;
}
static const CodedBitstreamUnitType apv_decompose_unit_types[] = {
APV_PBU_PRIMARY_FRAME,
APV_PBU_METADATA,
};
static av_cold int apv_decode_init(AVCodecContext *avctx)
{
APVDecodeContext *apv = avctx->priv_data;
int err;
err = ff_cbs_init(&apv->cbc, AV_CODEC_ID_APV, avctx);
if (err < 0)
return err;
apv->cbc->decompose_unit_types =
apv_decompose_unit_types;
apv->cbc->nb_decompose_unit_types =
FF_ARRAY_ELEMS(apv_decompose_unit_types);
// Extradata could be set here, but is ignored by the decoder.
ff_apv_entropy_build_decode_lut(&apv->decode_lut);
ff_apv_dsp_init(&apv->dsp);
return 0;
}
static av_cold int apv_decode_close(AVCodecContext *avctx)
{
APVDecodeContext *apv = avctx->priv_data;
ff_cbs_fragment_free(&apv->au);
ff_cbs_close(&apv->cbc);
return 0;
}
static int apv_decode_block(AVCodecContext *avctx,
void *output,
ptrdiff_t pitch,
GetBitContext *gbc,
APVEntropyState *entropy_state,
int bit_depth,
int qp_shift,
const uint16_t *qmatrix)
{
APVDecodeContext *apv = avctx->priv_data;
int err;
LOCAL_ALIGNED_32(int16_t, coeff, [64]);
err = ff_apv_entropy_decode_block(coeff, gbc, entropy_state);
if (err < 0)
return 0;
apv->dsp.decode_transquant(output, pitch,
coeff, qmatrix,
bit_depth, qp_shift);
return 0;
}
static int apv_decode_tile_component(AVCodecContext *avctx, void *data,
int job, int thread)
{
APVRawFrame *input = data;
APVDecodeContext *apv = avctx->priv_data;
const CodedBitstreamAPVContext *apv_cbc = apv->cbc->priv_data;
const APVDerivedTileInfo *tile_info = &apv_cbc->tile_info;
int tile_index = job / apv_cbc->num_comp;
int comp_index = job % apv_cbc->num_comp;
const AVPixFmtDescriptor *pix_fmt_desc =
av_pix_fmt_desc_get(avctx->pix_fmt);
int sub_w = comp_index == 0 ? 1 : pix_fmt_desc->log2_chroma_w + 1;
int sub_h = comp_index == 0 ? 1 : pix_fmt_desc->log2_chroma_h + 1;
APVRawTile *tile = &input->tile[tile_index];
int tile_y = tile_index / tile_info->tile_cols;
int tile_x = tile_index % tile_info->tile_cols;
int tile_start_x = tile_info->col_starts[tile_x];
int tile_start_y = tile_info->row_starts[tile_y];
int tile_width = tile_info->col_starts[tile_x + 1] - tile_start_x;
int tile_height = tile_info->row_starts[tile_y + 1] - tile_start_y;
int tile_mb_width = tile_width / APV_MB_WIDTH;
int tile_mb_height = tile_height / APV_MB_HEIGHT;
int blk_mb_width = 2 / sub_w;
int blk_mb_height = 2 / sub_h;
int bit_depth;
int qp_shift;
LOCAL_ALIGNED_32(uint16_t, qmatrix_scaled, [64]);
GetBitContext gbc;
APVEntropyState entropy_state = {
.log_ctx = avctx,
.decode_lut = &apv->decode_lut,
.prev_dc = 0,
.prev_dc_diff = 20,
.prev_1st_ac_level = 0,
};
init_get_bits8(&gbc, tile->tile_data[comp_index],
tile->tile_header.tile_data_size[comp_index]);
// Combine the bitstream quantisation matrix with the qp scaling
// in advance. (Including qp_shift as well would overflow 16 bits.)
// Fix the row ordering at the same time.
{
static const uint8_t apv_level_scale[6] = { 40, 45, 51, 57, 64, 71 };
int qp = tile->tile_header.tile_qp[comp_index];
int level_scale = apv_level_scale[qp % 6];
bit_depth = apv_cbc->bit_depth;
qp_shift = qp / 6;
for (int y = 0; y < 8; y++) {
for (int x = 0; x < 8; x++)
qmatrix_scaled[y * 8 + x] = level_scale *
input->frame_header.quantization_matrix.q_matrix[comp_index][x][y];
}
}
for (int mb_y = 0; mb_y < tile_mb_height; mb_y++) {
for (int mb_x = 0; mb_x < tile_mb_width; mb_x++) {
for (int blk_y = 0; blk_y < blk_mb_height; blk_y++) {
for (int blk_x = 0; blk_x < blk_mb_width; blk_x++) {
int frame_y = (tile_start_y +
APV_MB_HEIGHT * mb_y +
APV_TR_SIZE * blk_y) / sub_h;
int frame_x = (tile_start_x +
APV_MB_WIDTH * mb_x +
APV_TR_SIZE * blk_x) / sub_w;
ptrdiff_t frame_pitch = apv->output_frame->linesize[comp_index];
uint8_t *block_start = apv->output_frame->data[comp_index] +
frame_y * frame_pitch + 2 * frame_x;
apv_decode_block(avctx,
block_start, frame_pitch,
&gbc, &entropy_state,
bit_depth,
qp_shift,
qmatrix_scaled);
}
}
}
}
av_log(avctx, AV_LOG_DEBUG,
"Decoded tile %d component %d: %dx%d MBs starting at (%d,%d)\n",
tile_index, comp_index, tile_mb_width, tile_mb_height,
tile_start_x, tile_start_y);
return 0;
}
static int apv_decode(AVCodecContext *avctx, AVFrame *output,
APVRawFrame *input)
{
APVDecodeContext *apv = avctx->priv_data;
const CodedBitstreamAPVContext *apv_cbc = apv->cbc->priv_data;
const APVDerivedTileInfo *tile_info = &apv_cbc->tile_info;
int err, job_count;
err = apv_decode_check_format(avctx, &input->frame_header);
if (err < 0) {
av_log(avctx, AV_LOG_ERROR, "Unsupported format parameters.\n");
return err;
}
err = ff_thread_get_buffer(avctx, output, 0);
if (err) {
av_log(avctx, AV_LOG_ERROR, "No output frame supplied.\n");
return err;
}
apv->output_frame = output;
// Each component within a tile is independent of every other,
// so we can decode all in parallel.
job_count = tile_info->num_tiles * apv_cbc->num_comp;
avctx->execute2(avctx, apv_decode_tile_component,
input, NULL, job_count);
return 0;
}
static int apv_decode_metadata(AVCodecContext *avctx, AVFrame *frame,
const APVRawMetadata *md)
{
int err;
for (int i = 0; i < md->metadata_count; i++) {
const APVRawMetadataPayload *pl = &md->payloads[i];
switch (pl->payload_type) {
case APV_METADATA_MDCV:
{
const APVRawMetadataMDCV *mdcv = &pl->mdcv;
AVMasteringDisplayMetadata *mdm;
err = ff_decode_mastering_display_new(avctx, frame, &mdm);
if (err < 0)
return err;
if (mdm) {
for (int i = 0; i < 3; i++) {
mdm->display_primaries[i][0] =
av_make_q(mdcv->primary_chromaticity_x[i], 1 << 16);
mdm->display_primaries[i][1] =
av_make_q(mdcv->primary_chromaticity_y[i], 1 << 16);
}
mdm->white_point[0] =
av_make_q(mdcv->white_point_chromaticity_x, 1 << 16);
mdm->white_point[1] =
av_make_q(mdcv->white_point_chromaticity_y, 1 << 16);
mdm->max_luminance =
av_make_q(mdcv->max_mastering_luminance, 1 << 8);
mdm->min_luminance =
av_make_q(mdcv->min_mastering_luminance, 1 << 14);
mdm->has_primaries = 1;
mdm->has_luminance = 1;
}
}
break;
case APV_METADATA_CLL:
{
const APVRawMetadataCLL *cll = &pl->cll;
AVContentLightMetadata *clm;
err = ff_decode_content_light_new(avctx, frame, &clm);
if (err < 0)
return err;
if (clm) {
clm->MaxCLL = cll->max_cll;
clm->MaxFALL = cll->max_fall;
}
}
break;
default:
// Ignore other types of metadata.
break;
}
}
return 0;
}
static int apv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
int *got_frame, AVPacket *packet)
{
APVDecodeContext *apv = avctx->priv_data;
CodedBitstreamFragment *au = &apv->au;
int err;
err = ff_cbs_read_packet(apv->cbc, au, packet);
if (err < 0) {
av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
return err;
}
for (int i = 0; i < au->nb_units; i++) {
CodedBitstreamUnit *pbu = &au->units[i];
switch (pbu->type) {
case APV_PBU_PRIMARY_FRAME:
err = apv_decode(avctx, frame, pbu->content);
if (err < 0)
return err;
*got_frame = 1;
break;
case APV_PBU_METADATA:
apv_decode_metadata(avctx, frame, pbu->content);
break;
case APV_PBU_NON_PRIMARY_FRAME:
case APV_PBU_PREVIEW_FRAME:
case APV_PBU_DEPTH_FRAME:
case APV_PBU_ALPHA_FRAME:
if (!avctx->internal->is_copy &&
!apv->warned_additional_frames) {
av_log(avctx, AV_LOG_WARNING,
"Stream contains additional non-primary frames "
"which will be ignored by the decoder.\n");
apv->warned_additional_frames = 1;
}
break;
case APV_PBU_ACCESS_UNIT_INFORMATION:
case APV_PBU_FILLER:
// Not relevant to the decoder.
break;
default:
if (!avctx->internal->is_copy &&
!apv->warned_unknown_pbu_types) {
av_log(avctx, AV_LOG_WARNING,
"Stream contains PBUs with unknown types "
"which will be ignored by the decoder.\n");
apv->warned_unknown_pbu_types = 1;
}
break;
}
}
ff_cbs_fragment_reset(au);
return packet->size;
}
const FFCodec ff_apv_decoder = {
.p.name = "apv",
CODEC_LONG_NAME("Advanced Professional Video"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_APV,
.priv_data_size = sizeof(APVDecodeContext),
.init = apv_decode_init,
.close = apv_decode_close,
FF_CODEC_DECODE_CB(apv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS,
};
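
A note on the quantisation pre-scaling above: apv_decode_tile_component splits the tile QP into qp_shift = qp / 6 and a level scale indexed by qp % 6, folds the level scale into the bitstream quantisation matrix, and transposes the matrix so the DSP routine only has to multiply and shift. A minimal standalone sketch of that step, not part of the diff; scale_qmatrix is a hypothetical helper name and the flat matrix of 16s is only example input:

#include <stdint.h>
#include <stdio.h>

/* Per-QP level scale, copied from apv_decode_tile_component (qp % 6 indexes it). */
static const uint8_t apv_level_scale[6] = { 40, 45, 51, 57, 64, 71 };

/* Hypothetical helper: fold the level scale into the 8x8 bitstream
 * quantisation matrix, transposing rows and columns as the decoder does.
 * qp_shift (qp / 6) is applied later in the DSP routine, since including
 * it here could overflow 16 bits. */
static void scale_qmatrix(uint16_t scaled[64], uint8_t q_matrix[8][8], int qp)
{
    int level_scale = apv_level_scale[qp % 6];
    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 8; x++)
            scaled[y * 8 + x] = level_scale * q_matrix[x][y];
}

int main(void)
{
    uint8_t  example[8][8];
    uint16_t scaled[64];
    int      qp = 33;                   /* example QP */

    for (int y = 0; y < 8; y++)
        for (int x = 0; x < 8; x++)
            example[y][x] = 16;         /* example flat matrix */

    scale_qmatrix(scaled, example, qp);
    printf("qp=%d -> qp_shift=%d, level_scale=%d, scaled[0]=%d\n",
           qp, qp / 6, apv_level_scale[qp % 6], scaled[0]);
    /* qp=33 -> qp_shift=5, level_scale=57, scaled[0]=912 */
    return 0;
}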

libavcodec/apv_decode.h (new file)

@@ -0,0 +1,80 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_APV_DECODE_H
#define AVCODEC_APV_DECODE_H
#include <stdint.h>
#include "apv.h"
#include "avcodec.h"
#include "get_bits.h"
// Number of bits in the entropy look-up tables.
// It may be desirable to tune this per-architecture, as a larger LUT
// trades greater memory use for fewer instructions.
// (N bits -> 24*2^N bytes of tables; 9 -> 12KB of tables.)
#define APV_VLC_LUT_BITS 9
#define APV_VLC_LUT_SIZE (1 << APV_VLC_LUT_BITS)
typedef struct APVVLCLUTEntry {
uint16_t result; // Return value if not reading more.
uint8_t consume; // Number of bits to consume.
uint8_t more; // Whether to read additional bits.
} APVVLCLUTEntry;
typedef struct APVVLCLUT {
APVVLCLUTEntry lut[6][APV_VLC_LUT_SIZE];
} APVVLCLUT;
typedef struct APVEntropyState {
void *log_ctx;
const APVVLCLUT *decode_lut;
int16_t prev_dc;
int16_t prev_dc_diff;
int16_t prev_1st_ac_level;
} APVEntropyState;
/**
* Build the decoder VLC look-up table.
*/
void ff_apv_entropy_build_decode_lut(APVVLCLUT *decode_lut);
/**
* Entropy decode a single 8x8 block to coefficients.
*
* Outputs in block order (dezigzag already applied).
*/
int ff_apv_entropy_decode_block(int16_t *coeff,
GetBitContext *gbc,
APVEntropyState *state);
/**
* Read a single APV VLC code.
*
* This entrypoint is exposed for testing.
*/
unsigned int ff_apv_read_vlc(GetBitContext *gbc, int k_param,
const APVVLCLUT *lut);
#endif /* AVCODEC_APV_DECODE_H */
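
The table-size comment above can be checked by arithmetic: one APVVLCLUTEntry packs into 4 bytes (uint16_t plus two uint8_t), and there are six k_param tables of 2^APV_VLC_LUT_BITS entries each, so the footprint is 6 * 4 * 2^N = 24*2^N bytes, i.e. 12 KiB for N = 9. A small self-contained check, with the types restated from the header so the snippet compiles on its own:

#include <stdint.h>
#include <stdio.h>

#define APV_VLC_LUT_BITS 9
#define APV_VLC_LUT_SIZE (1 << APV_VLC_LUT_BITS)

typedef struct APVVLCLUTEntry {
    uint16_t result;
    uint8_t  consume;
    uint8_t  more;
} APVVLCLUTEntry;

typedef struct APVVLCLUT {
    APVVLCLUTEntry lut[6][APV_VLC_LUT_SIZE];
} APVVLCLUT;

int main(void)
{
    /* 4-byte entries, six k_param tables, 2^9 entries per table. */
    printf("entry  = %zu bytes\n", sizeof(APVVLCLUTEntry));   /* 4     */
    printf("tables = %zu bytes\n", sizeof(APVVLCLUT));        /* 12288 */
    return 0;
}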

libavcodec/apv_dsp.c (new file)

@@ -0,0 +1,136 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/common.h"
#include "apv.h"
#include "apv_dsp.h"
static const int8_t apv_trans_matrix[8][8] = {
{ 64, 64, 64, 64, 64, 64, 64, 64 },
{ 89, 75, 50, 18, -18, -50, -75, -89 },
{ 84, 35, -35, -84, -84, -35, 35, 84 },
{ 75, -18, -89, -50, 50, 89, 18, -75 },
{ 64, -64, -64, 64, 64, -64, -64, 64 },
{ 50, -89, 18, 75, -75, -18, 89, -50 },
{ 35, -84, 84, -35, -35, 84, -84, 35 },
{ 18, -50, 75, -89, 89, -75, 50, -18 },
};
static void apv_decode_transquant_c(void *output,
ptrdiff_t pitch,
const int16_t *input_flat,
const int16_t *qmatrix_flat,
int bit_depth,
int qp_shift)
{
const int16_t (*input)[8] = (const int16_t(*)[8])input_flat;
const int16_t (*qmatrix)[8] = (const int16_t(*)[8])qmatrix_flat;
int16_t scaled_coeff[8][8];
int32_t recon_sample[8][8];
// Dequant.
{
// Note that level_scale was already combined into qmatrix
// before we got here.
int bd_shift = bit_depth + 3 - 5;
for (int y = 0; y < 8; y++) {
for (int x = 0; x < 8; x++) {
int coeff = (((input[y][x] * qmatrix[y][x]) << qp_shift) +
(1 << (bd_shift - 1))) >> bd_shift;
scaled_coeff[y][x] =
av_clip(coeff, APV_MIN_TRANS_COEFF,
APV_MAX_TRANS_COEFF);
}
}
}
// Transform.
{
int32_t tmp[8][8];
// Vertical transform of columns.
for (int x = 0; x < 8; x++) {
for (int i = 0; i < 8; i++) {
int sum = 0;
for (int j = 0; j < 8; j++)
sum += apv_trans_matrix[j][i] * scaled_coeff[j][x];
tmp[i][x] = sum;
}
}
// Renormalise.
for (int x = 0; x < 8; x++) {
for (int y = 0; y < 8; y++)
tmp[y][x] = (tmp[y][x] + 64) >> 7;
}
// Horizontal transform of rows.
for (int y = 0; y < 8; y++) {
for (int i = 0; i < 8; i++) {
int sum = 0;
for (int j = 0; j < 8; j++)
sum += apv_trans_matrix[j][i] * tmp[y][j];
recon_sample[y][i] = sum;
}
}
}
// Output.
if (bit_depth == 8) {
uint8_t *ptr = output;
int bd_shift = 20 - bit_depth;
for (int y = 0; y < 8; y++) {
for (int x = 0; x < 8; x++) {
int sample = ((recon_sample[y][x] +
(1 << (bd_shift - 1))) >> bd_shift) +
(1 << (bit_depth - 1));
ptr[x] = av_clip_uintp2(sample, bit_depth);
}
ptr += pitch;
}
} else {
uint16_t *ptr = output;
int bd_shift = 20 - bit_depth;
pitch /= 2; // Pitch was in bytes, 2 bytes per sample.
for (int y = 0; y < 8; y++) {
for (int x = 0; x < 8; x++) {
int sample = ((recon_sample[y][x] +
(1 << (bd_shift - 1))) >> bd_shift) +
(1 << (bit_depth - 1));
ptr[x] = av_clip_uintp2(sample, bit_depth);
}
ptr += pitch;
}
}
}
av_cold void ff_apv_dsp_init(APVDSPContext *dsp)
{
dsp->decode_transquant = apv_decode_transquant_c;
}
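
The two shift amounts in apv_decode_transquant_c are worth spelling out: the dequant stage rounds by bd_shift = bit_depth + 3 - 5 (8 for a 10-bit stream), and the output stage rounds by 20 - bit_depth (10 for 10-bit) before adding the mid-level offset 1 << (bit_depth - 1) and clipping. A worked single-sample sketch, not part of the diff; the input value is illustrative only and clip_uintp2 stands in for av_clip_uintp2:

#include <stdio.h>

/* Clip to an unsigned bit_depth-bit range, like av_clip_uintp2(). */
static int clip_uintp2(int v, int bits)
{
    int max = (1 << bits) - 1;
    return v < 0 ? 0 : v > max ? max : v;
}

int main(void)
{
    int bit_depth = 10;

    /* Dequant rounding shift applied to (coeff * qmatrix) << qp_shift. */
    int dq_shift  = bit_depth + 3 - 5;     /* 8 for 10-bit  */

    /* Output rounding shift applied to the inverse-transform result. */
    int out_shift = 20 - bit_depth;        /* 10 for 10-bit */

    /* Example inverse-transform output (illustrative value only). */
    int recon  = 300000;
    int sample = ((recon + (1 << (out_shift - 1))) >> out_shift) +
                 (1 << (bit_depth - 1));   /* add mid-level offset 512 */

    printf("dq_shift=%d out_shift=%d sample=%d\n",
           dq_shift, out_shift, clip_uintp2(sample, bit_depth));
    /* dq_shift=8 out_shift=10 sample=805 */
    return 0;
}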

libavcodec/apv_dsp.h (new file)

@@ -0,0 +1,37 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_APV_DSP_H
#define AVCODEC_APV_DSP_H
#include <stddef.h>
#include <stdint.h>
typedef struct APVDSPContext {
void (*decode_transquant)(void *output,
ptrdiff_t pitch,
const int16_t *input,
const int16_t *qmatrix,
int bit_depth,
int qp_shift);
} APVDSPContext;
void ff_apv_dsp_init(APVDSPContext *dsp);
#endif /* AVCODEC_APV_DSP_H */
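
APVDSPContext follows the usual FFmpeg DSP-context pattern: the init function installs the portable C routine, and an architecture-specific init can later overwrite the function pointer; this commit provides only the C version. A hedged sketch of the pattern with hypothetical names (apv_dsp_init_sketch, decode_transquant_fast and have_simd are illustrative, not part of the commit):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct APVDSPContext {
    void (*decode_transquant)(void *output, ptrdiff_t pitch,
                              const int16_t *input, const int16_t *qmatrix,
                              int bit_depth, int qp_shift);
} APVDSPContext;

/* Portable C fallback (body omitted; see apv_dsp.c for the real one). */
static void decode_transquant_c(void *output, ptrdiff_t pitch,
                                const int16_t *input, const int16_t *qmatrix,
                                int bit_depth, int qp_shift)
{
    (void)output; (void)pitch; (void)input; (void)qmatrix;
    (void)bit_depth; (void)qp_shift;
    puts("C fallback");
}

/* Hypothetical optimised routine, e.g. a SIMD implementation. */
static void decode_transquant_fast(void *output, ptrdiff_t pitch,
                                   const int16_t *input, const int16_t *qmatrix,
                                   int bit_depth, int qp_shift)
{
    (void)output; (void)pitch; (void)input; (void)qmatrix;
    (void)bit_depth; (void)qp_shift;
    puts("optimised path");
}

/* Install the C fallback, then let a (hypothetical) arch check override it. */
static void apv_dsp_init_sketch(APVDSPContext *dsp, int have_simd)
{
    dsp->decode_transquant = decode_transquant_c;
    if (have_simd)
        dsp->decode_transquant = decode_transquant_fast;
}

int main(void)
{
    APVDSPContext dsp;
    apv_dsp_init_sketch(&dsp, 1);
    dsp.decode_transquant(NULL, 0, NULL, NULL, 10, 5);  /* "optimised path" */
    return 0;
}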

libavcodec/apv_entropy.c (new file)

@@ -0,0 +1,200 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "apv.h"
#include "apv_decode.h"
void ff_apv_entropy_build_decode_lut(APVVLCLUT *decode_lut)
{
const int code_len = APV_VLC_LUT_BITS;
const int lut_size = APV_VLC_LUT_SIZE;
for (int k = 0; k <= 5; k++) {
for (unsigned int code = 0; code < lut_size; code++) {
APVVLCLUTEntry *ent = &decode_lut->lut[k][code];
unsigned int first_bit = code & (1 << code_len - 1);
unsigned int remaining_bits = code ^ first_bit;
if (first_bit) {
ent->consume = 1 + k;
ent->result = remaining_bits >> (code_len - k - 1);
ent->more = 0;
} else {
unsigned int second_bit = code & (1 << code_len - 2);
remaining_bits ^= second_bit;
if (second_bit) {
unsigned int bits_left = code_len - 2;
unsigned int first_set = bits_left - av_log2(remaining_bits);
unsigned int last_bits = first_set - 1 + k;
if (first_set + last_bits <= bits_left) {
// Whole code fits here.
ent->consume = 2 + first_set + last_bits;
ent->result = ((2 << k) +
(((1 << first_set - 1) - 1) << k) +
((code >> bits_left - first_set - last_bits) & (1 << last_bits) - 1));
ent->more = 0;
} else {
// Need to read more, collapse to default.
ent->consume = 2;
ent->more = 1;
}
} else {
ent->consume = 2 + k;
ent->result = (1 << k) + (remaining_bits >> (code_len - k - 2));
ent->more = 0;
}
}
}
}
}
av_always_inline
static unsigned int apv_read_vlc(GetBitContext *gbc, int k_param,
const APVVLCLUT *lut)
{
unsigned int next_bits;
const APVVLCLUTEntry *ent;
next_bits = show_bits(gbc, APV_VLC_LUT_BITS);
ent = &lut->lut[k_param][next_bits];
if (ent->more) {
unsigned int leading_zeroes;
skip_bits(gbc, ent->consume);
next_bits = show_bits(gbc, 16);
leading_zeroes = 15 - av_log2(next_bits);
skip_bits(gbc, leading_zeroes + 1);
return (2 << k_param) +
((1 << leading_zeroes) - 1) * (1 << k_param) +
get_bits(gbc, leading_zeroes + k_param);
} else {
skip_bits(gbc, ent->consume);
return ent->result;
}
}
unsigned int ff_apv_read_vlc(GetBitContext *gbc, int k_param,
const APVVLCLUT *lut)
{
return apv_read_vlc(gbc, k_param, lut);
}
int ff_apv_entropy_decode_block(int16_t *coeff,
GetBitContext *gbc,
APVEntropyState *state)
{
const APVVLCLUT *lut = state->decode_lut;
int k_param;
// DC coefficient.
{
int abs_dc_coeff_diff;
int sign_dc_coeff_diff;
int dc_coeff;
k_param = av_clip(state->prev_dc_diff >> 1, 0, 5);
abs_dc_coeff_diff = apv_read_vlc(gbc, k_param, lut);
if (abs_dc_coeff_diff > 0)
sign_dc_coeff_diff = get_bits1(gbc);
else
sign_dc_coeff_diff = 0;
if (sign_dc_coeff_diff)
dc_coeff = state->prev_dc - abs_dc_coeff_diff;
else
dc_coeff = state->prev_dc + abs_dc_coeff_diff;
if (dc_coeff < APV_MIN_TRANS_COEFF ||
dc_coeff > APV_MAX_TRANS_COEFF) {
av_log(state->log_ctx, AV_LOG_ERROR,
"Out-of-range DC coefficient value: %d "
"(from prev_dc %d abs_dc_coeff_diff %d sign_dc_coeff_diff %d)\n",
dc_coeff, state->prev_dc, abs_dc_coeff_diff, sign_dc_coeff_diff);
return AVERROR_INVALIDDATA;
}
coeff[0] = dc_coeff;
state->prev_dc = dc_coeff;
state->prev_dc_diff = abs_dc_coeff_diff;
}
// AC coefficients.
{
int scan_pos = 1;
int first_ac = 1;
int prev_level = state->prev_1st_ac_level;
int prev_run = 0;
do {
int coeff_zero_run;
k_param = av_clip(prev_run >> 2, 0, 2);
coeff_zero_run = apv_read_vlc(gbc, k_param, lut);
if (coeff_zero_run > APV_BLK_COEFFS - scan_pos) {
av_log(state->log_ctx, AV_LOG_ERROR,
"Out-of-range zero-run value: %d (at scan pos %d)\n",
coeff_zero_run, scan_pos);
return AVERROR_INVALIDDATA;
}
for (int i = 0; i < coeff_zero_run; i++) {
coeff[ff_zigzag_direct[scan_pos]] = 0;
++scan_pos;
}
prev_run = coeff_zero_run;
if (scan_pos < APV_BLK_COEFFS) {
int abs_ac_coeff_minus1;
int sign_ac_coeff;
int level;
k_param = av_clip(prev_level >> 2, 0, 4);
abs_ac_coeff_minus1 = apv_read_vlc(gbc, k_param, lut);
sign_ac_coeff = get_bits(gbc, 1);
if (sign_ac_coeff)
level = -abs_ac_coeff_minus1 - 1;
else
level = abs_ac_coeff_minus1 + 1;
coeff[ff_zigzag_direct[scan_pos]] = level;
prev_level = abs_ac_coeff_minus1 + 1;
if (first_ac) {
state->prev_1st_ac_level = prev_level;
first_ac = 0;
}
++scan_pos;
}
} while (scan_pos < APV_BLK_COEFFS);
}
return 0;
}
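
Read together, ff_apv_entropy_build_decode_lut and the slow path of apv_read_vlc describe a three-part prefix code with parameter k_param: a leading 1 is followed by k suffix bits; a 00 prefix adds 2^k and is followed by k suffix bits; a 01 prefix is followed by a unary run of z zeros, a terminating 1, and z + k suffix bits. A self-contained, bit-at-a-time reference reader that mirrors that structure, not part of the diff; it uses its own toy bit reader so it compiles outside FFmpeg, and all names are illustrative:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Toy MSB-first bit reader, for illustration only. */
typedef struct BitReader { const uint8_t *buf; size_t pos; } BitReader;

static unsigned get_bit(BitReader *br)
{
    unsigned b = (br->buf[br->pos >> 3] >> (7 - (br->pos & 7))) & 1;
    br->pos++;
    return b;
}

static unsigned get_bits_n(BitReader *br, int n)
{
    unsigned v = 0;
    for (int i = 0; i < n; i++)
        v = (v << 1) | get_bit(br);
    return v;
}

/* Reference decode of one code with parameter k (0..5), following the
 * same code structure that the LUT in ff_apv_entropy_build_decode_lut
 * and the slow path of apv_read_vlc encode. */
static unsigned read_vlc_ref(BitReader *br, int k)
{
    if (get_bit(br))                            /* prefix "1"  */
        return get_bits_n(br, k);
    if (!get_bit(br))                           /* prefix "00" */
        return (1u << k) + get_bits_n(br, k);
    unsigned z = 0;                             /* prefix "01" */
    while (!get_bit(br))                        /* unary run of zeros */
        z++;
    return (2u << k) + ((1u << z) - 1) * (1u << k) + get_bits_n(br, z + k);
}

int main(void)
{
    /* k = 2: "1 01" -> 1, "00 11" -> 7, "01 1 00" -> 8. */
    const uint8_t data[] = { 0xA6, 0xC0 };      /* 10100110 1100.... */
    BitReader br = { data, 0 };
    unsigned a = read_vlc_ref(&br, 2);
    unsigned b = read_vlc_ref(&br, 2);
    unsigned c = read_vlc_ref(&br, 2);
    printf("%u %u %u\n", a, b, c);              /* 1 7 8 */
    return 0;
}

With k = 2 the three prefixes cover the ranges [0, 4), [4, 8) and [8, ...), which is why the example buffer decodes to 1, 7 and 8.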

libavcodec/version.h

@@ -30,7 +30,7 @@
#include "version_major.h"
#define LIBAVCODEC_VERSION_MINOR 1
-#define LIBAVCODEC_VERSION_MICRO 100
+#define LIBAVCODEC_VERSION_MICRO 101
#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
LIBAVCODEC_VERSION_MINOR, \