lavc: APV decoder
libavcodec/apv_decode.c (new file, 433 lines)

@@ -0,0 +1,433 @@
/*
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/mastering_display_metadata.h"
#include "libavutil/mem_internal.h"
#include "libavutil/pixdesc.h"

#include "apv.h"
#include "apv_decode.h"
#include "apv_dsp.h"
#include "avcodec.h"
#include "cbs.h"
#include "cbs_apv.h"
#include "codec_internal.h"
#include "decode.h"
#include "internal.h"
#include "thread.h"

typedef struct APVDecodeContext {
    CodedBitstreamContext *cbc;
    APVDSPContext dsp;

    CodedBitstreamFragment au;
    APVDerivedTileInfo tile_info;

    APVVLCLUT decode_lut;

    AVFrame *output_frame;

    uint8_t warned_additional_frames;
    uint8_t warned_unknown_pbu_types;
} APVDecodeContext;

static const enum AVPixelFormat apv_format_table[5][5] = {
    { AV_PIX_FMT_GRAY8,    AV_PIX_FMT_GRAY10,    AV_PIX_FMT_GRAY12,    AV_PIX_FMT_GRAY14, AV_PIX_FMT_GRAY16    },
    { 0 }, // 4:2:0 is not valid.
    { AV_PIX_FMT_YUV422P,  AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_YUV422P16 },
    { AV_PIX_FMT_YUV444P,  AV_PIX_FMT_YUV444P10, AV_PIX_FMT_YUV444P12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_YUV444P16 },
    { AV_PIX_FMT_YUVA444P, AV_PIX_FMT_YUVA444P10, AV_PIX_FMT_YUVA444P12, AV_PIX_FMT_GRAY14, AV_PIX_FMT_YUVA444P16 },
};
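// The first index is chroma_format_idc; the second is (bit_depth - 8) / 2,
// one column per even bit depth from 8 to 16.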

static int apv_decode_check_format(AVCodecContext *avctx,
                                   const APVRawFrameHeader *header)
{
    int err, bit_depth;

    avctx->profile = header->frame_info.profile_idc;
    avctx->level   = header->frame_info.level_idc;

    bit_depth = header->frame_info.bit_depth_minus8 + 8;
    if (bit_depth < 8 || bit_depth > 16 || bit_depth % 2) {
        avpriv_request_sample(avctx, "Bit depth %d", bit_depth);
        return AVERROR_PATCHWELCOME;
    }
    avctx->pix_fmt =
        apv_format_table[header->frame_info.chroma_format_idc][(bit_depth - 8) >> 1];
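    // For example, a 10-bit 4:2:2 stream (chroma_format_idc 2) selects
    // AV_PIX_FMT_YUV422P10 here.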

    err = ff_set_dimensions(avctx,
                            FFALIGN(header->frame_info.frame_width,  16),
                            FFALIGN(header->frame_info.frame_height, 16));
    if (err < 0) {
        // Unsupported frame size.
        return err;
    }
    avctx->width  = header->frame_info.frame_width;
    avctx->height = header->frame_info.frame_height;

    avctx->sample_aspect_ratio = (AVRational){ 1, 1 };

    avctx->color_primaries = header->color_primaries;
    avctx->color_trc       = header->transfer_characteristics;
    avctx->colorspace      = header->matrix_coefficients;
    avctx->color_range     = header->full_range_flag ? AVCOL_RANGE_JPEG
                                                     : AVCOL_RANGE_MPEG;
    avctx->chroma_sample_location = AVCHROMA_LOC_TOPLEFT;

    avctx->refs         = 0;
    avctx->has_b_frames = 0;

    return 0;
}

static const CodedBitstreamUnitType apv_decompose_unit_types[] = {
    APV_PBU_PRIMARY_FRAME,
    APV_PBU_METADATA,
};
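// CBS parses the content of only these PBU types; all other PBUs still
// appear in the fragment as opaque units and are skipped or warned about
// in apv_decode_frame() below.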

static av_cold int apv_decode_init(AVCodecContext *avctx)
{
    APVDecodeContext *apv = avctx->priv_data;
    int err;

    err = ff_cbs_init(&apv->cbc, AV_CODEC_ID_APV, avctx);
    if (err < 0)
        return err;

    apv->cbc->decompose_unit_types =
        apv_decompose_unit_types;
    apv->cbc->nb_decompose_unit_types =
        FF_ARRAY_ELEMS(apv_decompose_unit_types);

    // Extradata could be set here, but is ignored by the decoder.

    ff_apv_entropy_build_decode_lut(&apv->decode_lut);
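    // The VLC lookup table is bitstream-independent, so it is built once
    // per decoder instance rather than per frame.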

    ff_apv_dsp_init(&apv->dsp);

    return 0;
}

static av_cold int apv_decode_close(AVCodecContext *avctx)
{
    APVDecodeContext *apv = avctx->priv_data;

    ff_cbs_fragment_free(&apv->au);
    ff_cbs_close(&apv->cbc);

    return 0;
}

static int apv_decode_block(AVCodecContext *avctx,
                            void *output,
                            ptrdiff_t pitch,
                            GetBitContext *gbc,
                            APVEntropyState *entropy_state,
                            int bit_depth,
                            int qp_shift,
                            const uint16_t *qmatrix)
{
    APVDecodeContext *apv = avctx->priv_data;
    int err;

    LOCAL_ALIGNED_32(int16_t, coeff, [64]);

    err = ff_apv_entropy_decode_block(coeff, gbc, entropy_state);
    if (err < 0)
        return err;

    apv->dsp.decode_transquant(output, pitch,
                               coeff, qmatrix,
                               bit_depth, qp_shift);

    return 0;
}

static int apv_decode_tile_component(AVCodecContext *avctx, void *data,
                                     int job, int thread)
{
    APVRawFrame      *input = data;
    APVDecodeContext *apv   = avctx->priv_data;
    const CodedBitstreamAPVContext *apv_cbc = apv->cbc->priv_data;
    const APVDerivedTileInfo *tile_info = &apv_cbc->tile_info;

    int tile_index = job / apv_cbc->num_comp;
    int comp_index = job % apv_cbc->num_comp;

    const AVPixFmtDescriptor *pix_fmt_desc =
        av_pix_fmt_desc_get(avctx->pix_fmt);

    int sub_w = comp_index == 0 ? 1 : 1 << pix_fmt_desc->log2_chroma_w;
    int sub_h = comp_index == 0 ? 1 : 1 << pix_fmt_desc->log2_chroma_h;

    APVRawTile *tile = &input->tile[tile_index];

    int tile_y = tile_index / tile_info->tile_cols;
    int tile_x = tile_index % tile_info->tile_cols;

    int tile_start_x = tile_info->col_starts[tile_x];
    int tile_start_y = tile_info->row_starts[tile_y];

    int tile_width  = tile_info->col_starts[tile_x + 1] - tile_start_x;
    int tile_height = tile_info->row_starts[tile_y + 1] - tile_start_y;

    int tile_mb_width  = tile_width  / APV_MB_WIDTH;
    int tile_mb_height = tile_height / APV_MB_HEIGHT;

    int blk_mb_width  = 2 / sub_w;
    int blk_mb_height = 2 / sub_h;

    int bit_depth;
    int qp_shift;
    LOCAL_ALIGNED_32(uint16_t, qmatrix_scaled, [64]);

    GetBitContext gbc;

    APVEntropyState entropy_state = {
        .log_ctx           = avctx,
        .decode_lut        = &apv->decode_lut,
        .prev_dc           = 0,
        .prev_dc_diff      = 20,
        .prev_1st_ac_level = 0,
    };
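    // prev_dc, prev_dc_diff and prev_1st_ac_level carry entropy-coding
    // state between blocks; they start fresh here because each tile
    // component is an independently decodable segment of the bitstream.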

    init_get_bits8(&gbc, tile->tile_data[comp_index],
                   tile->tile_header.tile_data_size[comp_index]);

    // Combine the bitstream quantisation matrix with the qp scaling
    // in advance. (Including qp_shift as well would overflow 16 bits.)
    // Fix the row ordering at the same time.
    {
        static const uint8_t apv_level_scale[6] = { 40, 45, 51, 57, 64, 71 };
        int qp = tile->tile_header.tile_qp[comp_index];
        int level_scale = apv_level_scale[qp % 6];

        bit_depth = apv_cbc->bit_depth;
        qp_shift  = qp / 6;

        for (int y = 0; y < 8; y++) {
            for (int x = 0; x < 8; x++)
                qmatrix_scaled[y * 8 + x] = level_scale *
                    input->frame_header.quantization_matrix.q_matrix[comp_index][x][y];
        }
    }
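    // After this, qmatrix_scaled[] holds q_matrix * level_scale(qp % 6);
    // the remaining power-of-two factor (1 << (qp / 6)) is applied as
    // qp_shift inside decode_transquant, so the effective dequantisation
    // scale doubles every six QP steps.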

    for (int mb_y = 0; mb_y < tile_mb_height; mb_y++) {
        for (int mb_x = 0; mb_x < tile_mb_width; mb_x++) {
            for (int blk_y = 0; blk_y < blk_mb_height; blk_y++) {
                for (int blk_x = 0; blk_x < blk_mb_width; blk_x++) {
                    int frame_y = (tile_start_y +
                                   APV_MB_HEIGHT * mb_y +
                                   APV_TR_SIZE * blk_y) / sub_h;
                    int frame_x = (tile_start_x +
                                   APV_MB_WIDTH * mb_x +
                                   APV_TR_SIZE * blk_x) / sub_w;

                    ptrdiff_t frame_pitch = apv->output_frame->linesize[comp_index];
                    uint8_t  *block_start = apv->output_frame->data[comp_index] +
                                            frame_y * frame_pitch + 2 * frame_x;

                    apv_decode_block(avctx,
                                     block_start, frame_pitch,
                                     &gbc, &entropy_state,
                                     bit_depth,
                                     qp_shift,
                                     qmatrix_scaled);
                }
            }
        }
    }

    av_log(avctx, AV_LOG_DEBUG,
           "Decoded tile %d component %d: %dx%d MBs starting at (%d,%d)\n",
           tile_index, comp_index, tile_mb_width, tile_mb_height,
           tile_start_x, tile_start_y);

    return 0;
}

static int apv_decode(AVCodecContext *avctx, AVFrame *output,
                      APVRawFrame *input)
{
    APVDecodeContext *apv = avctx->priv_data;
    const CodedBitstreamAPVContext *apv_cbc = apv->cbc->priv_data;
    const APVDerivedTileInfo *tile_info = &apv_cbc->tile_info;
    int err, job_count;

    err = apv_decode_check_format(avctx, &input->frame_header);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported format parameters.\n");
        return err;
    }

    err = ff_thread_get_buffer(avctx, output, 0);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to allocate output frame.\n");
        return err;
    }

    apv->output_frame = output;

    // Each component within a tile is independent of every other,
    // so we can decode all in parallel.
    job_count = tile_info->num_tiles * apv_cbc->num_comp;

    avctx->execute2(avctx, apv_decode_tile_component,
                    input, NULL, job_count);
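    // The jobs write to disjoint regions of the output planes, so no
    // synchronisation is needed beyond the implicit join in execute2().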

    return 0;
}

static int apv_decode_metadata(AVCodecContext *avctx, AVFrame *frame,
                               const APVRawMetadata *md)
{
    int err;

    for (int i = 0; i < md->metadata_count; i++) {
        const APVRawMetadataPayload *pl = &md->payloads[i];

        switch (pl->payload_type) {
        case APV_METADATA_MDCV:
            {
                const APVRawMetadataMDCV *mdcv = &pl->mdcv;
                AVMasteringDisplayMetadata *mdm;

                err = ff_decode_mastering_display_new(avctx, frame, &mdm);
                if (err < 0)
                    return err;

                if (mdm) {
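                    // The payload stores chromaticities in 0.16 fixed point
                    // and luminance in 24.8 (max) / 18.14 (min) fixed point,
                    // the same convention AV1 uses for its MDCV metadata,
                    // hence the denominators below.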
                    for (int j = 0; j < 3; j++) {
                        mdm->display_primaries[j][0] =
                            av_make_q(mdcv->primary_chromaticity_x[j], 1 << 16);
                        mdm->display_primaries[j][1] =
                            av_make_q(mdcv->primary_chromaticity_y[j], 1 << 16);
                    }

                    mdm->white_point[0] =
                        av_make_q(mdcv->white_point_chromaticity_x, 1 << 16);
                    mdm->white_point[1] =
                        av_make_q(mdcv->white_point_chromaticity_y, 1 << 16);

                    mdm->max_luminance =
                        av_make_q(mdcv->max_mastering_luminance, 1 << 8);
                    mdm->min_luminance =
                        av_make_q(mdcv->min_mastering_luminance, 1 << 14);

                    mdm->has_primaries = 1;
                    mdm->has_luminance = 1;
                }
            }
            break;
        case APV_METADATA_CLL:
            {
                const APVRawMetadataCLL *cll = &pl->cll;
                AVContentLightMetadata *clm;

                err = ff_decode_content_light_new(avctx, frame, &clm);
                if (err < 0)
                    return err;

                if (clm) {
                    clm->MaxCLL  = cll->max_cll;
                    clm->MaxFALL = cll->max_fall;
                }
            }
            break;
        default:
            // Ignore other types of metadata.
            break;
        }
    }

    return 0;
}

static int apv_decode_frame(AVCodecContext *avctx, AVFrame *frame,
                            int *got_frame, AVPacket *packet)
{
    APVDecodeContext *apv = avctx->priv_data;
    CodedBitstreamFragment *au = &apv->au;
    int err;

    err = ff_cbs_read_packet(apv->cbc, au, packet);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to read packet.\n");
        return err;
    }

    for (int i = 0; i < au->nb_units; i++) {
        CodedBitstreamUnit *pbu = &au->units[i];

        switch (pbu->type) {
        case APV_PBU_PRIMARY_FRAME:
            err = apv_decode(avctx, frame, pbu->content);
            if (err < 0)
                return err;
            *got_frame = 1;
            break;
        case APV_PBU_METADATA:
            apv_decode_metadata(avctx, frame, pbu->content);
            break;
        case APV_PBU_NON_PRIMARY_FRAME:
        case APV_PBU_PREVIEW_FRAME:
        case APV_PBU_DEPTH_FRAME:
        case APV_PBU_ALPHA_FRAME:
            if (!avctx->internal->is_copy &&
                !apv->warned_additional_frames) {
                av_log(avctx, AV_LOG_WARNING,
                       "Stream contains additional non-primary frames "
                       "which will be ignored by the decoder.\n");
                apv->warned_additional_frames = 1;
            }
            break;
        case APV_PBU_ACCESS_UNIT_INFORMATION:
        case APV_PBU_FILLER:
            // Not relevant to the decoder.
            break;
        default:
            if (!avctx->internal->is_copy &&
                !apv->warned_unknown_pbu_types) {
                av_log(avctx, AV_LOG_WARNING,
                       "Stream contains PBUs with unknown types "
                       "which will be ignored by the decoder.\n");
                apv->warned_unknown_pbu_types = 1;
            }
            break;
        }
    }

    ff_cbs_fragment_reset(au);

    return packet->size;
}

const FFCodec ff_apv_decoder = {
    .p.name         = "apv",
    CODEC_LONG_NAME("Advanced Professional Video"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_APV,
    .priv_data_size = sizeof(APVDecodeContext),
    .init           = apv_decode_init,
    .close          = apv_decode_close,
    FF_CODEC_DECODE_CB(apv_decode_frame),
    .p.capabilities = AV_CODEC_CAP_DR1 |
                      AV_CODEC_CAP_SLICE_THREADS |
                      AV_CODEC_CAP_FRAME_THREADS,
};
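
For reference, a minimal sketch of driving this decoder through the generic libavcodec API. The helper name and the source of the packet are illustrative, not part of this commit:

#include <libavcodec/avcodec.h>

// Hypothetical helper: decode one demuxed APV access unit into "frame".
// Obtaining "pkt" (e.g. via libavformat) is outside the scope of this sketch.
static int decode_one_apv_packet(const AVPacket *pkt, AVFrame *frame)
{
    const AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_APV);
    AVCodecContext *ctx;
    int err;

    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;
    ctx = avcodec_alloc_context3(codec);
    if (!ctx)
        return AVERROR(ENOMEM);

    err = avcodec_open2(ctx, codec, NULL);
    if (err < 0)
        goto end;

    // APV is intra-only, so a primary-frame packet normally yields one
    // frame; with frame threading enabled the first calls may instead
    // return AVERROR(EAGAIN) until the pipeline fills.
    err = avcodec_send_packet(ctx, pkt);
    if (err >= 0)
        err = avcodec_receive_frame(ctx, frame);

end:
    avcodec_free_context(&ctx);
    return err;
}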