
h264: K&R formatting cosmetics for header files (part I/II)

Author: Diego Biurrun
Date:   2012-05-07 14:13:23 +02:00
parent d55961fa82
commit be545b8a34
3 changed files with 491 additions and 458 deletions
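
For context, the cosmetics applied across the three headers are the usual K&R conventions visible in the hunks below: the opening brace of a function body moves onto its own line, long prototypes and expressions are wrapped with the continuation aligned under the parameter list, and binary operators get surrounding spaces. A condensed sketch of the pattern on a hypothetical helper (illustrative only, not code from the tree):

#include <stdint.h>

/* New style enforced by this commit: function brace on its own line,
 * spaces around binary operators, wrapped parameter list aligned under
 * the first parameter. */
static inline uint32_t pack_two_halfwords(uint32_t high_half,
                                          uint32_t low_half)
{
    return (low_half & 0xFFFF) + (high_half << 16);
}

/* Old style, for contrast (what the left-hand sides of the hunks look like):
 * static inline uint32_t pack_two_halfwords(uint32_t high_half, uint32_t low_half){
 *     return (low_half&0xFFFF) + (high_half<<16);
 * }
 */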

libavcodec/h264.h

@@ -150,7 +150,6 @@ typedef enum {
* Sequence parameter set
*/
typedef struct SPS {
int profile_idc;
int level_idc;
int chroma_format_idc;
@@ -380,8 +379,8 @@ typedef struct H264Context{
unsigned int list_count;
uint8_t *list_counts; ///< Array of list_count per MB specifying the slice type
Picture ref_list[2][48]; /**< 0..15: frame refs, 16..47: mbaff field refs.
Reordered version of default_ref_list
according to picture reordering in slice header */
* Reordered version of default_ref_list
* according to picture reordering in slice header */
int ref2frm[MAX_SLICES][2][64]; ///< reference to frame number lists, used in the loop filter, the first 2 are for -2,-1
// data partitioning
@@ -459,7 +458,6 @@ typedef struct H264Context{
uint16_t *slice_table_base;
// POC stuff
int poc_lsb;
int poc_msb;
@@ -582,7 +580,6 @@ typedef struct H264Context{
int cur_chroma_format_idc;
} H264Context;
extern const uint8_t ff_h264_chroma_qp[3][QP_MAX_NUM + 1]; ///< One chroma qp table for each supported bit depth (8, 9, 10).
extern const uint16_t ff_h264_mb_sizes[4];
@@ -610,13 +607,16 @@ int ff_h264_decode_picture_parameter_set(H264Context *h, int bit_length);
* Decode a network abstraction layer unit.
* @param consumed is the number of bytes used as input
* @param length is the length of the array
* @param dst_length is the number of decoded bytes FIXME here or a decode rbsp tailing?
* @param dst_length is the number of decoded bytes FIXME here
* or a decode rbsp tailing?
* @return decoded bytes, might be src+1 if no escapes
*/
const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src, int *dst_length, int *consumed, int length);
const uint8_t *ff_h264_decode_nal(H264Context *h, const uint8_t *src,
int *dst_length, int *consumed, int length);
/**
* Free any data that may have been allocated in the H264 context like SPS, PPS etc.
* Free any data that may have been allocated in the H264 context
* like SPS, PPS etc.
*/
av_cold void ff_h264_free_context(H264Context *h);
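
The reflowed ff_h264_decode_nal() prototype above is the decoder's NAL unescaping entry point. A minimal usage sketch, assuming it is called from inside libavcodec with a buffer positioned just past a start code (the wrapper function and its error convention are illustrative, not code from the tree):

#include "h264.h"   /* private decoder header; this sketch lives inside libavcodec */

/* Hypothetical helper: unescape one NAL unit and report how many input
 * bytes it occupied. Assumption: a NULL return signals failure. */
static int parse_one_nal(H264Context *h, const uint8_t *buf, int buf_size)
{
    int dst_length, consumed;
    const uint8_t *rbsp = ff_h264_decode_nal(h, buf, &dst_length,
                                             &consumed, buf_size);
    if (!rbsp)
        return -1;
    /* rbsp points to dst_length unescaped payload bytes; per the doxygen
     * above it may alias buf + 1 when no emulation bytes were present. */
    return consumed;
}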
@@ -649,14 +649,15 @@ int ff_h264_decode_ref_pic_marking(H264Context *h, GetBitContext *gb);
void ff_generate_sliding_window_mmcos(H264Context *h);
/**
* Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
* Check if the top & left blocks are available if needed & change the
* dc mode so it only uses the available blocks.
*/
int ff_h264_check_intra4x4_pred_mode(H264Context *h);
/**
* Check if the top & left blocks are available if needed & change the dc mode so it only uses the available blocks.
* Check if the top & left blocks are available if needed & change the
* dc mode so it only uses the available blocks.
*/
int ff_h264_check_intra_pred_mode(H264Context *h, int mode, int is_chroma);
@@ -668,13 +669,13 @@ av_cold void ff_h264_decode_init_vlc(void);
/**
* Decode a macroblock
* @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR if an error is noticed
* @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
*/
int ff_h264_decode_mb_cavlc(H264Context *h);
/**
* Decode a CABAC coded macroblock
* @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR if an error is noticed
* @return 0 if OK, ER_AC_ERROR / ER_DC_ERROR / ER_MV_ERROR on error
*/
int ff_h264_decode_mb_cabac(H264Context *h);
@@ -684,8 +685,12 @@ void ff_h264_direct_dist_scale_factor(H264Context * const h);
void ff_h264_direct_ref_list_init(H264Context *const h);
void ff_h264_pred_direct_motion(H264Context *const h, int *mb_type);
void ff_h264_filter_mb_fast( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize);
void ff_h264_filter_mb_fast(H264Context *h, int mb_x, int mb_y,
uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
unsigned int linesize, unsigned int uvlinesize);
void ff_h264_filter_mb(H264Context *h, int mb_x, int mb_y,
uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr,
unsigned int linesize, unsigned int uvlinesize);
/**
* Reset SEI values at the beginning of the frame.
@@ -694,15 +699,14 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
*/
void ff_h264_reset_sei(H264Context *h);
/*
o-o o-o
/ / /
o-o o-o
,---'
o-o o-o
/ / /
o-o o-o
* o-o o-o
* / / /
* o-o o-o
* ,---'
* o-o o-o
* / / /
* o-o o-o
*/
/* Scan8 organization:
@@ -745,7 +749,8 @@ static const uint8_t scan8[16*3 + 3]={
0 + 0 * 8, 0 + 5 * 8, 0 + 10 * 8
};
static av_always_inline uint32_t pack16to32(int a, int b){
static av_always_inline uint32_t pack16to32(int a, int b)
{
#if HAVE_BIGENDIAN
return (b & 0xFFFF) + (a << 16);
#else
@@ -753,7 +758,8 @@ static av_always_inline uint32_t pack16to32(int a, int b){
#endif
}
static av_always_inline uint16_t pack8to16(int a, int b){
static av_always_inline uint16_t pack8to16(int a, int b)
{
#if HAVE_BIGENDIAN
return (b & 0xFF) + (a << 8);
#else
@@ -764,14 +770,16 @@ static av_always_inline uint16_t pack8to16(int a, int b){
/**
* Get the chroma qp.
*/
static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale){
static av_always_inline int get_chroma_qp(H264Context *h, int t, int qscale)
{
return h->pps.chroma_qp_table[t][qscale];
}
/**
* Get the predicted intra4x4 prediction mode.
*/
static av_always_inline int pred_intra_mode(H264Context *h, int n){
static av_always_inline int pred_intra_mode(H264Context *h, int n)
{
const int index8 = scan8[n];
const int left = h->intra4x4_pred_mode_cache[index8 - 1];
const int top = h->intra4x4_pred_mode_cache[index8 - 8];
@@ -779,11 +787,14 @@ static av_always_inline int pred_intra_mode(H264Context *h, int n){
tprintf(h->s.avctx, "mode:%d %d min:%d\n", left, top, min);
if(min<0) return DC_PRED;
else return min;
if (min < 0)
return DC_PRED;
else
return min;
}
static av_always_inline void write_back_intra_pred_mode(H264Context *h){
static av_always_inline void write_back_intra_pred_mode(H264Context *h)
{
int8_t *i4x4 = h->intra4x4_pred_mode + h->mb2br_xy[h->mb_xy];
int8_t *i4x4_cache = h->intra4x4_pred_mode_cache;
@@ -793,7 +804,8 @@ static av_always_inline void write_back_intra_pred_mode(H264Context *h){
i4x4[6] = i4x4_cache[7 + 8 * 1];
}
static av_always_inline void write_back_non_zero_count(H264Context *h){
static av_always_inline void write_back_non_zero_count(H264Context *h)
{
const int mb_xy = h->mb_xy;
uint8_t *nnz = h->non_zero_count[mb_xy];
uint8_t *nnz_cache = h->non_zero_count_cache;
@@ -815,8 +827,11 @@ static av_always_inline void write_back_non_zero_count(H264Context *h){
}
}
static av_always_inline void write_back_motion_list(H264Context *h, MpegEncContext * const s, int b_stride,
int b_xy, int b8_xy, int mb_type, int list )
static av_always_inline void write_back_motion_list(H264Context *h,
MpegEncContext *const s,
int b_stride,
int b_xy, int b8_xy,
int mb_type, int list)
{
int16_t(*mv_dst)[2] = &s->current_picture.f.motion_val[list][b_xy];
int16_t(*mv_src)[2] = &h->mv_cache[list][scan8[0]];
@@ -825,11 +840,12 @@ static av_always_inline void write_back_motion_list(H264Context *h, MpegEncConte
AV_COPY128(mv_dst + 2 * b_stride, mv_src + 8 * 2);
AV_COPY128(mv_dst + 3 * b_stride, mv_src + 8 * 3);
if (CABAC) {
uint8_t (*mvd_dst)[2] = &h->mvd_table[list][FMO ? 8*h->mb_xy : h->mb2br_xy[h->mb_xy]];
uint8_t (*mvd_dst)[2] = &h->mvd_table[list][FMO ? 8 * h->mb_xy
: h->mb2br_xy[h->mb_xy]];
uint8_t(*mvd_src)[2] = &h->mvd_cache[list][scan8[0]];
if(IS_SKIP(mb_type))
if (IS_SKIP(mb_type)) {
AV_ZERO128(mvd_dst);
else{
} else {
AV_COPY64(mvd_dst, mvd_src + 8 * 3);
AV_COPY16(mvd_dst + 3 + 3, mvd_src + 3 + 8 * 0);
AV_COPY16(mvd_dst + 3 + 2, mvd_src + 3 + 8 * 1);
@@ -847,7 +863,8 @@ static av_always_inline void write_back_motion_list(H264Context *h, MpegEncConte
}
}
static av_always_inline void write_back_motion(H264Context *h, int mb_type){
static av_always_inline void write_back_motion(H264Context *h, int mb_type)
{
MpegEncContext *const s = &h->s;
const int b_stride = h->b_stride;
const int b_xy = 4 * s->mb_x + 4 * s->mb_y * h->b_stride; // try mb2b(8)_xy
@@ -859,9 +876,8 @@ static av_always_inline void write_back_motion(H264Context *h, int mb_type){
fill_rectangle(&s->current_picture.f.ref_index[0][b8_xy],
2, 2, 2, (uint8_t)LIST_NOT_USED, 1);
}
if(USES_LIST(mb_type, 1)){
if (USES_LIST(mb_type, 1))
write_back_motion_list(h, s, b_stride, b_xy, b8_xy, mb_type, 1);
}
if (h->slice_type_nos == AV_PICTURE_TYPE_B && CABAC) {
if (IS_8X8(mb_type)) {
@@ -873,11 +889,16 @@ static av_always_inline void write_back_motion(H264Context *h, int mb_type){
}
}
static av_always_inline int get_dct8x8_allowed(H264Context *h){
static av_always_inline int get_dct8x8_allowed(H264Context *h)
{
if (h->sps.direct_8x8_inference_flag)
return !(AV_RN64A(h->sub_mb_type) & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8 )*0x0001000100010001ULL));
return !(AV_RN64A(h->sub_mb_type) &
((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8) *
0x0001000100010001ULL));
else
return !(AV_RN64A(h->sub_mb_type) & ((MB_TYPE_16x8|MB_TYPE_8x16|MB_TYPE_8x8|MB_TYPE_DIRECT2)*0x0001000100010001ULL));
return !(AV_RN64A(h->sub_mb_type) &
((MB_TYPE_16x8 | MB_TYPE_8x16 | MB_TYPE_8x8 | MB_TYPE_DIRECT2) *
0x0001000100010001ULL));
}
#endif /* AVCODEC_H264_H */
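
One detail in the get_dct8x8_allowed() hunk above that the rewrap makes easier to read: multiplying a 16-bit flag mask by 0x0001000100010001ULL broadcasts it into all four 16-bit lanes of a 64-bit word, so a single AND against the packed sub_mb_type array tests all four sub-macroblock partitions at once. A standalone sketch of the trick with made-up flag values (memcpy stands in for AV_RN64A, which additionally assumes alignment):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum { FLAG_16x8 = 0x10, FLAG_8x16 = 0x20, FLAG_8x8 = 0x40 };  /* illustrative values */

/* Return nonzero if any of the four packed uint16_t entries has one of the
 * bits in 'mask' set, using a single 64-bit AND. */
static int any_flag_set(const uint16_t types[4], uint16_t mask)
{
    uint64_t packed;
    memcpy(&packed, types, sizeof(packed));
    return (packed & (mask * UINT64_C(0x0001000100010001))) != 0;
}

int main(void)
{
    uint16_t sub_mb_type[4] = { 0x01, 0x02, FLAG_8x16, 0x04 };
    printf("%d\n", any_flag_set(sub_mb_type, FLAG_16x8 | FLAG_8x16 | FLAG_8x8)); /* prints 1 */
    printf("%d\n", any_flag_set(sub_mb_type, FLAG_8x8));                         /* prints 0 */
    return 0;
}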

libavcodec/h264data.h

@@ -30,13 +30,15 @@
#define AVCODEC_H264DATA_H
#include <stdint.h>
#include "libavutil/rational.h"
#include "mpegvideo.h"
#include "h264.h"
static const uint8_t golomb_to_pict_type[5]=
{AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_SP, AV_PICTURE_TYPE_SI};
static const uint8_t golomb_to_pict_type[5] = {
AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, AV_PICTURE_TYPE_I,
AV_PICTURE_TYPE_SP, AV_PICTURE_TYPE_SI
};
static const uint8_t golomb_to_intra4x4_cbp[48] = {
47, 31, 15, 0, 23, 27, 29, 30, 7, 11, 13, 14, 39, 43, 45, 46,
@@ -257,6 +259,7 @@ static const uint8_t dequant4_coeff_init[6][3]={
static const uint8_t dequant8_coeff_init_scan[16] = {
0, 3, 4, 3, 3, 1, 5, 1, 4, 5, 2, 5, 3, 1, 5, 1
};
static const uint8_t dequant8_coeff_init[6][6] = {
{ 20, 18, 32, 19, 25, 24 },
{ 22, 19, 35, 21, 28, 26 },

libavcodec/h264pred.h

@@ -95,14 +95,23 @@ typedef struct H264PredContext{
void(*pred8x8[4 + 3 + 4])(uint8_t *src, int stride);
void(*pred16x16[4 + 3 + 2])(uint8_t *src, int stride);
void (*pred4x4_add [2])(uint8_t *pix/*align 4*/, const DCTELEM *block/*align 16*/, int stride);
void (*pred8x8l_add [2])(uint8_t *pix/*align 8*/, const DCTELEM *block/*align 16*/, int stride);
void (*pred8x8_add [3])(uint8_t *pix/*align 8*/, const int *block_offset, const DCTELEM *block/*align 16*/, int stride);
void (*pred16x16_add[3])(uint8_t *pix/*align 16*/, const int *block_offset, const DCTELEM *block/*align 16*/, int stride);
void(*pred4x4_add[2])(uint8_t *pix /*align 4*/,
const DCTELEM *block /*align 16*/, int stride);
void(*pred8x8l_add[2])(uint8_t *pix /*align 8*/,
const DCTELEM *block /*align 16*/, int stride);
void(*pred8x8_add[3])(uint8_t *pix /*align 8*/,
const int *block_offset,
const DCTELEM *block /*align 16*/, int stride);
void(*pred16x16_add[3])(uint8_t *pix /*align 16*/,
const int *block_offset,
const DCTELEM *block /*align 16*/, int stride);
} H264PredContext;
void ff_h264_pred_init(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_arm(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_x86(H264PredContext *h, int codec_id, const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_arm(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
void ff_h264_pred_init_x86(H264PredContext *h, int codec_id,
const int bit_depth, const int chroma_format_idc);
#endif /* AVCODEC_H264PRED_H */