Mirror of https://github.com/FFmpeg/FFmpeg.git
h264: add a parameter to the CHROMA444 macro.
This way it does not look like a constant.
This commit is contained in: parent e962bd08ee, commit 23e85be58f
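
For readers skimming the diff, here is a minimal, self-contained sketch of what the extra parameter buys. The cut-down H264Context and the CHROMA444_OLD name are stand-ins invented for illustration only; the real definitions are the ones changed by this commit (see the #define hunk further down).

/* Illustration only: a cut-down stand-in for H264Context, not the real struct. */
#include <stdio.h>

typedef struct {
    struct { int chroma_format_idc; } sps;
} H264Context;

/* Before this commit: reads like a constant, but silently expands to code
 * that needs a variable named exactly "h" in scope at the point of use. */
#define CHROMA444_OLD (h->sps.chroma_format_idc == 3)

/* After this commit: the decoder context is an explicit macro argument. */
#define CHROMA444(h) (h->sps.chroma_format_idc == 3)

static void report(H264Context *ctx)
{
    /* CHROMA444_OLD would not even compile here ("h" is undeclared);
     * the parameterized macro works with any context pointer. */
    printf("4:4:4 chroma: %d\n", CHROMA444(ctx));
}

int main(void)
{
    H264Context c = { .sps = { .chroma_format_idc = 3 } };
    report(&c);
    return 0;
}

With the parameterized form, the dependency on the decoder context is visible at every call site, and the macro no longer requires a local variable that happens to be named h.
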
@@ -2041,7 +2041,7 @@ static av_always_inline void backup_mb_border(H264Context *h, uint8_t *src_y,
     uint8_t *top_border;
     int top_idx = 1;
     const int pixel_shift = h->pixel_shift;
-    int chroma444 = CHROMA444;
+    int chroma444 = CHROMA444(h);
     int chroma422 = CHROMA422(h);
 
     src_y -= linesize;
@@ -2438,7 +2438,7 @@ void ff_h264_hl_decode_mb(H264Context *h)
     const int mb_type = h->cur_pic.mb_type[mb_xy];
     int is_complex = CONFIG_SMALL || h->is_complex || IS_INTRA_PCM(mb_type) || h->qscale == 0;
 
-    if (CHROMA444) {
+    if (CHROMA444(h)) {
         if (is_complex || h->pixel_shift)
             hl_decode_mb_444_complex(h);
         else
@@ -2926,7 +2926,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h)
 {
     switch (h->sps.bit_depth_luma) {
     case 9:
-        if (CHROMA444) {
+        if (CHROMA444(h)) {
             if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                 return AV_PIX_FMT_GBRP9;
             } else
@@ -2937,7 +2937,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h)
             return AV_PIX_FMT_YUV420P9;
         break;
     case 10:
-        if (CHROMA444) {
+        if (CHROMA444(h)) {
             if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                 return AV_PIX_FMT_GBRP10;
             } else
@@ -2948,7 +2948,7 @@ static enum AVPixelFormat get_pixel_format(H264Context *h)
             return AV_PIX_FMT_YUV420P10;
         break;
     case 8:
-        if (CHROMA444) {
+        if (CHROMA444(h)) {
             if (h->avctx->colorspace == AVCOL_SPC_RGB) {
                 return AV_PIX_FMT_GBRP;
             } else
@@ -3190,7 +3190,7 @@ static int decode_slice_header(H264Context *h, H264Context *h0)
 
     h->chroma_y_shift = h->sps.chroma_format_idc <= 1; // 400 uses yuv420p
 
-    h->width = 16 * h->mb_width - (2 >> CHROMA444) * FFMIN(h->sps.crop_right, (8 << CHROMA444) - 1);
+    h->width = 16 * h->mb_width - (2 >> CHROMA444(h)) * FFMIN(h->sps.crop_right, (8 << CHROMA444(h)) - 1);
     if (h->sps.frame_mbs_only_flag)
         h->height = 16 * h->mb_height - (1 << h->chroma_y_shift) * FFMIN(h->sps.crop_bottom, (16 >> h->chroma_y_shift) - 1);
     else
@@ -4001,10 +4001,10 @@ static void loop_filter(H264Context *h, int start_x, int end_x)
             dest_y = h->cur_pic.f.data[0] +
                      ((mb_x << pixel_shift) + mb_y * h->linesize) * 16;
             dest_cb = h->cur_pic.f.data[1] +
-                      (mb_x << pixel_shift) * (8 << CHROMA444) +
+                      (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                       mb_y * h->uvlinesize * block_h;
             dest_cr = h->cur_pic.f.data[2] +
-                      (mb_x << pixel_shift) * (8 << CHROMA444) +
+                      (mb_x << pixel_shift) * (8 << CHROMA444(h)) +
                       mb_y * h->uvlinesize * block_h;
             // FIXME simplify above
 
@@ -86,7 +86,7 @@
 #endif
 
 #define CHROMA422(h) (h->sps.chroma_format_idc == 2)
-#define CHROMA444 (h->sps.chroma_format_idc == 3)
+#define CHROMA444(h) (h->sps.chroma_format_idc == 3)
 
 #define EXTENDED_SAR 255
 
@@ -1812,7 +1812,7 @@ static av_always_inline void decode_cabac_residual_nondc(H264Context *h,
                                                          int max_coeff)
 {
     /* read coded block flag */
-    if( (cat != 5 || CHROMA444) && get_cabac( &h->cabac, &h->cabac_state[get_cabac_cbf_ctx( h, cat, n, max_coeff, 0 ) ] ) == 0 ) {
+    if( (cat != 5 || CHROMA444(h)) && get_cabac( &h->cabac, &h->cabac_state[get_cabac_cbf_ctx( h, cat, n, max_coeff, 0 ) ] ) == 0 ) {
         if( max_coeff == 64 ) {
             fill_rectangle(&h->non_zero_count_cache[scan8[n]], 2, 2, 8, 0, 1);
         } else {
@@ -2289,7 +2289,7 @@ decode_intra_mb:
 
     /* It would be better to do this in fill_decode_caches, but we don't know
      * the transform mode of the current macroblock there. */
-    if (CHROMA444 && IS_8x8DCT(mb_type)){
+    if (CHROMA444(h) && IS_8x8DCT(mb_type)){
         int i;
         uint8_t *nnz_cache = h->non_zero_count_cache;
         for (i = 0; i < 2; i++){
@@ -2354,7 +2354,7 @@ decode_intra_mb:
         h->last_qscale_diff=0;
 
         decode_cabac_luma_residual(h, scan, scan8x8, pixel_shift, mb_type, cbp, 0);
-        if(CHROMA444){
+        if (CHROMA444(h)) {
             decode_cabac_luma_residual(h, scan, scan8x8, pixel_shift, mb_type, cbp, 1);
             decode_cabac_luma_residual(h, scan, scan8x8, pixel_shift, mb_type, cbp, 2);
         } else if (CHROMA422(h)) {
@@ -1106,7 +1106,7 @@ decode_intra_mb:
             return -1;
         }
         h->cbp_table[mb_xy] |= ret << 12;
-        if(CHROMA444){
+        if (CHROMA444(h)) {
            if( decode_luma_residual(h, gb, scan, scan8x8, pixel_shift, mb_type, cbp, 1) < 0 ){
                return -1;
            }
@@ -245,7 +245,7 @@ static av_always_inline void h264_filter_mb_fast_internal(H264Context *h,
                                                           int pixel_shift)
 {
     int chroma = !(CONFIG_GRAY && (h->flags&CODEC_FLAG_GRAY));
-    int chroma444 = CHROMA444;
+    int chroma444 = CHROMA444(h);
     int chroma422 = CHROMA422(h);
 
     int mb_xy = h->mb_xy;
@@ -466,7 +466,7 @@ static int check_mv(H264Context *h, long b_idx, long bn_idx, int mvy_limit){
 static av_always_inline void filter_mb_dir(H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint8_t *img_cb, uint8_t *img_cr, unsigned int linesize, unsigned int uvlinesize, int mb_xy, int mb_type, int mvy_limit, int first_vertical_edge_done, int a, int b, int chroma, int dir) {
     int edge;
     int chroma_qp_avg[2];
-    int chroma444 = CHROMA444;
+    int chroma444 = CHROMA444(h);
     int chroma422 = CHROMA422(h);
     const int mbm_xy = dir == 0 ? mb_xy -1 : h->top_mb_xy;
     const int mbm_type = dir == 0 ? h->left_type[LTOP] : h->top_type;
@@ -779,7 +779,7 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
             filter_mb_mbaff_edgev ( h, img_y , linesize, bS , 1, qp [0], a, b, 1 );
             filter_mb_mbaff_edgev ( h, img_y + 8* linesize, linesize, bS+4, 1, qp [1], a, b, 1 );
             if (chroma){
-                if (CHROMA444) {
+                if (CHROMA444(h)) {
                     filter_mb_mbaff_edgev ( h, img_cb, uvlinesize, bS , 1, bqp[0], a, b, 1 );
                     filter_mb_mbaff_edgev ( h, img_cb + 8*uvlinesize, uvlinesize, bS+4, 1, bqp[1], a, b, 1 );
                     filter_mb_mbaff_edgev ( h, img_cr, uvlinesize, bS , 1, rqp[0], a, b, 1 );
@@ -800,7 +800,7 @@ void ff_h264_filter_mb( H264Context *h, int mb_x, int mb_y, uint8_t *img_y, uint
             filter_mb_mbaff_edgev ( h, img_y , 2* linesize, bS , 2, qp [0], a, b, 1 );
             filter_mb_mbaff_edgev ( h, img_y + linesize, 2* linesize, bS+1, 2, qp [1], a, b, 1 );
             if (chroma){
-                if (CHROMA444) {
+                if (CHROMA444(h)) {
                     filter_mb_mbaff_edgev ( h, img_cb, 2*uvlinesize, bS , 2, bqp[0], a, b, 1 );
                     filter_mb_mbaff_edgev ( h, img_cb + uvlinesize, 2*uvlinesize, bS+1, 2, bqp[1], a, b, 1 );
                     filter_mb_mbaff_edgev ( h, img_cr, 2*uvlinesize, bS , 2, rqp[0], a, b, 1 );
@@ -556,7 +556,7 @@ static void fill_decode_caches(H264Context *h, int mb_type)
                 nnz = h->non_zero_count[left_xy[LEFT(i)]];
                 nnz_cache[3 + 8 * 1 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i]];
                 nnz_cache[3 + 8 * 2 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i]];
-                if (CHROMA444) {
+                if (CHROMA444(h)) {
                     nnz_cache[3 + 8 * 6 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 4 * 4];
                     nnz_cache[3 + 8 * 7 + 2 * 8 * i] = nnz[left_block[8 + 1 + 2 * i] + 4 * 4];
                     nnz_cache[3 + 8 * 11 + 2 * 8 * i] = nnz[left_block[8 + 0 + 2 * i] + 8 * 4];