1
0
mirror of https://github.com/FFmpeg/FFmpeg.git synced 2025-08-04 22:03:09 +02:00

avcodec/mpegvideoenc: Add MPVEncContext

Many of the fields of MpegEncContext (which is also used by decoders)
are actually only used by encoders. Therefore this commit adds
a new encoder-only structure and moves all of the encoder-only
fields to it except for those which require more explicit
synchronisation between the main slice context and the other
slice contexts. This synchronisation is currently mainly provided
by ff_update_thread_context() which simply copies most of
the main slice context over the other slice contexts. Fields
which are moved to the new MPVEncContext no longer participate
in this (which is desired, because it is horrible and, for the
fields under b) below, wasteful), which means that some fields can only
be moved when explicit synchronisation code is added in later commits.

More explicitly, this commit moves the following fields:
a) Fields not copied by ff_update_duplicate_context():
dct_error_sum and dct_count; the former does not need synchronisation,
the latter is synchronised in merge_context_after_encode().
b) Fields which do not change after initialisation (these fields
could also be put into MPVMainEncContext at the cost of
an indirection to access them): lambda_table, adaptive_quant,
{luma,chroma}_elim_threshold, new_pic, fdsp, mpvencdsp, pdsp,
{p,b_forw,b_back,b_bidir_forw,b_bidir_back,b_direct,b_field}_mv_table,
[pb]_field_select_table, mb_{type,var,mean}, mc_mb_var, {min,max}_qcoeff,
{inter,intra}_quant_bias, ac_esc_length, the *_vlc_length fields,
the q_{intra,inter,chroma_intra}_matrix{,16}, dct_offset, mb_info,
mjpeg_ctx, rtp_mode, rtp_payload_size, encode_mb, all function
pointers, mpv_flags, quantizer_noise_shaping,
frame_reconstruction_bitfield, error_rate and intra_penalty.
c) Fields which are already (re)set explicitly: The PutBitContexts
pb, tex_pb, pb2; dquant, skipdct, encoding_error, the statistics
fields {mv,i_tex,p_tex,misc,last}_bits and i_count; last_mv_dir,
esc_pos (reset when writing the header).
d) Fields which are only used by encoders not supporting slice
threading for which synchronisation doesn't matter: esc3_level_length
and the remaining mb_info fields.
e) coded_score: This field is only really used when FF_MPV_FLAG_CBP_RD
is set (which implies trellis) and even then it is only used for
non-intra blocks. For these blocks dct_quantize_trellis_c() either
sets coded_score[n] or returns a last_non_zero value of -1
in which case coded_score will be reset in encode_mb_internal().
Therefore no old values are ever used.

The MotionEstContext has not been moved yet.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
This commit is contained in:
Andreas Rheinhardt
2025-03-19 08:11:01 +01:00
parent d74d1707a6
commit a064d34a32
52 changed files with 3044 additions and 3032 deletions

View File

@ -21,66 +21,66 @@
#include "config.h" #include "config.h"
#include "libavutil/attributes.h" #include "libavutil/attributes.h"
#include "libavutil/aarch64/cpu.h" #include "libavutil/aarch64/cpu.h"
#include "libavcodec/mpegvideo.h" #include "libavcodec/mpegvideoenc.h"
int ff_pix_abs16_neon(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, int ff_pix_abs16_neon(MPVEncContext *s, const uint8_t *blk1, const uint8_t *blk2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_xy2_neon(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, int ff_pix_abs16_xy2_neon(MPVEncContext *s, const uint8_t *blk1, const uint8_t *blk2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_x2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_x2_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_y2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_y2_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_neon(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, int ff_pix_abs8_neon(MPVEncContext *s, const uint8_t *blk1, const uint8_t *blk2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int sse16_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int sse16_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int sse8_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int sse8_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int sse4_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int sse4_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int vsad16_neon(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, int vsad16_neon(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int vsad_intra16_neon(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, int vsad_intra16_neon(MPVEncContext *c, const uint8_t *s, const uint8_t *dummy,
ptrdiff_t stride, int h) ; ptrdiff_t stride, int h) ;
int vsad_intra8_neon(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, int vsad_intra8_neon(MPVEncContext *c, const uint8_t *s, const uint8_t *dummy,
ptrdiff_t stride, int h) ; ptrdiff_t stride, int h) ;
int vsse16_neon(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, int vsse16_neon(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int vsse_intra16_neon(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, int vsse_intra16_neon(MPVEncContext *c, const uint8_t *s, const uint8_t *dummy,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int nsse16_neon(int multiplier, const uint8_t *s, const uint8_t *s2, int nsse16_neon(int multiplier, const uint8_t *s, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int nsse16_neon_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, int nsse16_neon_wrapper(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int pix_median_abs16_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int pix_median_abs16_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int pix_median_abs8_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int pix_median_abs8_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_x2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_x2_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_y2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_y2_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_xy2_neon(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_xy2_neon(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int nsse8_neon(int multiplier, const uint8_t *s, const uint8_t *s2, int nsse8_neon(int multiplier, const uint8_t *s, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int nsse8_neon_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, int nsse8_neon_wrapper(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int vsse8_neon(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, int vsse8_neon(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int vsse_intra8_neon(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, int vsse_intra8_neon(MPVEncContext *c, const uint8_t *s, const uint8_t *dummy,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
#if HAVE_DOTPROD #if HAVE_DOTPROD
int sse16_neon_dotprod(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int sse16_neon_dotprod(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int vsse_intra16_neon_dotprod(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, int vsse_intra16_neon_dotprod(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
#endif #endif
@ -129,20 +129,20 @@ av_cold void ff_me_cmp_init_aarch64(MECmpContext *c, AVCodecContext *avctx)
#endif #endif
} }
int nsse16_neon_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, int nsse16_neon_wrapper(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
if (c) if (c)
return nsse16_neon(c->avctx->nsse_weight, s1, s2, stride, h); return nsse16_neon(c->c.avctx->nsse_weight, s1, s2, stride, h);
else else
return nsse16_neon(8, s1, s2, stride, h); return nsse16_neon(8, s1, s2, stride, h);
} }
int nsse8_neon_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, int nsse8_neon_wrapper(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
if (c) if (c)
return nsse8_neon(c->avctx->nsse_weight, s1, s2, stride, h); return nsse8_neon(c->c.avctx->nsse_weight, s1, s2, stride, h);
else else
return nsse8_neon(8, s1, s2, stride, h); return nsse8_neon(8, s1, s2, stride, h);
} }

View File

@ -23,19 +23,18 @@
#include "libavutil/arm/cpu.h" #include "libavutil/arm/cpu.h"
#include "libavcodec/avcodec.h" #include "libavcodec/avcodec.h"
#include "libavcodec/me_cmp.h" #include "libavcodec/me_cmp.h"
#include "libavcodec/mpegvideo.h"
int ff_pix_abs16_armv6(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, int ff_pix_abs16_armv6(MPVEncContext *s, const uint8_t *blk1, const uint8_t *blk2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_x2_armv6(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, int ff_pix_abs16_x2_armv6(MPVEncContext *s, const uint8_t *blk1, const uint8_t *blk2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_y2_armv6(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, int ff_pix_abs16_y2_armv6(MPVEncContext *s, const uint8_t *blk1, const uint8_t *blk2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_armv6(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, int ff_pix_abs8_armv6(MPVEncContext *s, const uint8_t *blk1, const uint8_t *blk2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sse16_armv6(MpegEncContext *s, const uint8_t *blk1, const uint8_t *blk2, int ff_sse16_armv6(MPVEncContext *s, const uint8_t *blk1, const uint8_t *blk2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx) av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx)

View File

@ -117,12 +117,12 @@ void dnxhd_10bit_get_pixels_8x4_sym(int16_t *restrict block,
memcpy(block + 4 * 8, pixels + 3 * line_size, 8 * sizeof(*block)); memcpy(block + 4 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
} }
static int dnxhd_10bit_dct_quantize_444(MpegEncContext *ctx, int16_t *block, static int dnxhd_10bit_dct_quantize_444(MPVEncContext *ctx, int16_t *block,
int n, int qscale, int *overflow) int n, int qscale, int *overflow)
{ {
int i, j, level, last_non_zero, start_i; int i, j, level, last_non_zero, start_i;
const int *qmat; const int *qmat;
const uint8_t *scantable= ctx->intra_scantable.scantable; const uint8_t *scantable = ctx->c.intra_scantable.scantable;
int bias; int bias;
int max = 0; int max = 0;
unsigned int threshold1, threshold2; unsigned int threshold1, threshold2;
@ -169,17 +169,17 @@ static int dnxhd_10bit_dct_quantize_444(MpegEncContext *ctx, int16_t *block,
*overflow = ctx->max_qcoeff < max; //overflow might have happened *overflow = ctx->max_qcoeff < max; //overflow might have happened
/* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */ /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
if (ctx->idsp.perm_type != FF_IDCT_PERM_NONE) if (ctx->c.idsp.perm_type != FF_IDCT_PERM_NONE)
ff_block_permute(block, ctx->idsp.idct_permutation, ff_block_permute(block, ctx->c.idsp.idct_permutation,
scantable, last_non_zero); scantable, last_non_zero);
return last_non_zero; return last_non_zero;
} }
static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, int16_t *block, static int dnxhd_10bit_dct_quantize(MPVEncContext *ctx, int16_t *block,
int n, int qscale, int *overflow) int n, int qscale, int *overflow)
{ {
const uint8_t *scantable= ctx->intra_scantable.scantable; const uint8_t *scantable = ctx->c.intra_scantable.scantable;
const int *qmat = n<4 ? ctx->q_intra_matrix[qscale] : ctx->q_chroma_intra_matrix[qscale]; const int *qmat = n<4 ? ctx->q_intra_matrix[qscale] : ctx->q_chroma_intra_matrix[qscale];
int last_non_zero = 0; int last_non_zero = 0;
int i; int i;
@ -200,8 +200,8 @@ static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, int16_t *block,
} }
/* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */ /* we need this permutation so that we correct the IDCT, we only permute the !=0 elements */
if (ctx->idsp.perm_type != FF_IDCT_PERM_NONE) if (ctx->c.idsp.perm_type != FF_IDCT_PERM_NONE)
ff_block_permute(block, ctx->idsp.idct_permutation, ff_block_permute(block, ctx->c.idsp.idct_permutation,
scantable, last_non_zero); scantable, last_non_zero);
return last_non_zero; return last_non_zero;
@ -266,34 +266,33 @@ static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
{ {
// init first elem to 1 to avoid div by 0 in convert_matrix // init first elem to 1 to avoid div by 0 in convert_matrix
uint16_t weight_matrix[64] = { 1, }; // convert_matrix needs uint16_t* uint16_t weight_matrix[64] = { 1, }; // convert_matrix needs uint16_t*
int qscale, i;
const uint8_t *luma_weight_table = ctx->cid_table->luma_weight; const uint8_t *luma_weight_table = ctx->cid_table->luma_weight;
const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight; const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight;
if (!FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_l, ctx->m.avctx->qmax + 1) || if (!FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_l, ctx->m.c.avctx->qmax + 1) ||
!FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_c, ctx->m.avctx->qmax + 1) || !FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_c, ctx->m.c.avctx->qmax + 1) ||
!FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_l16, ctx->m.avctx->qmax + 1) || !FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_l16, ctx->m.c.avctx->qmax + 1) ||
!FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_c16, ctx->m.avctx->qmax + 1)) !FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_c16, ctx->m.c.avctx->qmax + 1))
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
if (ctx->bit_depth == 8) { if (ctx->bit_depth == 8) {
for (i = 1; i < 64; i++) { for (int i = 1; i < 64; i++) {
int j = ctx->m.idsp.idct_permutation[ff_zigzag_direct[i]]; int j = ctx->m.c.idsp.idct_permutation[ff_zigzag_direct[i]];
weight_matrix[j] = ctx->cid_table->luma_weight[i]; weight_matrix[j] = ctx->cid_table->luma_weight[i];
} }
ff_convert_matrix(&ctx->m, ctx->qmatrix_l, ctx->qmatrix_l16, ff_convert_matrix(&ctx->m, ctx->qmatrix_l, ctx->qmatrix_l16,
weight_matrix, ctx->intra_quant_bias, 1, weight_matrix, ctx->intra_quant_bias, 1,
ctx->m.avctx->qmax, 1); ctx->m.c.avctx->qmax, 1);
for (i = 1; i < 64; i++) { for (int i = 1; i < 64; i++) {
int j = ctx->m.idsp.idct_permutation[ff_zigzag_direct[i]]; int j = ctx->m.c.idsp.idct_permutation[ff_zigzag_direct[i]];
weight_matrix[j] = ctx->cid_table->chroma_weight[i]; weight_matrix[j] = ctx->cid_table->chroma_weight[i];
} }
ff_convert_matrix(&ctx->m, ctx->qmatrix_c, ctx->qmatrix_c16, ff_convert_matrix(&ctx->m, ctx->qmatrix_c, ctx->qmatrix_c16,
weight_matrix, ctx->intra_quant_bias, 1, weight_matrix, ctx->intra_quant_bias, 1,
ctx->m.avctx->qmax, 1); ctx->m.c.avctx->qmax, 1);
for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) { for (int qscale = 1; qscale <= ctx->m.c.avctx->qmax; qscale++) {
for (i = 0; i < 64; i++) { for (int i = 0; i < 64; i++) {
ctx->qmatrix_l[qscale][i] <<= 2; ctx->qmatrix_l[qscale][i] <<= 2;
ctx->qmatrix_c[qscale][i] <<= 2; ctx->qmatrix_c[qscale][i] <<= 2;
ctx->qmatrix_l16[qscale][0][i] <<= 2; ctx->qmatrix_l16[qscale][0][i] <<= 2;
@ -304,8 +303,8 @@ static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
} }
} else { } else {
// 10-bit // 10-bit
for (qscale = 1; qscale <= ctx->m.avctx->qmax; qscale++) { for (int qscale = 1; qscale <= ctx->m.c.avctx->qmax; qscale++) {
for (i = 1; i < 64; i++) { for (int i = 1; i < 64; i++) {
int j = ff_zigzag_direct[i]; int j = ff_zigzag_direct[i];
/* The quantization formula from the VC-3 standard is: /* The quantization formula from the VC-3 standard is:
@ -337,12 +336,12 @@ static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx) static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx)
{ {
if (!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_rc, (ctx->m.avctx->qmax + 1) * ctx->m.mb_num)) if (!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_rc, (ctx->m.c.avctx->qmax + 1) * ctx->m.c.mb_num))
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
if (ctx->m.avctx->mb_decision != FF_MB_DECISION_RD) { if (ctx->m.c.avctx->mb_decision != FF_MB_DECISION_RD) {
if (!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_cmp, ctx->m.mb_num) || if (!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_cmp, ctx->m.c.mb_num) ||
!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_cmp_tmp, ctx->m.mb_num)) !FF_ALLOCZ_TYPED_ARRAY(ctx->mb_cmp_tmp, ctx->m.c.mb_num))
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
} }
ctx->frame_bits = (ctx->coding_unit_size - ctx->frame_bits = (ctx->coding_unit_size -
@ -414,21 +413,21 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
ctx->cid_table = ff_dnxhd_get_cid_table(ctx->cid); ctx->cid_table = ff_dnxhd_get_cid_table(ctx->cid);
av_assert0(ctx->cid_table); av_assert0(ctx->cid_table);
ctx->m.avctx = avctx; ctx->m.c.avctx = avctx;
ctx->m.mb_intra = 1; ctx->m.c.mb_intra = 1;
ctx->m.h263_aic = 1; ctx->m.c.h263_aic = 1;
avctx->bits_per_raw_sample = ctx->bit_depth; avctx->bits_per_raw_sample = ctx->bit_depth;
ff_blockdsp_init(&ctx->m.bdsp); ff_blockdsp_init(&ctx->m.c.bdsp);
ff_fdctdsp_init(&ctx->m.fdsp, avctx); ff_fdctdsp_init(&ctx->m.fdsp, avctx);
ff_mpv_idct_init(&ctx->m); ff_mpv_idct_init(&ctx->m.c);
ff_mpegvideoencdsp_init(&ctx->m.mpvencdsp, avctx); ff_mpegvideoencdsp_init(&ctx->m.mpvencdsp, avctx);
ff_pixblockdsp_init(&ctx->m.pdsp, avctx); ff_pixblockdsp_init(&ctx->m.pdsp, avctx);
ff_dct_encode_init(&ctx->m); ff_dct_encode_init(&ctx->m);
if (ctx->profile != AV_PROFILE_DNXHD) if (ctx->profile != AV_PROFILE_DNXHD)
ff_videodsp_init(&ctx->m.vdsp, ctx->bit_depth); ff_videodsp_init(&ctx->m.c.vdsp, ctx->bit_depth);
if (ctx->is_444 || ctx->profile == AV_PROFILE_DNXHR_HQX) { if (ctx->is_444 || ctx->profile == AV_PROFILE_DNXHR_HQX) {
ctx->m.dct_quantize = dnxhd_10bit_dct_quantize_444; ctx->m.dct_quantize = dnxhd_10bit_dct_quantize_444;
@ -445,12 +444,12 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
ff_dnxhdenc_init(ctx); ff_dnxhdenc_init(ctx);
ctx->m.mb_height = (avctx->height + 15) / 16; ctx->m.c.mb_height = (avctx->height + 15) / 16;
ctx->m.mb_width = (avctx->width + 15) / 16; ctx->m.c.mb_width = (avctx->width + 15) / 16;
if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) { if (avctx->flags & AV_CODEC_FLAG_INTERLACED_DCT) {
ctx->interlaced = 1; ctx->interlaced = 1;
ctx->m.mb_height /= 2; ctx->m.c.mb_height /= 2;
} }
if (ctx->interlaced && ctx->profile != AV_PROFILE_DNXHD) { if (ctx->interlaced && ctx->profile != AV_PROFILE_DNXHD) {
@ -459,7 +458,7 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
ctx->m.mb_num = ctx->m.mb_height * ctx->m.mb_width; ctx->m.c.mb_num = ctx->m.c.mb_height * ctx->m.c.mb_width;
if (ctx->cid_table->frame_size == DNXHD_VARIABLE) { if (ctx->cid_table->frame_size == DNXHD_VARIABLE) {
ctx->frame_size = ff_dnxhd_get_hr_frame_size(ctx->cid, ctx->frame_size = ff_dnxhd_get_hr_frame_size(ctx->cid,
@ -471,8 +470,8 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
ctx->coding_unit_size = ctx->cid_table->coding_unit_size; ctx->coding_unit_size = ctx->cid_table->coding_unit_size;
} }
if (ctx->m.mb_height > 68) if (ctx->m.c.mb_height > 68)
ctx->data_offset = 0x170 + (ctx->m.mb_height << 2); ctx->data_offset = 0x170 + (ctx->m.c.mb_height << 2);
else else
ctx->data_offset = 0x280; ctx->data_offset = 0x280;
@ -490,10 +489,10 @@ static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
if ((ret = dnxhd_init_rc(ctx)) < 0) if ((ret = dnxhd_init_rc(ctx)) < 0)
return ret; return ret;
if (!FF_ALLOCZ_TYPED_ARRAY(ctx->slice_size, ctx->m.mb_height) || if (!FF_ALLOCZ_TYPED_ARRAY(ctx->slice_size, ctx->m.c.mb_height) ||
!FF_ALLOCZ_TYPED_ARRAY(ctx->slice_offs, ctx->m.mb_height) || !FF_ALLOCZ_TYPED_ARRAY(ctx->slice_offs, ctx->m.c.mb_height) ||
!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_bits, ctx->m.mb_num) || !FF_ALLOCZ_TYPED_ARRAY(ctx->mb_bits, ctx->m.c.mb_num) ||
!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_qscale, ctx->m.mb_num)) !FF_ALLOCZ_TYPED_ARRAY(ctx->mb_qscale, ctx->m.c.mb_num))
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
if (avctx->active_thread_type == FF_THREAD_SLICE) { if (avctx->active_thread_type == FF_THREAD_SLICE) {
@ -548,8 +547,8 @@ static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
buf[0x5f] = 0x01; // UDL buf[0x5f] = 0x01; // UDL
buf[0x167] = 0x02; // reserved buf[0x167] = 0x02; // reserved
AV_WB16(buf + 0x16a, ctx->m.mb_height * 4 + 4); // MSIPS AV_WB16(buf + 0x16a, ctx->m.c.mb_height * 4 + 4); // MSIPS
AV_WB16(buf + 0x16c, ctx->m.mb_height); // Ns AV_WB16(buf + 0x16c, ctx->m.c.mb_height); // Ns
buf[0x16f] = 0x10; // reserved buf[0x16f] = 0x10; // reserved
ctx->msip = buf + 0x170; ctx->msip = buf + 0x170;
@ -577,11 +576,11 @@ void dnxhd_encode_block(PutBitContext *pb, DNXHDEncContext *ctx,
int last_non_zero = 0; int last_non_zero = 0;
int slevel, i, j; int slevel, i, j;
dnxhd_encode_dc(pb, ctx, block[0] - ctx->m.last_dc[n]); dnxhd_encode_dc(pb, ctx, block[0] - ctx->m.c.last_dc[n]);
ctx->m.last_dc[n] = block[0]; ctx->m.c.last_dc[n] = block[0];
for (i = 1; i <= last_index; i++) { for (i = 1; i <= last_index; i++) {
j = ctx->m.intra_scantable.permutated[i]; j = ctx->m.c.intra_scantable.permutated[i];
slevel = block[j]; slevel = block[j];
if (slevel) { if (slevel) {
int run_level = i - last_non_zero - 1; int run_level = i - last_non_zero - 1;
@ -613,7 +612,7 @@ void dnxhd_unquantize_c(DNXHDEncContext *ctx, int16_t *block, int n,
} }
for (i = 1; i <= last_index; i++) { for (i = 1; i <= last_index; i++) {
int j = ctx->m.intra_scantable.permutated[i]; int j = ctx->m.c.intra_scantable.permutated[i];
level = block[j]; level = block[j];
if (level) { if (level) {
if (level < 0) { if (level < 0) {
@ -661,7 +660,7 @@ int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, int16_t *block, int last_index)
int bits = 0; int bits = 0;
int i, j, level; int i, j, level;
for (i = 1; i <= last_index; i++) { for (i = 1; i <= last_index; i++) {
j = ctx->m.intra_scantable.permutated[i]; j = ctx->m.c.intra_scantable.permutated[i];
level = block[j]; level = block[j];
if (level) { if (level) {
int run_level = i - last_non_zero - 1; int run_level = i - last_non_zero - 1;
@ -680,36 +679,36 @@ void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
const int bw = 1 << bs; const int bw = 1 << bs;
int dct_y_offset = ctx->dct_y_offset; int dct_y_offset = ctx->dct_y_offset;
int dct_uv_offset = ctx->dct_uv_offset; int dct_uv_offset = ctx->dct_uv_offset;
int linesize = ctx->m.linesize; int linesize = ctx->m.c.linesize;
int uvlinesize = ctx->m.uvlinesize; int uvlinesize = ctx->m.c.uvlinesize;
const uint8_t *ptr_y = ctx->thread[0]->src[0] + const uint8_t *ptr_y = ctx->thread[0]->src[0] +
((mb_y << 4) * ctx->m.linesize) + (mb_x << bs + 1); ((mb_y << 4) * ctx->m.c.linesize) + (mb_x << bs + 1);
const uint8_t *ptr_u = ctx->thread[0]->src[1] + const uint8_t *ptr_u = ctx->thread[0]->src[1] +
((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs + ctx->is_444); ((mb_y << 4) * ctx->m.c.uvlinesize) + (mb_x << bs + ctx->is_444);
const uint8_t *ptr_v = ctx->thread[0]->src[2] + const uint8_t *ptr_v = ctx->thread[0]->src[2] +
((mb_y << 4) * ctx->m.uvlinesize) + (mb_x << bs + ctx->is_444); ((mb_y << 4) * ctx->m.c.uvlinesize) + (mb_x << bs + ctx->is_444);
PixblockDSPContext *pdsp = &ctx->m.pdsp; PixblockDSPContext *pdsp = &ctx->m.pdsp;
VideoDSPContext *vdsp = &ctx->m.vdsp; VideoDSPContext *vdsp = &ctx->m.c.vdsp;
if (ctx->bit_depth != 10 && vdsp->emulated_edge_mc && ((mb_x << 4) + 16 > ctx->m.avctx->width || if (ctx->bit_depth != 10 && vdsp->emulated_edge_mc && ((mb_x << 4) + 16 > ctx->m.c.avctx->width ||
(mb_y << 4) + 16 > ctx->m.avctx->height)) { (mb_y << 4) + 16 > ctx->m.c.avctx->height)) {
int y_w = ctx->m.avctx->width - (mb_x << 4); int y_w = ctx->m.c.avctx->width - (mb_x << 4);
int y_h = ctx->m.avctx->height - (mb_y << 4); int y_h = ctx->m.c.avctx->height - (mb_y << 4);
int uv_w = (y_w + 1) / 2; int uv_w = (y_w + 1) / 2;
int uv_h = y_h; int uv_h = y_h;
linesize = 16; linesize = 16;
uvlinesize = 8; uvlinesize = 8;
vdsp->emulated_edge_mc(&ctx->edge_buf_y[0], ptr_y, vdsp->emulated_edge_mc(&ctx->edge_buf_y[0], ptr_y,
linesize, ctx->m.linesize, linesize, ctx->m.c.linesize,
linesize, 16, linesize, 16,
0, 0, y_w, y_h); 0, 0, y_w, y_h);
vdsp->emulated_edge_mc(&ctx->edge_buf_uv[0][0], ptr_u, vdsp->emulated_edge_mc(&ctx->edge_buf_uv[0][0], ptr_u,
uvlinesize, ctx->m.uvlinesize, uvlinesize, ctx->m.c.uvlinesize,
uvlinesize, 16, uvlinesize, 16,
0, 0, uv_w, uv_h); 0, 0, uv_w, uv_h);
vdsp->emulated_edge_mc(&ctx->edge_buf_uv[1][0], ptr_v, vdsp->emulated_edge_mc(&ctx->edge_buf_uv[1][0], ptr_v,
uvlinesize, ctx->m.uvlinesize, uvlinesize, ctx->m.c.uvlinesize,
uvlinesize, 16, uvlinesize, 16,
0, 0, uv_w, uv_h); 0, 0, uv_w, uv_h);
@ -718,25 +717,25 @@ void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
ptr_y = &ctx->edge_buf_y[0]; ptr_y = &ctx->edge_buf_y[0];
ptr_u = &ctx->edge_buf_uv[0][0]; ptr_u = &ctx->edge_buf_uv[0][0];
ptr_v = &ctx->edge_buf_uv[1][0]; ptr_v = &ctx->edge_buf_uv[1][0];
} else if (ctx->bit_depth == 10 && vdsp->emulated_edge_mc && ((mb_x << 4) + 16 > ctx->m.avctx->width || } else if (ctx->bit_depth == 10 && vdsp->emulated_edge_mc && ((mb_x << 4) + 16 > ctx->m.c.avctx->width ||
(mb_y << 4) + 16 > ctx->m.avctx->height)) { (mb_y << 4) + 16 > ctx->m.c.avctx->height)) {
int y_w = ctx->m.avctx->width - (mb_x << 4); int y_w = ctx->m.c.avctx->width - (mb_x << 4);
int y_h = ctx->m.avctx->height - (mb_y << 4); int y_h = ctx->m.c.avctx->height - (mb_y << 4);
int uv_w = ctx->is_444 ? y_w : (y_w + 1) / 2; int uv_w = ctx->is_444 ? y_w : (y_w + 1) / 2;
int uv_h = y_h; int uv_h = y_h;
linesize = 32; linesize = 32;
uvlinesize = 16 + 16 * ctx->is_444; uvlinesize = 16 + 16 * ctx->is_444;
vdsp->emulated_edge_mc(&ctx->edge_buf_y[0], ptr_y, vdsp->emulated_edge_mc(&ctx->edge_buf_y[0], ptr_y,
linesize, ctx->m.linesize, linesize, ctx->m.c.linesize,
linesize / 2, 16, linesize / 2, 16,
0, 0, y_w, y_h); 0, 0, y_w, y_h);
vdsp->emulated_edge_mc(&ctx->edge_buf_uv[0][0], ptr_u, vdsp->emulated_edge_mc(&ctx->edge_buf_uv[0][0], ptr_u,
uvlinesize, ctx->m.uvlinesize, uvlinesize, ctx->m.c.uvlinesize,
uvlinesize / 2, 16, uvlinesize / 2, 16,
0, 0, uv_w, uv_h); 0, 0, uv_w, uv_h);
vdsp->emulated_edge_mc(&ctx->edge_buf_uv[1][0], ptr_v, vdsp->emulated_edge_mc(&ctx->edge_buf_uv[1][0], ptr_v,
uvlinesize, ctx->m.uvlinesize, uvlinesize, ctx->m.c.uvlinesize,
uvlinesize / 2, 16, uvlinesize / 2, 16,
0, 0, uv_w, uv_h); 0, 0, uv_w, uv_h);
@ -753,7 +752,7 @@ void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
pdsp->get_pixels(ctx->blocks[2], ptr_u, uvlinesize); pdsp->get_pixels(ctx->blocks[2], ptr_u, uvlinesize);
pdsp->get_pixels(ctx->blocks[3], ptr_v, uvlinesize); pdsp->get_pixels(ctx->blocks[3], ptr_v, uvlinesize);
if (mb_y + 1 == ctx->m.mb_height && ctx->m.avctx->height == 1080) { if (mb_y + 1 == ctx->m.c.mb_height && ctx->m.c.avctx->height == 1080) {
if (ctx->interlaced) { if (ctx->interlaced) {
ctx->get_pixels_8x4_sym(ctx->blocks[4], ctx->get_pixels_8x4_sym(ctx->blocks[4],
ptr_y + dct_y_offset, ptr_y + dct_y_offset,
@ -768,10 +767,10 @@ void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
ptr_v + dct_uv_offset, ptr_v + dct_uv_offset,
uvlinesize); uvlinesize);
} else { } else {
ctx->m.bdsp.clear_block(ctx->blocks[4]); ctx->m.c.bdsp.clear_block(ctx->blocks[4]);
ctx->m.bdsp.clear_block(ctx->blocks[5]); ctx->m.c.bdsp.clear_block(ctx->blocks[5]);
ctx->m.bdsp.clear_block(ctx->blocks[6]); ctx->m.c.bdsp.clear_block(ctx->blocks[6]);
ctx->m.bdsp.clear_block(ctx->blocks[7]); ctx->m.c.bdsp.clear_block(ctx->blocks[7]);
} }
} else { } else {
pdsp->get_pixels(ctx->blocks[4], pdsp->get_pixels(ctx->blocks[4],
@ -819,17 +818,17 @@ static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg,
int jobnr, int threadnr) int jobnr, int threadnr)
{ {
DNXHDEncContext *ctx = avctx->priv_data; DNXHDEncContext *ctx = avctx->priv_data;
int mb_y = jobnr, mb_x; int mb_y = jobnr;
int qscale = ctx->qscale; int qscale = ctx->qscale;
LOCAL_ALIGNED_16(int16_t, block, [64]); LOCAL_ALIGNED_16(int16_t, block, [64]);
ctx = ctx->thread[threadnr]; ctx = ctx->thread[threadnr];
ctx->m.last_dc[0] = ctx->m.c.last_dc[0] =
ctx->m.last_dc[1] = ctx->m.c.last_dc[1] =
ctx->m.last_dc[2] = 1 << (ctx->bit_depth + 2); ctx->m.c.last_dc[2] = 1 << (ctx->bit_depth + 2);
for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) { for (int mb_x = 0; mb_x < ctx->m.c.mb_width; mb_x++) {
unsigned mb = mb_y * ctx->m.mb_width + mb_x; unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;
int ssd = 0; int ssd = 0;
int ac_bits = 0; int ac_bits = 0;
int dc_bits = 0; int dc_bits = 0;
@ -848,7 +847,7 @@ static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg,
qscale, &overflow); qscale, &overflow);
ac_bits += dnxhd_calc_ac_bits(ctx, block, last_index); ac_bits += dnxhd_calc_ac_bits(ctx, block, last_index);
diff = block[0] - ctx->m.last_dc[n]; diff = block[0] - ctx->m.c.last_dc[n];
if (diff < 0) if (diff < 0)
nbits = av_log2_16bit(-2 * diff); nbits = av_log2_16bit(-2 * diff);
else else
@ -857,16 +856,16 @@ static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg,
av_assert1(nbits < ctx->bit_depth + 4); av_assert1(nbits < ctx->bit_depth + 4);
dc_bits += ctx->cid_table->dc_bits[nbits] + nbits; dc_bits += ctx->cid_table->dc_bits[nbits] + nbits;
ctx->m.last_dc[n] = block[0]; ctx->m.c.last_dc[n] = block[0];
if (avctx->mb_decision == FF_MB_DECISION_RD || !RC_VARIANCE) { if (avctx->mb_decision == FF_MB_DECISION_RD || !RC_VARIANCE) {
dnxhd_unquantize_c(ctx, block, i, qscale, last_index); dnxhd_unquantize_c(ctx, block, i, qscale, last_index);
ctx->m.idsp.idct(block); ctx->m.c.idsp.idct(block);
ssd += dnxhd_ssd_block(block, src_block); ssd += dnxhd_ssd_block(block, src_block);
} }
} }
ctx->mb_rc[(qscale * ctx->m.mb_num) + mb].ssd = ssd; ctx->mb_rc[(qscale * ctx->m.c.mb_num) + mb].ssd = ssd;
ctx->mb_rc[(qscale * ctx->m.mb_num) + mb].bits = ac_bits + dc_bits + 12 + ctx->mb_rc[(qscale * ctx->m.c.mb_num) + mb].bits = ac_bits + dc_bits + 12 +
(1 + ctx->is_444) * 8 * ctx->vlc_bits[0]; (1 + ctx->is_444) * 8 * ctx->vlc_bits[0];
} }
return 0; return 0;
@ -877,16 +876,16 @@ static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg,
{ {
DNXHDEncContext *ctx = avctx->priv_data; DNXHDEncContext *ctx = avctx->priv_data;
PutBitContext pb0, *const pb = &pb0; PutBitContext pb0, *const pb = &pb0;
int mb_y = jobnr, mb_x; int mb_y = jobnr;
ctx = ctx->thread[threadnr]; ctx = ctx->thread[threadnr];
init_put_bits(pb, (uint8_t *)arg + ctx->data_offset + ctx->slice_offs[jobnr], init_put_bits(pb, (uint8_t *)arg + ctx->data_offset + ctx->slice_offs[jobnr],
ctx->slice_size[jobnr]); ctx->slice_size[jobnr]);
ctx->m.last_dc[0] = ctx->m.c.last_dc[0] =
ctx->m.last_dc[1] = ctx->m.c.last_dc[1] =
ctx->m.last_dc[2] = 1 << (ctx->bit_depth + 2); ctx->m.c.last_dc[2] = 1 << (ctx->bit_depth + 2);
for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) { for (int mb_x = 0; mb_x < ctx->m.c.mb_width; mb_x++) {
unsigned mb = mb_y * ctx->m.mb_width + mb_x; unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;
int qscale = ctx->mb_qscale[mb]; int qscale = ctx->mb_qscale[mb];
int i; int i;
@ -912,14 +911,12 @@ static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg,
static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx) static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
{ {
int mb_y, mb_x; for (int mb_y = 0, offset = 0; mb_y < ctx->m.c.mb_height; mb_y++) {
int offset = 0;
for (mb_y = 0; mb_y < ctx->m.mb_height; mb_y++) {
int thread_size; int thread_size;
ctx->slice_offs[mb_y] = offset; ctx->slice_offs[mb_y] = offset;
ctx->slice_size[mb_y] = 0; ctx->slice_size[mb_y] = 0;
for (mb_x = 0; mb_x < ctx->m.mb_width; mb_x++) { for (int mb_x = 0; mb_x < ctx->m.c.mb_width; mb_x++) {
unsigned mb = mb_y * ctx->m.mb_width + mb_x; unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;
ctx->slice_size[mb_y] += ctx->mb_bits[mb]; ctx->slice_size[mb_y] += ctx->mb_bits[mb];
} }
ctx->slice_size[mb_y] = (ctx->slice_size[mb_y] + 31U) & ~31U; ctx->slice_size[mb_y] = (ctx->slice_size[mb_y] + 31U) & ~31U;
@ -933,28 +930,28 @@ static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg,
int jobnr, int threadnr) int jobnr, int threadnr)
{ {
DNXHDEncContext *ctx = avctx->priv_data; DNXHDEncContext *ctx = avctx->priv_data;
int mb_y = jobnr, mb_x, x, y; int mb_y = jobnr, x, y;
int partial_last_row = (mb_y == ctx->m.mb_height - 1) && int partial_last_row = (mb_y == ctx->m.c.mb_height - 1) &&
((avctx->height >> ctx->interlaced) & 0xF); ((avctx->height >> ctx->interlaced) & 0xF);
ctx = ctx->thread[threadnr]; ctx = ctx->thread[threadnr];
if (ctx->bit_depth == 8) { if (ctx->bit_depth == 8) {
const uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.linesize); const uint8_t *pix = ctx->thread[0]->src[0] + ((mb_y << 4) * ctx->m.c.linesize);
for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x, pix += 16) { for (int mb_x = 0; mb_x < ctx->m.c.mb_width; ++mb_x, pix += 16) {
unsigned mb = mb_y * ctx->m.mb_width + mb_x; unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;
int sum; int sum;
int varc; int varc;
if (!partial_last_row && mb_x * 16 <= avctx->width - 16 && (avctx->width % 16) == 0) { if (!partial_last_row && mb_x * 16 <= avctx->width - 16 && (avctx->width % 16) == 0) {
sum = ctx->m.mpvencdsp.pix_sum(pix, ctx->m.linesize); sum = ctx->m.mpvencdsp.pix_sum(pix, ctx->m.c.linesize);
varc = ctx->m.mpvencdsp.pix_norm1(pix, ctx->m.linesize); varc = ctx->m.mpvencdsp.pix_norm1(pix, ctx->m.c.linesize);
} else { } else {
int bw = FFMIN(avctx->width - 16 * mb_x, 16); int bw = FFMIN(avctx->width - 16 * mb_x, 16);
int bh = FFMIN((avctx->height >> ctx->interlaced) - 16 * mb_y, 16); int bh = FFMIN((avctx->height >> ctx->interlaced) - 16 * mb_y, 16);
sum = varc = 0; sum = varc = 0;
for (y = 0; y < bh; y++) { for (y = 0; y < bh; y++) {
for (x = 0; x < bw; x++) { for (x = 0; x < bw; x++) {
uint8_t val = pix[x + y * ctx->m.linesize]; uint8_t val = pix[x + y * ctx->m.c.linesize];
sum += val; sum += val;
varc += val * val; varc += val * val;
} }
@ -966,11 +963,11 @@ static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg,
ctx->mb_cmp[mb].mb = mb; ctx->mb_cmp[mb].mb = mb;
} }
} else { // 10-bit } else { // 10-bit
const int linesize = ctx->m.linesize >> 1; const int linesize = ctx->m.c.linesize >> 1;
for (mb_x = 0; mb_x < ctx->m.mb_width; ++mb_x) { for (int mb_x = 0; mb_x < ctx->m.c.mb_width; ++mb_x) {
const uint16_t *pix = (const uint16_t *)ctx->thread[0]->src[0] + const uint16_t *pix = (const uint16_t *)ctx->thread[0]->src[0] +
((mb_y << 4) * linesize) + (mb_x << 4); ((mb_y << 4) * linesize) + (mb_x << 4);
unsigned mb = mb_y * ctx->m.mb_width + mb_x; unsigned mb = mb_y * ctx->m.c.mb_width + mb_x;
int sum = 0; int sum = 0;
int sqsum = 0; int sqsum = 0;
int bw = FFMIN(avctx->width - 16 * mb_x, 16); int bw = FFMIN(avctx->width - 16 * mb_x, 16);
@ -1001,12 +998,11 @@ static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
{ {
int lambda, up_step, down_step; int lambda, up_step, down_step;
int last_lower = INT_MAX, last_higher = 0; int last_lower = INT_MAX, last_higher = 0;
int x, y, q;
for (q = 1; q < avctx->qmax; q++) { for (int q = 1; q < avctx->qmax; q++) {
ctx->qscale = q; ctx->qscale = q;
avctx->execute2(avctx, dnxhd_calc_bits_thread, avctx->execute2(avctx, dnxhd_calc_bits_thread,
NULL, NULL, ctx->m.mb_height); NULL, NULL, ctx->m.c.mb_height);
} }
up_step = down_step = 2 << LAMBDA_FRAC_BITS; up_step = down_step = 2 << LAMBDA_FRAC_BITS;
lambda = ctx->lambda; lambda = ctx->lambda;
@ -1018,14 +1014,14 @@ static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
lambda++; lambda++;
end = 1; // need to set final qscales/bits end = 1; // need to set final qscales/bits
} }
for (y = 0; y < ctx->m.mb_height; y++) { for (int y = 0; y < ctx->m.c.mb_height; y++) {
for (x = 0; x < ctx->m.mb_width; x++) { for (int x = 0; x < ctx->m.c.mb_width; x++) {
unsigned min = UINT_MAX; unsigned min = UINT_MAX;
int qscale = 1; int qscale = 1;
int mb = y * ctx->m.mb_width + x; int mb = y * ctx->m.c.mb_width + x;
int rc = 0; int rc = 0;
for (q = 1; q < avctx->qmax; q++) { for (int q = 1; q < avctx->qmax; q++) {
int i = (q*ctx->m.mb_num) + mb; int i = (q*ctx->m.c.mb_num) + mb;
unsigned score = ctx->mb_rc[i].bits * lambda + unsigned score = ctx->mb_rc[i].bits * lambda +
((unsigned) ctx->mb_rc[i].ssd << LAMBDA_FRAC_BITS); ((unsigned) ctx->mb_rc[i].ssd << LAMBDA_FRAC_BITS);
if (score < min) { if (score < min) {
@ -1082,18 +1078,17 @@ static int dnxhd_find_qscale(DNXHDEncContext *ctx)
int last_higher = 0; int last_higher = 0;
int last_lower = INT_MAX; int last_lower = INT_MAX;
int qscale; int qscale;
int x, y;
qscale = ctx->qscale; qscale = ctx->qscale;
for (;;) { for (;;) {
bits = 0; bits = 0;
ctx->qscale = qscale; ctx->qscale = qscale;
// XXX avoid recalculating bits // XXX avoid recalculating bits
ctx->m.avctx->execute2(ctx->m.avctx, dnxhd_calc_bits_thread, ctx->m.c.avctx->execute2(ctx->m.c.avctx, dnxhd_calc_bits_thread,
NULL, NULL, ctx->m.mb_height); NULL, NULL, ctx->m.c.mb_height);
for (y = 0; y < ctx->m.mb_height; y++) { for (int y = 0; y < ctx->m.c.mb_height; y++) {
for (x = 0; x < ctx->m.mb_width; x++) for (int x = 0; x < ctx->m.c.mb_width; x++)
bits += ctx->mb_rc[(qscale*ctx->m.mb_num) + (y*ctx->m.mb_width+x)].bits; bits += ctx->mb_rc[(qscale*ctx->m.c.mb_num) + (y*ctx->m.c.mb_width+x)].bits;
bits = (bits+31)&~31; // padding bits = (bits+31)&~31; // padding
if (bits > ctx->frame_bits) if (bits > ctx->frame_bits)
break; break;
@ -1122,7 +1117,7 @@ static int dnxhd_find_qscale(DNXHDEncContext *ctx)
else else
qscale += up_step++; qscale += up_step++;
down_step = 1; down_step = 1;
if (qscale >= ctx->m.avctx->qmax) if (qscale >= ctx->m.c.avctx->qmax)
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
} }
@ -1189,24 +1184,24 @@ static void radix_sort(RCCMPEntry *data, RCCMPEntry *tmp, int size)
static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx) static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
{ {
int max_bits = 0; int max_bits = 0;
int ret, x, y; int ret;
if ((ret = dnxhd_find_qscale(ctx)) < 0) if ((ret = dnxhd_find_qscale(ctx)) < 0)
return ret; return ret;
for (y = 0; y < ctx->m.mb_height; y++) { for (int y = 0; y < ctx->m.c.mb_height; y++) {
for (x = 0; x < ctx->m.mb_width; x++) { for (int x = 0; x < ctx->m.c.mb_width; x++) {
int mb = y * ctx->m.mb_width + x; int mb = y * ctx->m.c.mb_width + x;
int rc = (ctx->qscale * ctx->m.mb_num ) + mb; int rc = (ctx->qscale * ctx->m.c.mb_num ) + mb;
int delta_bits; int delta_bits;
ctx->mb_qscale[mb] = ctx->qscale; ctx->mb_qscale[mb] = ctx->qscale;
ctx->mb_bits[mb] = ctx->mb_rc[rc].bits; ctx->mb_bits[mb] = ctx->mb_rc[rc].bits;
max_bits += ctx->mb_rc[rc].bits; max_bits += ctx->mb_rc[rc].bits;
if (!RC_VARIANCE) { if (!RC_VARIANCE) {
delta_bits = ctx->mb_rc[rc].bits - delta_bits = ctx->mb_rc[rc].bits -
ctx->mb_rc[rc + ctx->m.mb_num].bits; ctx->mb_rc[rc + ctx->m.c.mb_num].bits;
ctx->mb_cmp[mb].mb = mb; ctx->mb_cmp[mb].mb = mb;
ctx->mb_cmp[mb].value = ctx->mb_cmp[mb].value =
delta_bits ? ((ctx->mb_rc[rc].ssd - delta_bits ? ((ctx->mb_rc[rc].ssd -
ctx->mb_rc[rc + ctx->m.mb_num].ssd) * 100) / ctx->mb_rc[rc + ctx->m.c.mb_num].ssd) * 100) /
delta_bits delta_bits
: INT_MIN; // avoid increasing qscale : INT_MIN; // avoid increasing qscale
} }
@ -1216,17 +1211,17 @@ static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
if (!ret) { if (!ret) {
if (RC_VARIANCE) if (RC_VARIANCE)
avctx->execute2(avctx, dnxhd_mb_var_thread, avctx->execute2(avctx, dnxhd_mb_var_thread,
NULL, NULL, ctx->m.mb_height); NULL, NULL, ctx->m.c.mb_height);
radix_sort(ctx->mb_cmp, ctx->mb_cmp_tmp, ctx->m.mb_num); radix_sort(ctx->mb_cmp, ctx->mb_cmp_tmp, ctx->m.c.mb_num);
retry: retry:
for (x = 0; x < ctx->m.mb_num && max_bits > ctx->frame_bits; x++) { for (int x = 0; x < ctx->m.c.mb_num && max_bits > ctx->frame_bits; x++) {
int mb = ctx->mb_cmp[x].mb; int mb = ctx->mb_cmp[x].mb;
int rc = (ctx->qscale * ctx->m.mb_num ) + mb; int rc = (ctx->qscale * ctx->m.c.mb_num ) + mb;
max_bits -= ctx->mb_rc[rc].bits - max_bits -= ctx->mb_rc[rc].bits -
ctx->mb_rc[rc + ctx->m.mb_num].bits; ctx->mb_rc[rc + ctx->m.c.mb_num].bits;
if (ctx->mb_qscale[mb] < 255) if (ctx->mb_qscale[mb] < 255)
ctx->mb_qscale[mb]++; ctx->mb_qscale[mb]++;
ctx->mb_bits[mb] = ctx->mb_rc[rc + ctx->m.mb_num].bits; ctx->mb_bits[mb] = ctx->mb_rc[rc + ctx->m.c.mb_num].bits;
} }
if (max_bits > ctx->frame_bits) if (max_bits > ctx->frame_bits)
@ -1237,13 +1232,11 @@ retry:
static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame) static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
{ {
int i; for (int i = 0; i < ctx->m.c.avctx->thread_count; i++) {
ctx->thread[i]->m.c.linesize = frame->linesize[0] << ctx->interlaced;
for (i = 0; i < ctx->m.avctx->thread_count; i++) { ctx->thread[i]->m.c.uvlinesize = frame->linesize[1] << ctx->interlaced;
ctx->thread[i]->m.linesize = frame->linesize[0] << ctx->interlaced; ctx->thread[i]->dct_y_offset = ctx->m.c.linesize *8;
ctx->thread[i]->m.uvlinesize = frame->linesize[1] << ctx->interlaced; ctx->thread[i]->dct_uv_offset = ctx->m.c.uvlinesize*8;
ctx->thread[i]->dct_y_offset = ctx->m.linesize *8;
ctx->thread[i]->dct_uv_offset = ctx->m.uvlinesize*8;
} }
ctx->cur_field = (frame->flags & AV_FRAME_FLAG_INTERLACED) && ctx->cur_field = (frame->flags & AV_FRAME_FLAG_INTERLACED) &&
@ -1286,13 +1279,13 @@ encode_coding_unit:
dnxhd_setup_threads_slices(ctx); dnxhd_setup_threads_slices(ctx);
offset = 0; offset = 0;
for (i = 0; i < ctx->m.mb_height; i++) { for (i = 0; i < ctx->m.c.mb_height; i++) {
AV_WB32(ctx->msip + i * 4, offset); AV_WB32(ctx->msip + i * 4, offset);
offset += ctx->slice_size[i]; offset += ctx->slice_size[i];
av_assert1(!(ctx->slice_size[i] & 3)); av_assert1(!(ctx->slice_size[i] & 3));
} }
avctx->execute2(avctx, dnxhd_encode_thread, buf, NULL, ctx->m.mb_height); avctx->execute2(avctx, dnxhd_encode_thread, buf, NULL, ctx->m.c.mb_height);
av_assert1(ctx->data_offset + offset + 4 <= ctx->coding_unit_size); av_assert1(ctx->data_offset + offset + 4 <= ctx->coding_unit_size);
memset(buf + ctx->data_offset + offset, 0, memset(buf + ctx->data_offset + offset, 0,

View File

@ -28,7 +28,7 @@
#include "libavutil/mem_internal.h" #include "libavutil/mem_internal.h"
#include "mpegvideo.h" #include "mpegvideoenc.h"
#include "dnxhddata.h" #include "dnxhddata.h"
typedef struct RCCMPEntry { typedef struct RCCMPEntry {
@ -43,7 +43,7 @@ typedef struct RCEntry {
typedef struct DNXHDEncContext { typedef struct DNXHDEncContext {
AVClass *class; AVClass *class;
MpegEncContext m; ///< Used for quantization dsp functions MPVEncContext m; ///< Used for quantization dsp functions
int cid; int cid;
int profile; int profile;

View File

@ -25,42 +25,42 @@
int ff_flv_encode_picture_header(MPVMainEncContext *const m) int ff_flv_encode_picture_header(MPVMainEncContext *const m)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
int format; int format;
align_put_bits(&s->pb); align_put_bits(&s->pb);
put_bits(&s->pb, 17, 1); put_bits(&s->pb, 17, 1);
/* 0: H.263 escape codes 1: 11-bit escape codes */ /* 0: H.263 escape codes 1: 11-bit escape codes */
put_bits(&s->pb, 5, (s->h263_flv - 1)); put_bits(&s->pb, 5, (s->c.h263_flv - 1));
put_bits(&s->pb, 8, put_bits(&s->pb, 8,
(((int64_t) s->picture_number * 30 * s->avctx->time_base.num) / // FIXME use timestamp (((int64_t) s->c.picture_number * 30 * s->c.avctx->time_base.num) / // FIXME use timestamp
s->avctx->time_base.den) & 0xff); /* TemporalReference */ s->c.avctx->time_base.den) & 0xff); /* TemporalReference */
if (s->width == 352 && s->height == 288) if (s->c.width == 352 && s->c.height == 288)
format = 2; format = 2;
else if (s->width == 176 && s->height == 144) else if (s->c.width == 176 && s->c.height == 144)
format = 3; format = 3;
else if (s->width == 128 && s->height == 96) else if (s->c.width == 128 && s->c.height == 96)
format = 4; format = 4;
else if (s->width == 320 && s->height == 240) else if (s->c.width == 320 && s->c.height == 240)
format = 5; format = 5;
else if (s->width == 160 && s->height == 120) else if (s->c.width == 160 && s->c.height == 120)
format = 6; format = 6;
else if (s->width <= 255 && s->height <= 255) else if (s->c.width <= 255 && s->c.height <= 255)
format = 0; /* use 1 byte width & height */ format = 0; /* use 1 byte width & height */
else else
format = 1; /* use 2 bytes width & height */ format = 1; /* use 2 bytes width & height */
put_bits(&s->pb, 3, format); /* PictureSize */ put_bits(&s->pb, 3, format); /* PictureSize */
if (format == 0) { if (format == 0) {
put_bits(&s->pb, 8, s->width); put_bits(&s->pb, 8, s->c.width);
put_bits(&s->pb, 8, s->height); put_bits(&s->pb, 8, s->c.height);
} else if (format == 1) { } else if (format == 1) {
put_bits(&s->pb, 16, s->width); put_bits(&s->pb, 16, s->c.width);
put_bits(&s->pb, 16, s->height); put_bits(&s->pb, 16, s->c.height);
} }
put_bits(&s->pb, 2, s->pict_type == AV_PICTURE_TYPE_P); /* PictureType */ put_bits(&s->pb, 2, s->c.pict_type == AV_PICTURE_TYPE_P); /* PictureType */
put_bits(&s->pb, 1, 1); /* DeblockingFlag: on */ put_bits(&s->pb, 1, 1); /* DeblockingFlag: on */
put_bits(&s->pb, 5, s->qscale); /* Quantizer */ put_bits(&s->pb, 5, s->c.qscale); /* Quantizer */
put_bits(&s->pb, 1, 0); /* ExtraInformation */ put_bits(&s->pb, 1, 0); /* ExtraInformation */
return 0; return 0;

View File

@ -69,20 +69,20 @@ typedef struct H261EncContext {
static int h261_encode_picture_header(MPVMainEncContext *const m) static int h261_encode_picture_header(MPVMainEncContext *const m)
{ {
H261EncContext *const h = (H261EncContext *)m; H261EncContext *const h = (H261EncContext *)m;
MpegEncContext *const s = &h->s.s; MPVEncContext *const s = &h->s.s;
int temp_ref; int temp_ref;
align_put_bits(&s->pb); align_put_bits(&s->pb);
put_bits(&s->pb, 20, 0x10); /* PSC */ put_bits(&s->pb, 20, 0x10); /* PSC */
temp_ref = s->picture_number * 30000LL * s->avctx->time_base.num / temp_ref = s->c.picture_number * 30000LL * s->c.avctx->time_base.num /
(1001LL * s->avctx->time_base.den); // FIXME maybe this should use a timestamp (1001LL * s->c.avctx->time_base.den); // FIXME maybe this should use a timestamp
put_sbits(&s->pb, 5, temp_ref); /* TemporalReference */ put_sbits(&s->pb, 5, temp_ref); /* TemporalReference */
put_bits(&s->pb, 1, 0); /* split screen off */ put_bits(&s->pb, 1, 0); /* split screen off */
put_bits(&s->pb, 1, 0); /* camera off */ put_bits(&s->pb, 1, 0); /* camera off */
put_bits(&s->pb, 1, s->pict_type == AV_PICTURE_TYPE_I); /* freeze picture release on/off */ put_bits(&s->pb, 1, s->c.pict_type == AV_PICTURE_TYPE_I); /* freeze picture release on/off */
put_bits(&s->pb, 1, h->format); /* 0 == QCIF, 1 == CIF */ put_bits(&s->pb, 1, h->format); /* 0 == QCIF, 1 == CIF */
@ -91,7 +91,7 @@ static int h261_encode_picture_header(MPVMainEncContext *const m)
put_bits(&s->pb, 1, 0); /* no PEI */ put_bits(&s->pb, 1, 0); /* no PEI */
h->gob_number = h->format - 1; h->gob_number = h->format - 1;
s->mb_skip_run = 0; s->c.mb_skip_run = 0;
return 0; return 0;
} }
@ -99,7 +99,7 @@ static int h261_encode_picture_header(MPVMainEncContext *const m)
/** /**
* Encode a group of blocks header. * Encode a group of blocks header.
*/ */
static void h261_encode_gob_header(MpegEncContext *s, int mb_line) static void h261_encode_gob_header(MPVEncContext *const s, int mb_line)
{ {
H261EncContext *const h = (H261EncContext *)s; H261EncContext *const h = (H261EncContext *)s;
if (h->format == H261_QCIF) { if (h->format == H261_QCIF) {
@ -109,38 +109,38 @@ static void h261_encode_gob_header(MpegEncContext *s, int mb_line)
} }
put_bits(&s->pb, 16, 1); /* GBSC */ put_bits(&s->pb, 16, 1); /* GBSC */
put_bits(&s->pb, 4, h->gob_number); /* GN */ put_bits(&s->pb, 4, h->gob_number); /* GN */
put_bits(&s->pb, 5, s->qscale); /* GQUANT */ put_bits(&s->pb, 5, s->c.qscale); /* GQUANT */
put_bits(&s->pb, 1, 0); /* no GEI */ put_bits(&s->pb, 1, 0); /* no GEI */
s->mb_skip_run = 0; s->c.mb_skip_run = 0;
s->last_mv[0][0][0] = 0; s->c.last_mv[0][0][0] = 0;
s->last_mv[0][0][1] = 0; s->c.last_mv[0][0][1] = 0;
} }
void ff_h261_reorder_mb_index(MpegEncContext *s) void ff_h261_reorder_mb_index(MPVEncContext *const s)
{ {
const H261EncContext *const h = (H261EncContext*)s; const H261EncContext *const h = (H261EncContext*)s;
int index = s->mb_x + s->mb_y * s->mb_width; int index = s->c.mb_x + s->c.mb_y * s->c.mb_width;
if (index % 11 == 0) { if (index % 11 == 0) {
if (index % 33 == 0) if (index % 33 == 0)
h261_encode_gob_header(s, 0); h261_encode_gob_header(s, 0);
s->last_mv[0][0][0] = 0; s->c.last_mv[0][0][0] = 0;
s->last_mv[0][0][1] = 0; s->c.last_mv[0][0][1] = 0;
} }
/* for CIF the GOB's are fragmented in the middle of a scanline /* for CIF the GOB's are fragmented in the middle of a scanline
* that's why we need to adjust the x and y index of the macroblocks */ * that's why we need to adjust the x and y index of the macroblocks */
if (h->format == H261_CIF) { if (h->format == H261_CIF) {
s->mb_x = index % 11; s->c.mb_x = index % 11;
index /= 11; index /= 11;
s->mb_y = index % 3; s->c.mb_y = index % 3;
index /= 3; index /= 3;
s->mb_x += 11 * (index % 2); s->c.mb_x += 11 * (index % 2);
index /= 2; index /= 2;
s->mb_y += 3 * index; s->c.mb_y += 3 * index;
ff_init_block_index(s); ff_init_block_index(&s->c);
ff_update_block_index(s, 8, 0, 1); ff_update_block_index(&s->c, 8, 0, 1);
} }
} }
@ -150,12 +150,12 @@ static void h261_encode_motion(PutBitContext *pb, int val)
h261_mv_codes[MV_TAB_OFFSET + val][0]); h261_mv_codes[MV_TAB_OFFSET + val][0]);
} }
static inline int get_cbp(MpegEncContext *s, int16_t block[6][64]) static inline int get_cbp(const int block_last_index[6])
{ {
int i, cbp; int i, cbp;
cbp = 0; cbp = 0;
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
if (s->block_last_index[i] >= 0) if (block_last_index[i] >= 0)
cbp |= 1 << (5 - i); cbp |= 1 << (5 - i);
return cbp; return cbp;
} }
@ -167,10 +167,10 @@ static inline int get_cbp(MpegEncContext *s, int16_t block[6][64])
*/ */
static void h261_encode_block(H261EncContext *h, int16_t *block, int n) static void h261_encode_block(H261EncContext *h, int16_t *block, int n)
{ {
MpegEncContext *const s = &h->s.s; MPVEncContext *const s = &h->s.s;
int level, run, i, j, last_index, last_non_zero; int level, run, i, j, last_index, last_non_zero;
if (s->mb_intra) { if (s->c.mb_intra) {
/* DC coef */ /* DC coef */
level = block[0]; level = block[0];
/* 255 cannot be represented, so we clamp */ /* 255 cannot be represented, so we clamp */
@ -189,7 +189,7 @@ static void h261_encode_block(H261EncContext *h, int16_t *block, int n)
put_bits(&s->pb, 8, level); put_bits(&s->pb, 8, level);
i = 1; i = 1;
} else if ((block[0] == 1 || block[0] == -1) && } else if ((block[0] == 1 || block[0] == -1) &&
(s->block_last_index[n] > -1)) { (s->c.block_last_index[n] > -1)) {
// special case // special case
put_bits(&s->pb, 2, block[0] > 0 ? 2 : 3); put_bits(&s->pb, 2, block[0] > 0 ? 2 : 3);
i = 1; i = 1;
@ -198,10 +198,10 @@ static void h261_encode_block(H261EncContext *h, int16_t *block, int n)
} }
/* AC coefs */ /* AC coefs */
last_index = s->block_last_index[n]; last_index = s->c.block_last_index[n];
last_non_zero = i - 1; last_non_zero = i - 1;
for (; i <= last_index; i++) { for (; i <= last_index; i++) {
j = s->intra_scantable.permutated[i]; j = s->c.intra_scantable.permutated[i];
level = block[j]; level = block[j];
if (level) { if (level) {
run = i - last_non_zero - 1; run = i - last_non_zero - 1;
@ -225,7 +225,7 @@ static void h261_encode_block(H261EncContext *h, int16_t *block, int n)
put_bits(&s->pb, 2, 0x2); // EOB put_bits(&s->pb, 2, 0x2); // EOB
} }
static void h261_encode_mb(MpegEncContext *const s, int16_t block[6][64], static void h261_encode_mb(MPVEncContext *const s, int16_t block[6][64],
int motion_x, int motion_y) int motion_x, int motion_y)
{ {
/* The following is only allowed because this encoder /* The following is only allowed because this encoder
@ -238,36 +238,36 @@ static void h261_encode_mb(MpegEncContext *const s, int16_t block[6][64],
com->mtype = 0; com->mtype = 0;
if (!s->mb_intra) { if (!s->c.mb_intra) {
/* compute cbp */ /* compute cbp */
cbp = get_cbp(s, block); cbp = get_cbp(s->c.block_last_index);
/* mvd indicates if this block is motion compensated */ /* mvd indicates if this block is motion compensated */
mvd = motion_x | motion_y; mvd = motion_x | motion_y;
if ((cbp | mvd) == 0) { if ((cbp | mvd) == 0) {
/* skip macroblock */ /* skip macroblock */
s->mb_skip_run++; s->c.mb_skip_run++;
s->last_mv[0][0][0] = 0; s->c.last_mv[0][0][0] = 0;
s->last_mv[0][0][1] = 0; s->c.last_mv[0][0][1] = 0;
s->qscale -= s->dquant; s->c.qscale -= s->dquant;
return; return;
} }
} }
/* MB is not skipped, encode MBA */ /* MB is not skipped, encode MBA */
put_bits(&s->pb, put_bits(&s->pb,
ff_h261_mba_bits[s->mb_skip_run], ff_h261_mba_bits[s->c.mb_skip_run],
ff_h261_mba_code[s->mb_skip_run]); ff_h261_mba_code[s->c.mb_skip_run]);
s->mb_skip_run = 0; s->c.mb_skip_run = 0;
/* calculate MTYPE */ /* calculate MTYPE */
if (!s->mb_intra) { if (!s->c.mb_intra) {
com->mtype++; com->mtype++;
if (mvd || s->loop_filter) if (mvd || s->c.loop_filter)
com->mtype += 3; com->mtype += 3;
if (s->loop_filter) if (s->c.loop_filter)
com->mtype += 3; com->mtype += 3;
if (cbp) if (cbp)
com->mtype++; com->mtype++;
@ -277,7 +277,7 @@ static void h261_encode_mb(MpegEncContext *const s, int16_t block[6][64],
if (s->dquant && cbp) { if (s->dquant && cbp) {
com->mtype++; com->mtype++;
} else } else
s->qscale -= s->dquant; s->c.qscale -= s->dquant;
put_bits(&s->pb, put_bits(&s->pb,
ff_h261_mtype_bits[com->mtype], ff_h261_mtype_bits[com->mtype],
@ -286,15 +286,15 @@ static void h261_encode_mb(MpegEncContext *const s, int16_t block[6][64],
com->mtype = ff_h261_mtype_map[com->mtype]; com->mtype = ff_h261_mtype_map[com->mtype];
if (IS_QUANT(com->mtype)) { if (IS_QUANT(com->mtype)) {
ff_set_qscale(s, s->qscale + s->dquant); ff_set_qscale(&s->c, s->c.qscale + s->dquant);
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->c.qscale);
} }
if (IS_16X16(com->mtype)) { if (IS_16X16(com->mtype)) {
mv_diff_x = (motion_x >> 1) - s->last_mv[0][0][0]; mv_diff_x = (motion_x >> 1) - s->c.last_mv[0][0][0];
mv_diff_y = (motion_y >> 1) - s->last_mv[0][0][1]; mv_diff_y = (motion_y >> 1) - s->c.last_mv[0][0][1];
s->last_mv[0][0][0] = (motion_x >> 1); s->c.last_mv[0][0][0] = (motion_x >> 1);
s->last_mv[0][0][1] = (motion_y >> 1); s->c.last_mv[0][0][1] = (motion_y >> 1);
h261_encode_motion(&s->pb, mv_diff_x); h261_encode_motion(&s->pb, mv_diff_x);
h261_encode_motion(&s->pb, mv_diff_y); h261_encode_motion(&s->pb, mv_diff_y);
} }
@ -310,8 +310,8 @@ static void h261_encode_mb(MpegEncContext *const s, int16_t block[6][64],
h261_encode_block(h, block[i], i); h261_encode_block(h, block[i], i);
if (!IS_16X16(com->mtype)) { if (!IS_16X16(com->mtype)) {
s->last_mv[0][0][0] = 0; s->c.last_mv[0][0][0] = 0;
s->last_mv[0][0][1] = 0; s->c.last_mv[0][0][1] = 0;
} }
} }
@ -356,7 +356,7 @@ static av_cold int h261_encode_init(AVCodecContext *avctx)
{ {
static AVOnce init_static_once = AV_ONCE_INIT; static AVOnce init_static_once = AV_ONCE_INIT;
H261EncContext *const h = avctx->priv_data; H261EncContext *const h = avctx->priv_data;
MpegEncContext *const s = &h->s.s; MPVEncContext *const s = &h->s.s;
if (avctx->width == 176 && avctx->height == 144) { if (avctx->width == 176 && avctx->height == 144) {
h->format = H261_QCIF; h->format = H261_QCIF;
@ -369,7 +369,7 @@ static av_cold int h261_encode_init(AVCodecContext *avctx)
avctx->width, avctx->height); avctx->width, avctx->height);
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
s->private_ctx = &h->common; s->c.private_ctx = &h->common;
h->s.encode_picture_header = h261_encode_picture_header; h->s.encode_picture_header = h261_encode_picture_header;
s->encode_mb = h261_encode_mb; s->encode_mb = h261_encode_mb;
@ -377,7 +377,7 @@ static av_cold int h261_encode_init(AVCodecContext *avctx)
s->max_qcoeff = 127; s->max_qcoeff = 127;
s->ac_esc_length = H261_ESC_LEN; s->ac_esc_length = H261_ESC_LEN;
s->me.mv_penalty = mv_penalty; s->c.me.mv_penalty = mv_penalty;
s->intra_ac_vlc_length = s->inter_ac_vlc_length = uni_h261_rl_len; s->intra_ac_vlc_length = s->inter_ac_vlc_length = uni_h261_rl_len;
s->intra_ac_vlc_last_length = s->inter_ac_vlc_last_length = uni_h261_rl_len_last; s->intra_ac_vlc_last_length = s->inter_ac_vlc_last_length = uni_h261_rl_len_last;

View File

@ -28,8 +28,8 @@
#ifndef AVCODEC_H261ENC_H #ifndef AVCODEC_H261ENC_H
#define AVCODEC_H261ENC_H #define AVCODEC_H261ENC_H
#include "mpegvideo.h" typedef struct MPVEncContext MPVEncContext;
void ff_h261_reorder_mb_index(MpegEncContext *s); void ff_h261_reorder_mb_index(MPVEncContext *s);
#endif #endif

View File

@ -27,22 +27,22 @@
const uint8_t (*ff_h263_get_mv_penalty(void))[MAX_DMV*2+1]; const uint8_t (*ff_h263_get_mv_penalty(void))[MAX_DMV*2+1];
void ff_h263_encode_init(MPVMainEncContext *m); void ff_h263_encode_init(MPVMainEncContext *m);
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line); void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line);
void ff_h263_encode_mba(MpegEncContext *s); void ff_h263_encode_mba(MPVEncContext *s);
void ff_clean_h263_qscales(MpegEncContext *s); void ff_clean_h263_qscales(MPVEncContext *s);
void ff_h263_encode_motion(PutBitContext *pb, int val, int f_code); void ff_h263_encode_motion(PutBitContext *pb, int val, int f_code);
void ff_h263_update_mb(MpegEncContext *s); void ff_h263_update_mb(MPVEncContext *s);
static inline void ff_h263_encode_motion_vector(MpegEncContext * s, static inline void ff_h263_encode_motion_vector(MPVEncContext *s,
int x, int y, int f_code) int x, int y, int f_code)
{ {
ff_h263_encode_motion(&s->pb, x, f_code); ff_h263_encode_motion(&s->pb, x, f_code);
ff_h263_encode_motion(&s->pb, y, f_code); ff_h263_encode_motion(&s->pb, y, f_code);
} }
static inline int get_p_cbp(MpegEncContext * s, static inline int get_p_cbp(MPVEncContext *const s,
int16_t block[6][64], int16_t block[6][64],
int motion_x, int motion_y){ int motion_x, int motion_y){
int cbp; int cbp;
@ -51,8 +51,8 @@ static inline int get_p_cbp(MpegEncContext * s,
int best_cbpy_score = INT_MAX; int best_cbpy_score = INT_MAX;
int best_cbpc_score = INT_MAX; int best_cbpc_score = INT_MAX;
int cbpc = (-1), cbpy = (-1); int cbpc = (-1), cbpy = (-1);
const int offset = (s->mv_type == MV_TYPE_16X16 ? 0 : 16) + (s->dquant ? 8 : 0); const int offset = (s->c.mv_type == MV_TYPE_16X16 ? 0 : 16) + (s->dquant ? 8 : 0);
const int lambda = s->lambda2 >> (FF_LAMBDA_SHIFT - 6); const int lambda = s->c.lambda2 >> (FF_LAMBDA_SHIFT - 6);
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
int score = ff_h263_inter_MCBPC_bits[i + offset] * lambda; int score = ff_h263_inter_MCBPC_bits[i + offset] * lambda;
@ -78,21 +78,21 @@ static inline int get_p_cbp(MpegEncContext * s,
} }
} }
cbp = cbpc + 4 * cbpy; cbp = cbpc + 4 * cbpy;
if (!(motion_x | motion_y | s->dquant) && s->mv_type == MV_TYPE_16X16) { if (!(motion_x | motion_y | s->dquant) && s->c.mv_type == MV_TYPE_16X16) {
if (best_cbpy_score + best_cbpc_score + 2 * lambda >= 0) if (best_cbpy_score + best_cbpc_score + 2 * lambda >= 0)
cbp= 0; cbp= 0;
} }
for (int i = 0; i < 6; i++) { for (int i = 0; i < 6; i++) {
if (s->block_last_index[i] >= 0 && !((cbp >> (5 - i)) & 1)) { if (s->c.block_last_index[i] >= 0 && !((cbp >> (5 - i)) & 1)) {
s->block_last_index[i] = -1; s->c.block_last_index[i] = -1;
s->bdsp.clear_block(s->block[i]); s->c.bdsp.clear_block(s->c.block[i]);
} }
} }
} else { } else {
cbp = 0; cbp = 0;
for (int i = 0; i < 6; i++) { for (int i = 0; i < 6; i++) {
if (s->block_last_index[i] >= 0) if (s->c.block_last_index[i] >= 0)
cbp |= 1 << (5 - i); cbp |= 1 << (5 - i);
} }
} }

View File

@ -223,19 +223,19 @@ av_const int ff_h263_aspect_to_info(AVRational aspect){
static int h263_encode_picture_header(MPVMainEncContext *const m) static int h263_encode_picture_header(MPVMainEncContext *const m)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref; int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
int best_clock_code=1; int best_clock_code=1;
int best_divisor=60; int best_divisor=60;
int best_error= INT_MAX; int best_error= INT_MAX;
int custom_pcf; int custom_pcf;
if(s->h263_plus){ if(s->c.h263_plus){
for(i=0; i<2; i++){ for(i=0; i<2; i++){
int div, error; int div, error;
div= (s->avctx->time_base.num*1800000LL + 500LL*s->avctx->time_base.den) / ((1000LL+i)*s->avctx->time_base.den); div= (s->c.avctx->time_base.num*1800000LL + 500LL*s->c.avctx->time_base.den) / ((1000LL+i)*s->c.avctx->time_base.den);
div= av_clip(div, 1, 127); div= av_clip(div, 1, 127);
error= FFABS(s->avctx->time_base.num*1800000LL - (1000LL+i)*s->avctx->time_base.den*div); error= FFABS(s->c.avctx->time_base.num*1800000LL - (1000LL+i)*s->c.avctx->time_base.den*div);
if(error < best_error){ if(error < best_error){
best_error= error; best_error= error;
best_divisor= div; best_divisor= div;
@ -250,8 +250,8 @@ static int h263_encode_picture_header(MPVMainEncContext *const m)
align_put_bits(&s->pb); align_put_bits(&s->pb);
put_bits(&s->pb, 22, 0x20); /* PSC */ put_bits(&s->pb, 22, 0x20); /* PSC */
temp_ref= s->picture_number * (int64_t)coded_frame_rate * s->avctx->time_base.num / //FIXME use timestamp temp_ref= s->c.picture_number * (int64_t)coded_frame_rate * s->c.avctx->time_base.num / //FIXME use timestamp
(coded_frame_rate_base * (int64_t)s->avctx->time_base.den); (coded_frame_rate_base * (int64_t)s->c.avctx->time_base.den);
put_sbits(&s->pb, 8, temp_ref); /* TemporalReference */ put_sbits(&s->pb, 8, temp_ref); /* TemporalReference */
put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, 1); /* marker */
@ -260,19 +260,19 @@ static int h263_encode_picture_header(MPVMainEncContext *const m)
put_bits(&s->pb, 1, 0); /* camera off */ put_bits(&s->pb, 1, 0); /* camera off */
put_bits(&s->pb, 1, 0); /* freeze picture release off */ put_bits(&s->pb, 1, 0); /* freeze picture release off */
format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->width, s->height); format = ff_match_2uint16(ff_h263_format, FF_ARRAY_ELEMS(ff_h263_format), s->c.width, s->c.height);
if (!s->h263_plus) { if (!s->c.h263_plus) {
/* H.263v1 */ /* H.263v1 */
put_bits(&s->pb, 3, format); put_bits(&s->pb, 3, format);
put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P)); put_bits(&s->pb, 1, (s->c.pict_type == AV_PICTURE_TYPE_P));
/* By now UMV IS DISABLED ON H.263v1, since the restrictions /* By now UMV IS DISABLED ON H.263v1, since the restrictions
of H.263v1 UMV implies to check the predicted MV after of H.263v1 UMV implies to check the predicted MV after
calculation of the current MB to see if we're on the limits */ calculation of the current MB to see if we're on the limits */
put_bits(&s->pb, 1, 0); /* Unrestricted Motion Vector: off */ put_bits(&s->pb, 1, 0); /* Unrestricted Motion Vector: off */
put_bits(&s->pb, 1, 0); /* SAC: off */ put_bits(&s->pb, 1, 0); /* SAC: off */
put_bits(&s->pb, 1, s->obmc); /* Advanced Prediction */ put_bits(&s->pb, 1, s->c.obmc); /* Advanced Prediction */
put_bits(&s->pb, 1, 0); /* only I/P-frames, no PB-frame */ put_bits(&s->pb, 1, 0); /* only I/P-frames, no PB-frame */
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->c.qscale);
put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */ put_bits(&s->pb, 1, 0); /* Continuous Presence Multipoint mode: off */
} else { } else {
int ufep=1; int ufep=1;
@ -287,24 +287,24 @@ static int h263_encode_picture_header(MPVMainEncContext *const m)
put_bits(&s->pb, 3, format); put_bits(&s->pb, 3, format);
put_bits(&s->pb,1, custom_pcf); put_bits(&s->pb,1, custom_pcf);
put_bits(&s->pb,1, s->umvplus); /* Unrestricted Motion Vector */ put_bits(&s->pb,1, s->c.umvplus); /* Unrestricted Motion Vector */
put_bits(&s->pb,1,0); /* SAC: off */ put_bits(&s->pb,1,0); /* SAC: off */
put_bits(&s->pb,1,s->obmc); /* Advanced Prediction Mode */ put_bits(&s->pb,1,s->c.obmc); /* Advanced Prediction Mode */
put_bits(&s->pb,1,s->h263_aic); /* Advanced Intra Coding */ put_bits(&s->pb,1,s->c.h263_aic); /* Advanced Intra Coding */
put_bits(&s->pb,1,s->loop_filter); /* Deblocking Filter */ put_bits(&s->pb,1,s->c.loop_filter); /* Deblocking Filter */
put_bits(&s->pb,1,s->h263_slice_structured); /* Slice Structured */ put_bits(&s->pb,1,s->c.h263_slice_structured); /* Slice Structured */
put_bits(&s->pb,1,0); /* Reference Picture Selection: off */ put_bits(&s->pb,1,0); /* Reference Picture Selection: off */
put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */ put_bits(&s->pb,1,0); /* Independent Segment Decoding: off */
put_bits(&s->pb,1,s->alt_inter_vlc); /* Alternative Inter VLC */ put_bits(&s->pb,1,s->c.alt_inter_vlc); /* Alternative Inter VLC */
put_bits(&s->pb,1,s->modified_quant); /* Modified Quantization: */ put_bits(&s->pb,1,s->c.modified_quant); /* Modified Quantization: */
put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
put_bits(&s->pb,3,0); /* Reserved */ put_bits(&s->pb,3,0); /* Reserved */
put_bits(&s->pb, 3, s->pict_type == AV_PICTURE_TYPE_P); put_bits(&s->pb, 3, s->c.pict_type == AV_PICTURE_TYPE_P);
put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */ put_bits(&s->pb,1,0); /* Reference Picture Resampling: off */
put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */ put_bits(&s->pb,1,0); /* Reduced-Resolution Update: off */
put_bits(&s->pb,1,s->no_rounding); /* Rounding Type */ put_bits(&s->pb,1,s->c.no_rounding); /* Rounding Type */
put_bits(&s->pb,2,0); /* Reserved */ put_bits(&s->pb,2,0); /* Reserved */
put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
@ -313,15 +313,15 @@ static int h263_encode_picture_header(MPVMainEncContext *const m)
if (format == 8) { if (format == 8) {
/* Custom Picture Format (CPFMT) */ /* Custom Picture Format (CPFMT) */
unsigned aspect_ratio_info = ff_h263_aspect_to_info(s->avctx->sample_aspect_ratio); unsigned aspect_ratio_info = ff_h263_aspect_to_info(s->c.avctx->sample_aspect_ratio);
put_bits(&s->pb,4, aspect_ratio_info); put_bits(&s->pb,4, aspect_ratio_info);
put_bits(&s->pb,9,(s->width >> 2) - 1); put_bits(&s->pb,9,(s->c.width >> 2) - 1);
put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */ put_bits(&s->pb,1,1); /* "1" to prevent start code emulation */
put_bits(&s->pb,9,(s->height >> 2)); put_bits(&s->pb,9,(s->c.height >> 2));
if (aspect_ratio_info == FF_ASPECT_EXTENDED){ if (aspect_ratio_info == FF_ASPECT_EXTENDED){
put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.num); put_bits(&s->pb, 8, s->c.avctx->sample_aspect_ratio.num);
put_bits(&s->pb, 8, s->avctx->sample_aspect_ratio.den); put_bits(&s->pb, 8, s->c.avctx->sample_aspect_ratio.den);
} }
} }
if (custom_pcf) { if (custom_pcf) {
@ -333,22 +333,22 @@ static int h263_encode_picture_header(MPVMainEncContext *const m)
} }
/* Unlimited Unrestricted Motion Vectors Indicator (UUI) */ /* Unlimited Unrestricted Motion Vectors Indicator (UUI) */
if (s->umvplus) if (s->c.umvplus)
// put_bits(&s->pb,1,1); /* Limited according tables of Annex D */ // put_bits(&s->pb,1,1); /* Limited according tables of Annex D */
//FIXME check actual requested range //FIXME check actual requested range
put_bits(&s->pb,2,1); /* unlimited */ put_bits(&s->pb,2,1); /* unlimited */
if(s->h263_slice_structured) if(s->c.h263_slice_structured)
put_bits(&s->pb,2,0); /* no weird submodes */ put_bits(&s->pb,2,0); /* no weird submodes */
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->c.qscale);
} }
put_bits(&s->pb, 1, 0); /* no PEI */ put_bits(&s->pb, 1, 0); /* no PEI */
if(s->h263_slice_structured){ if(s->c.h263_slice_structured){
put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 1);
av_assert1(s->mb_x == 0 && s->mb_y == 0); av_assert1(s->c.mb_x == 0 && s->c.mb_y == 0);
ff_h263_encode_mba(s); ff_h263_encode_mba(s);
put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 1);
@ -360,50 +360,51 @@ static int h263_encode_picture_header(MPVMainEncContext *const m)
/** /**
* Encode a group of blocks header. * Encode a group of blocks header.
*/ */
void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line) void ff_h263_encode_gob_header(MPVEncContext *const s, int mb_line)
{ {
put_bits(&s->pb, 17, 1); /* GBSC */ put_bits(&s->pb, 17, 1); /* GBSC */
if(s->h263_slice_structured){ if(s->c.h263_slice_structured){
put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 1);
ff_h263_encode_mba(s); ff_h263_encode_mba(s);
if(s->mb_num > 1583) if(s->c.mb_num > 1583)
put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 1);
put_bits(&s->pb, 5, s->qscale); /* GQUANT */ put_bits(&s->pb, 5, s->c.qscale); /* GQUANT */
put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 1);
put_bits(&s->pb, 2, s->pict_type == AV_PICTURE_TYPE_I); /* GFID */ put_bits(&s->pb, 2, s->c.pict_type == AV_PICTURE_TYPE_I); /* GFID */
}else{ }else{
int gob_number= mb_line / s->gob_index; int gob_number= mb_line / s->c.gob_index;
put_bits(&s->pb, 5, gob_number); /* GN */ put_bits(&s->pb, 5, gob_number); /* GN */
put_bits(&s->pb, 2, s->pict_type == AV_PICTURE_TYPE_I); /* GFID */ put_bits(&s->pb, 2, s->c.pict_type == AV_PICTURE_TYPE_I); /* GFID */
put_bits(&s->pb, 5, s->qscale); /* GQUANT */ put_bits(&s->pb, 5, s->c.qscale); /* GQUANT */
} }
} }
/** /**
* modify qscale so that encoding is actually possible in H.263 (limit difference to -2..2) * modify qscale so that encoding is actually possible in H.263 (limit difference to -2..2)
*/ */
void ff_clean_h263_qscales(MpegEncContext *s){ void ff_clean_h263_qscales(MPVEncContext *const s)
int i; {
int8_t * const qscale_table = s->cur_pic.qscale_table; int8_t * const qscale_table = s->c.cur_pic.qscale_table;
for(i=1; i<s->mb_num; i++){ for (int i = 1; i < s->c.mb_num; i++) {
if(qscale_table[ s->mb_index2xy[i] ] - qscale_table[ s->mb_index2xy[i-1] ] >2) if (qscale_table[ s->c.mb_index2xy[i] ] - qscale_table[ s->c.mb_index2xy[i-1] ] > 2)
qscale_table[ s->mb_index2xy[i] ]= qscale_table[ s->mb_index2xy[i-1] ]+2; qscale_table[ s->c.mb_index2xy[i] ] = qscale_table[ s->c.mb_index2xy[i-1] ] + 2;
} }
for(i=s->mb_num-2; i>=0; i--){ for(int i = s->c.mb_num - 2; i >= 0; i--) {
if(qscale_table[ s->mb_index2xy[i] ] - qscale_table[ s->mb_index2xy[i+1] ] >2) if (qscale_table[ s->c.mb_index2xy[i] ] - qscale_table[ s->c.mb_index2xy[i+1] ] > 2)
qscale_table[ s->mb_index2xy[i] ]= qscale_table[ s->mb_index2xy[i+1] ]+2; qscale_table[ s->c.mb_index2xy[i] ] = qscale_table[ s->c.mb_index2xy[i+1] ] + 2;
} }
if(s->codec_id != AV_CODEC_ID_H263P){ if (s->c.codec_id != AV_CODEC_ID_H263P) {
for(i=1; i<s->mb_num; i++){ for (int i = 1; i < s->c.mb_num; i++) {
int mb_xy= s->mb_index2xy[i]; int mb_xy = s->c.mb_index2xy[i];
if(qscale_table[mb_xy] != qscale_table[s->mb_index2xy[i-1]] && (s->mb_type[mb_xy]&CANDIDATE_MB_TYPE_INTER4V)){ if (qscale_table[mb_xy] != qscale_table[s->c.mb_index2xy[i - 1]] &&
(s->mb_type[mb_xy] & CANDIDATE_MB_TYPE_INTER4V)) {
s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_INTER; s->mb_type[mb_xy]|= CANDIDATE_MB_TYPE_INTER;
} }
} }
@ -417,13 +418,13 @@ static const int dquant_code[5]= {1,0,9,2,3};
* @param block the 8x8 block * @param block the 8x8 block
* @param n block index (0-3 are luma, 4-5 are chroma) * @param n block index (0-3 are luma, 4-5 are chroma)
*/ */
static void h263_encode_block(MpegEncContext * s, int16_t * block, int n) static void h263_encode_block(MPVEncContext *const s, int16_t block[], int n)
{ {
int level, run, last, i, j, last_index, last_non_zero, sign, slevel, code; int level, run, last, i, j, last_index, last_non_zero, sign, slevel, code;
const RLTable *rl; const RLTable *rl;
rl = &ff_h263_rl_inter; rl = &ff_h263_rl_inter;
if (s->mb_intra && !s->h263_aic) { if (s->c.mb_intra && !s->c.h263_aic) {
/* DC coef */ /* DC coef */
level = block[0]; level = block[0];
/* 255 cannot be represented, so we clamp */ /* 255 cannot be represented, so we clamp */
@ -443,19 +444,19 @@ static void h263_encode_block(MpegEncContext * s, int16_t * block, int n)
i = 1; i = 1;
} else { } else {
i = 0; i = 0;
if (s->h263_aic && s->mb_intra) if (s->c.h263_aic && s->c.mb_intra)
rl = &ff_rl_intra_aic; rl = &ff_rl_intra_aic;
if(s->alt_inter_vlc && !s->mb_intra){ if(s->c.alt_inter_vlc && !s->c.mb_intra){
int aic_vlc_bits=0; int aic_vlc_bits=0;
int inter_vlc_bits=0; int inter_vlc_bits=0;
int wrong_pos=-1; int wrong_pos=-1;
int aic_code; int aic_code;
last_index = s->block_last_index[n]; last_index = s->c.block_last_index[n];
last_non_zero = i - 1; last_non_zero = i - 1;
for (; i <= last_index; i++) { for (; i <= last_index; i++) {
j = s->intra_scantable.permutated[i]; j = s->c.intra_scantable.permutated[i];
level = block[j]; level = block[j];
if (level) { if (level) {
run = i - last_non_zero - 1; run = i - last_non_zero - 1;
@ -486,10 +487,10 @@ static void h263_encode_block(MpegEncContext * s, int16_t * block, int n)
} }
/* AC coefs */ /* AC coefs */
last_index = s->block_last_index[n]; last_index = s->c.block_last_index[n];
last_non_zero = i - 1; last_non_zero = i - 1;
for (; i <= last_index; i++) { for (; i <= last_index; i++) {
j = s->intra_scantable.permutated[i]; j = s->c.intra_scantable.permutated[i];
level = block[j]; level = block[j];
if (level) { if (level) {
run = i - last_non_zero - 1; run = i - last_non_zero - 1;
@ -503,7 +504,7 @@ static void h263_encode_block(MpegEncContext * s, int16_t * block, int n)
code = get_rl_index(rl, last, run, level); code = get_rl_index(rl, last, run, level);
put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
if (code == rl->n) { if (code == rl->n) {
if(!CONFIG_FLV_ENCODER || s->h263_flv <= 1){ if(!CONFIG_FLV_ENCODER || s->c.h263_flv <= 1){
put_bits(&s->pb, 1, last); put_bits(&s->pb, 1, last);
put_bits(&s->pb, 6, run); put_bits(&s->pb, 6, run);
@ -565,22 +566,22 @@ static void h263p_encode_umotion(PutBitContext *pb, int val)
} }
} }
static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr) static int h263_pred_dc(MPVEncContext *const s, int n, int16_t **dc_val_ptr)
{ {
int x, y, wrap, a, c, pred_dc; int x, y, wrap, a, c, pred_dc;
int16_t *dc_val; int16_t *dc_val;
/* find prediction */ /* find prediction */
if (n < 4) { if (n < 4) {
x = 2 * s->mb_x + (n & 1); x = 2 * s->c.mb_x + (n & 1);
y = 2 * s->mb_y + ((n & 2) >> 1); y = 2 * s->c.mb_y + ((n & 2) >> 1);
wrap = s->b8_stride; wrap = s->c.b8_stride;
dc_val = s->dc_val[0]; dc_val = s->c.dc_val[0];
} else { } else {
x = s->mb_x; x = s->c.mb_x;
y = s->mb_y; y = s->c.mb_y;
wrap = s->mb_stride; wrap = s->c.mb_stride;
dc_val = s->dc_val[n - 4 + 1]; dc_val = s->c.dc_val[n - 4 + 1];
} }
/* B C /* B C
* A X * A X
@ -589,9 +590,9 @@ static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
c = dc_val[(x) + (y - 1) * wrap]; c = dc_val[(x) + (y - 1) * wrap];
/* No prediction outside GOB boundary */ /* No prediction outside GOB boundary */
if (s->first_slice_line && n != 3) { if (s->c.first_slice_line && n != 3) {
if (n != 2) c = 1024; if (n != 2) c = 1024;
if (n != 1 && s->mb_x == s->resync_mb_x) a = 1024; if (n != 1 && s->c.mb_x == s->c.resync_mb_x) a = 1024;
} }
/* just DC prediction */ /* just DC prediction */
if (a != 1024 && c != 1024) if (a != 1024 && c != 1024)
@ -606,7 +607,7 @@ static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
return pred_dc; return pred_dc;
} }
static void h263_encode_mb(MpegEncContext *const s, static void h263_encode_mb(MPVEncContext *const s,
int16_t block[][64], int16_t block[][64],
int motion_x, int motion_y) int motion_x, int motion_y)
{ {
@ -614,13 +615,13 @@ static void h263_encode_mb(MpegEncContext *const s,
int16_t pred_dc; int16_t pred_dc;
int16_t rec_intradc[6]; int16_t rec_intradc[6];
int16_t *dc_ptr[6]; int16_t *dc_ptr[6];
const int interleaved_stats = s->avctx->flags & AV_CODEC_FLAG_PASS1; const int interleaved_stats = s->c.avctx->flags & AV_CODEC_FLAG_PASS1;
if (!s->mb_intra) { if (!s->c.mb_intra) {
/* compute cbp */ /* compute cbp */
cbp= get_p_cbp(s, block, motion_x, motion_y); cbp= get_p_cbp(s, block, motion_x, motion_y);
if ((cbp | motion_x | motion_y | s->dquant | (s->mv_type - MV_TYPE_16X16)) == 0) { if ((cbp | motion_x | motion_y | s->dquant | (s->c.mv_type - MV_TYPE_16X16)) == 0) {
/* skip macroblock */ /* skip macroblock */
put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 1);
if(interleaved_stats){ if(interleaved_stats){
@ -634,10 +635,10 @@ static void h263_encode_mb(MpegEncContext *const s,
cbpc = cbp & 3; cbpc = cbp & 3;
cbpy = cbp >> 2; cbpy = cbp >> 2;
if(s->alt_inter_vlc==0 || cbpc!=3) if(s->c.alt_inter_vlc==0 || cbpc!=3)
cbpy ^= 0xF; cbpy ^= 0xF;
if(s->dquant) cbpc+= 8; if(s->dquant) cbpc+= 8;
if(s->mv_type==MV_TYPE_16X16){ if(s->c.mv_type==MV_TYPE_16X16){
put_bits(&s->pb, put_bits(&s->pb,
ff_h263_inter_MCBPC_bits[cbpc], ff_h263_inter_MCBPC_bits[cbpc],
ff_h263_inter_MCBPC_code[cbpc]); ff_h263_inter_MCBPC_code[cbpc]);
@ -651,9 +652,9 @@ static void h263_encode_mb(MpegEncContext *const s,
} }
/* motion vectors: 16x16 mode */ /* motion vectors: 16x16 mode */
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
if (!s->umvplus) { if (!s->c.umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x, ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, 1); motion_y - pred_y, 1);
} }
@ -678,11 +679,11 @@ static void h263_encode_mb(MpegEncContext *const s,
for(i=0; i<4; i++){ for(i=0; i<4; i++){
/* motion vectors: 8x8 mode*/ /* motion vectors: 8x8 mode*/
ff_h263_pred_motion(s, i, 0, &pred_x, &pred_y); ff_h263_pred_motion(&s->c, i, 0, &pred_x, &pred_y);
motion_x = s->cur_pic.motion_val[0][s->block_index[i]][0]; motion_x = s->c.cur_pic.motion_val[0][s->c.block_index[i]][0];
motion_y = s->cur_pic.motion_val[0][s->block_index[i]][1]; motion_y = s->c.cur_pic.motion_val[0][s->c.block_index[i]][1];
if (!s->umvplus) { if (!s->c.umvplus) {
ff_h263_encode_motion_vector(s, motion_x - pred_x, ff_h263_encode_motion_vector(s, motion_x - pred_x,
motion_y - pred_y, 1); motion_y - pred_y, 1);
} }
@ -700,17 +701,14 @@ static void h263_encode_mb(MpegEncContext *const s,
s->mv_bits+= get_bits_diff(s); s->mv_bits+= get_bits_diff(s);
} }
} else { } else {
av_assert2(s->mb_intra); av_assert2(s->c.mb_intra);
cbp = 0; cbp = 0;
if (s->h263_aic) { if (s->c.h263_aic) {
/* Predict DC */ /* Predict DC */
for(i=0; i<6; i++) { for(i=0; i<6; i++) {
int16_t level = block[i][0]; int16_t level = block[i][0];
int scale; int scale = i < 4 ? s->c.y_dc_scale : s->c.c_dc_scale;
if(i<4) scale= s->y_dc_scale;
else scale= s->c_dc_scale;
pred_dc = h263_pred_dc(s, i, &dc_ptr[i]); pred_dc = h263_pred_dc(s, i, &dc_ptr[i]);
level -= pred_dc; level -= pred_dc;
@ -720,7 +718,7 @@ static void h263_encode_mb(MpegEncContext *const s,
else else
level = (level - (scale>>1))/scale; level = (level - (scale>>1))/scale;
if(!s->modified_quant){ if (!s->c.modified_quant) {
if (level < -127) if (level < -127)
level = -127; level = -127;
else if (level > 127) else if (level > 127)
@ -743,20 +741,20 @@ static void h263_encode_mb(MpegEncContext *const s,
/* Update AC/DC tables */ /* Update AC/DC tables */
*dc_ptr[i] = rec_intradc[i]; *dc_ptr[i] = rec_intradc[i];
/* AIC can change CBP */ /* AIC can change CBP */
if (s->block_last_index[i] > 0 || if (s->c.block_last_index[i] > 0 ||
(s->block_last_index[i] == 0 && level !=0)) (s->c.block_last_index[i] == 0 && level !=0))
cbp |= 1 << (5 - i); cbp |= 1 << (5 - i);
} }
}else{ }else{
for(i=0; i<6; i++) { for(i=0; i<6; i++) {
/* compute cbp */ /* compute cbp */
if (s->block_last_index[i] >= 1) if (s->c.block_last_index[i] >= 1)
cbp |= 1 << (5 - i); cbp |= 1 << (5 - i);
} }
} }
cbpc = cbp & 3; cbpc = cbp & 3;
if (s->pict_type == AV_PICTURE_TYPE_I) { if (s->c.pict_type == AV_PICTURE_TYPE_I) {
if(s->dquant) cbpc+=4; if(s->dquant) cbpc+=4;
put_bits(&s->pb, put_bits(&s->pb,
ff_h263_intra_MCBPC_bits[cbpc], ff_h263_intra_MCBPC_bits[cbpc],
@ -768,7 +766,7 @@ static void h263_encode_mb(MpegEncContext *const s,
ff_h263_inter_MCBPC_bits[cbpc + 4], ff_h263_inter_MCBPC_bits[cbpc + 4],
ff_h263_inter_MCBPC_code[cbpc + 4]); ff_h263_inter_MCBPC_code[cbpc + 4]);
} }
if (s->h263_aic) { if (s->c.h263_aic) {
/* XXX: currently, we do not try to use ac prediction */ /* XXX: currently, we do not try to use ac prediction */
put_bits(&s->pb, 1, 0); /* no AC prediction */ put_bits(&s->pb, 1, 0); /* no AC prediction */
} }
@ -787,14 +785,12 @@ static void h263_encode_mb(MpegEncContext *const s,
h263_encode_block(s, block[i], i); h263_encode_block(s, block[i], i);
/* Update INTRADC for decoding */ /* Update INTRADC for decoding */
if (s->h263_aic && s->mb_intra) { if (s->c.h263_aic && s->c.mb_intra)
block[i][0] = rec_intradc[i]; block[i][0] = rec_intradc[i];
}
} }
if(interleaved_stats){ if(interleaved_stats){
if (!s->mb_intra) { if (!s->c.mb_intra) {
s->p_tex_bits+= get_bits_diff(s); s->p_tex_bits+= get_bits_diff(s);
}else{ }else{
s->i_tex_bits+= get_bits_diff(s); s->i_tex_bits+= get_bits_diff(s);
@ -803,54 +799,54 @@ static void h263_encode_mb(MpegEncContext *const s,
} }
} }
void ff_h263_update_mb(MpegEncContext *s) void ff_h263_update_mb(MPVEncContext *const s)
{ {
const int mb_xy = s->mb_y * s->mb_stride + s->mb_x; const int mb_xy = s->c.mb_y * s->c.mb_stride + s->c.mb_x;
if (s->cur_pic.mbskip_table) if (s->c.cur_pic.mbskip_table)
s->cur_pic.mbskip_table[mb_xy] = s->mb_skipped; s->c.cur_pic.mbskip_table[mb_xy] = s->c.mb_skipped;
if (s->mv_type == MV_TYPE_8X8) if (s->c.mv_type == MV_TYPE_8X8)
s->cur_pic.mb_type[mb_xy] = MB_TYPE_FORWARD_MV | MB_TYPE_8x8; s->c.cur_pic.mb_type[mb_xy] = MB_TYPE_FORWARD_MV | MB_TYPE_8x8;
else if(s->mb_intra) else if(s->c.mb_intra)
s->cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA; s->c.cur_pic.mb_type[mb_xy] = MB_TYPE_INTRA;
else else
s->cur_pic.mb_type[mb_xy] = MB_TYPE_FORWARD_MV | MB_TYPE_16x16; s->c.cur_pic.mb_type[mb_xy] = MB_TYPE_FORWARD_MV | MB_TYPE_16x16;
ff_h263_update_motion_val(s); ff_h263_update_motion_val(&s->c);
} }
av_cold void ff_h263_encode_init(MPVMainEncContext *const m) av_cold void ff_h263_encode_init(MPVMainEncContext *const m)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
s->me.mv_penalty = ff_h263_get_mv_penalty(); // FIXME exact table for MSMPEG4 & H.263+ s->c.me.mv_penalty = ff_h263_get_mv_penalty(); // FIXME exact table for MSMPEG4 & H.263+
ff_h263dsp_init(&s->h263dsp); ff_h263dsp_init(&s->c.h263dsp);
if (s->codec_id == AV_CODEC_ID_MPEG4) if (s->c.codec_id == AV_CODEC_ID_MPEG4)
return; return;
s->intra_ac_vlc_length =s->inter_ac_vlc_length = uni_h263_inter_rl_len; s->intra_ac_vlc_length =s->inter_ac_vlc_length = uni_h263_inter_rl_len;
s->intra_ac_vlc_last_length=s->inter_ac_vlc_last_length= uni_h263_inter_rl_len + 128*64; s->intra_ac_vlc_last_length=s->inter_ac_vlc_last_length= uni_h263_inter_rl_len + 128*64;
if(s->h263_aic){ if (s->c.h263_aic) {
s->intra_ac_vlc_length = uni_h263_intra_aic_rl_len; s->intra_ac_vlc_length = uni_h263_intra_aic_rl_len;
s->intra_ac_vlc_last_length= uni_h263_intra_aic_rl_len + 128*64; s->intra_ac_vlc_last_length= uni_h263_intra_aic_rl_len + 128*64;
s->y_dc_scale_table = s->c.y_dc_scale_table =
s->c_dc_scale_table = ff_aic_dc_scale_table; s->c.c_dc_scale_table = ff_aic_dc_scale_table;
} }
s->ac_esc_length= 7+1+6+8; s->ac_esc_length= 7+1+6+8;
if (s->modified_quant) if (s->c.modified_quant)
s->chroma_qscale_table = ff_h263_chroma_qscale_table; s->c.chroma_qscale_table = ff_h263_chroma_qscale_table;
// use fcodes >1 only for MPEG-4 & H.263 & H.263+ FIXME // use fcodes >1 only for MPEG-4 & H.263 & H.263+ FIXME
switch(s->codec_id){ switch(s->c.codec_id){
case AV_CODEC_ID_H263P: case AV_CODEC_ID_H263P:
if(s->umvplus) if (s->c.umvplus)
m->fcode_tab = umv_fcode_tab + MAX_MV; m->fcode_tab = umv_fcode_tab + MAX_MV;
if(s->modified_quant){ if (s->c.modified_quant) {
s->min_qcoeff= -2047; s->min_qcoeff= -2047;
s->max_qcoeff= 2047; s->max_qcoeff= 2047;
}else{ }else{
@ -861,7 +857,7 @@ av_cold void ff_h263_encode_init(MPVMainEncContext *const m)
// Note for MPEG-4 & H.263 the dc-scale table will be set per frame as needed later // Note for MPEG-4 & H.263 the dc-scale table will be set per frame as needed later
case AV_CODEC_ID_FLV1: case AV_CODEC_ID_FLV1:
m->encode_picture_header = ff_flv_encode_picture_header; m->encode_picture_header = ff_flv_encode_picture_header;
if (s->h263_flv > 1) { if (s->c.h263_flv > 1) {
s->min_qcoeff= -1023; s->min_qcoeff= -1023;
s->max_qcoeff= 1023; s->max_qcoeff= 1023;
} else { } else {
@ -880,14 +876,14 @@ av_cold void ff_h263_encode_init(MPVMainEncContext *const m)
s->encode_mb = h263_encode_mb; s->encode_mb = h263_encode_mb;
} }
void ff_h263_encode_mba(MpegEncContext *s) void ff_h263_encode_mba(MPVEncContext *const s)
{ {
int i, mb_pos; int i, mb_pos;
for(i=0; i<6; i++){ for(i=0; i<6; i++){
if(s->mb_num-1 <= ff_mba_max[i]) break; if(s->c.mb_num-1 <= ff_mba_max[i]) break;
} }
mb_pos= s->mb_x + s->mb_width*s->mb_y; mb_pos= s->c.mb_x + s->c.mb_width*s->c.mb_y;
put_bits(&s->pb, ff_mba_length[i], mb_pos); put_bits(&s->pb, ff_mba_length[i], mb_pos);
} }
@ -895,7 +891,7 @@ void ff_h263_encode_mba(MpegEncContext *s)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption h263_options[] = { static const AVOption h263_options[] = {
{ "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, { "obmc", "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
{ "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE }, { "mb_info", "emit macroblock info for RFC 2190 packetization, the parameter value is the maximum payload size", FF_MPV_OFFSET(mb_info), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
FF_MPV_COMMON_OPTS FF_MPV_COMMON_OPTS
FF_MPV_COMMON_MOTION_EST_OPTS FF_MPV_COMMON_MOTION_EST_OPTS
{ NULL }, { NULL },

View File

@ -69,7 +69,7 @@ const uint32_t ff_square_tab[512] = {
57600, 58081, 58564, 59049, 59536, 60025, 60516, 61009, 61504, 62001, 62500, 63001, 63504, 64009, 64516, 65025, 57600, 58081, 58564, 59049, 59536, 60025, 60516, 61009, 61504, 62001, 62500, 63001, 63504, 64009, 64516, 65025,
}; };
static int sse4_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sse4_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -86,7 +86,7 @@ static int sse4_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
return s; return s;
} }
static int sse8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sse8_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -107,7 +107,7 @@ static int sse8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
return s; return s;
} }
static int sse16_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sse16_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -149,7 +149,7 @@ static int sum_abs_dctelem_c(const int16_t *block)
#define avg2(a, b) (((a) + (b) + 1) >> 1) #define avg2(a, b) (((a) + (b) + 1) >> 1)
#define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2) #define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)
static inline int pix_abs16_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static inline int pix_abs16_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -177,7 +177,7 @@ static inline int pix_abs16_c(MpegEncContext *v, const uint8_t *pix1, const uint
return s; return s;
} }
static inline int pix_median_abs16_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static inline int pix_median_abs16_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i, j; int s = 0, i, j;
@ -216,7 +216,7 @@ static inline int pix_median_abs16_c(MpegEncContext *v, const uint8_t *pix1, con
return s; return s;
} }
static int pix_abs16_x2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int pix_abs16_x2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -244,7 +244,7 @@ static int pix_abs16_x2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t
return s; return s;
} }
static int pix_abs16_y2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int pix_abs16_y2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -274,7 +274,7 @@ static int pix_abs16_y2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t
return s; return s;
} }
static int pix_abs16_xy2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int pix_abs16_xy2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -304,7 +304,7 @@ static int pix_abs16_xy2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t
return s; return s;
} }
static inline int pix_abs8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static inline int pix_abs8_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -324,7 +324,7 @@ static inline int pix_abs8_c(MpegEncContext *v, const uint8_t *pix1, const uint8
return s; return s;
} }
static inline int pix_median_abs8_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static inline int pix_median_abs8_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i, j; int s = 0, i, j;
@ -355,7 +355,7 @@ static inline int pix_median_abs8_c(MpegEncContext *v, const uint8_t *pix1, cons
return s; return s;
} }
static int pix_abs8_x2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int pix_abs8_x2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -375,7 +375,7 @@ static int pix_abs8_x2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *
return s; return s;
} }
static int pix_abs8_y2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int pix_abs8_y2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -397,7 +397,7 @@ static int pix_abs8_y2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *
return s; return s;
} }
static int pix_abs8_xy2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int pix_abs8_xy2_c(MPVEncContext *unused, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int s = 0, i; int s = 0, i;
@ -419,7 +419,7 @@ static int pix_abs8_xy2_c(MpegEncContext *v, const uint8_t *pix1, const uint8_t
return s; return s;
} }
static int nsse16_c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, static int nsse16_c(MPVEncContext *const c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int score1 = 0, score2 = 0, x, y; int score1 = 0, score2 = 0, x, y;
@ -439,12 +439,12 @@ static int nsse16_c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
} }
if (c) if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight; return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
else else
return score1 + FFABS(score2) * 8; return score1 + FFABS(score2) * 8;
} }
static int nsse8_c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, static int nsse8_c(MPVEncContext *const c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int score1 = 0, score2 = 0, x, y; int score1 = 0, score2 = 0, x, y;
@ -464,12 +464,12 @@ static int nsse8_c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2,
} }
if (c) if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight; return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
else else
return score1 + FFABS(score2) * 8; return score1 + FFABS(score2) * 8;
} }
static int zero_cmp(MpegEncContext *s, const uint8_t *a, const uint8_t *b, static int zero_cmp(MPVEncContext *s, const uint8_t *a, const uint8_t *b,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
return 0; return 0;
@ -546,7 +546,7 @@ av_cold int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int type, int mp
#define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y))) #define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
static int hadamard8_diff8x8_c(MpegEncContext *s, const uint8_t *dst, static int hadamard8_diff8x8_c(MPVEncContext *unused, const uint8_t *dst,
const uint8_t *src, ptrdiff_t stride, int h) const uint8_t *src, ptrdiff_t stride, int h)
{ {
int i, temp[64], sum = 0; int i, temp[64], sum = 0;
@ -596,7 +596,7 @@ static int hadamard8_diff8x8_c(MpegEncContext *s, const uint8_t *dst,
return sum; return sum;
} }
static int hadamard8_intra8x8_c(MpegEncContext *s, const uint8_t *src, static int hadamard8_intra8x8_c(MPVEncContext *unused, const uint8_t *src,
const uint8_t *dummy, ptrdiff_t stride, int h) const uint8_t *dummy, ptrdiff_t stride, int h)
{ {
int i, temp[64], sum = 0; int i, temp[64], sum = 0;
@ -646,7 +646,7 @@ static int hadamard8_intra8x8_c(MpegEncContext *s, const uint8_t *src,
return sum; return sum;
} }
static int dct_sad8x8_c(MpegEncContext *s, const uint8_t *src1, static int dct_sad8x8_c(MPVEncContext *const s, const uint8_t *src1,
const uint8_t *src2, ptrdiff_t stride, int h) const uint8_t *src2, ptrdiff_t stride, int h)
{ {
LOCAL_ALIGNED_16(int16_t, temp, [64]); LOCAL_ALIGNED_16(int16_t, temp, [64]);
@ -685,7 +685,7 @@ static int dct_sad8x8_c(MpegEncContext *s, const uint8_t *src1,
DST(7, (a4 >> 2) - a7); \ DST(7, (a4 >> 2) - a7); \
} }
static int dct264_sad8x8_c(MpegEncContext *s, const uint8_t *src1, static int dct264_sad8x8_c(MPVEncContext *const s, const uint8_t *src1,
const uint8_t *src2, ptrdiff_t stride, int h) const uint8_t *src2, ptrdiff_t stride, int h)
{ {
int16_t dct[8][8]; int16_t dct[8][8];
@ -710,7 +710,7 @@ static int dct264_sad8x8_c(MpegEncContext *s, const uint8_t *src1,
} }
#endif #endif
static int dct_max8x8_c(MpegEncContext *s, const uint8_t *src1, static int dct_max8x8_c(MPVEncContext *const s, const uint8_t *src1,
const uint8_t *src2, ptrdiff_t stride, int h) const uint8_t *src2, ptrdiff_t stride, int h)
{ {
LOCAL_ALIGNED_16(int16_t, temp, [64]); LOCAL_ALIGNED_16(int16_t, temp, [64]);
@ -725,22 +725,22 @@ static int dct_max8x8_c(MpegEncContext *s, const uint8_t *src1,
return sum; return sum;
} }
static int quant_psnr8x8_c(MpegEncContext *s, const uint8_t *src1, static int quant_psnr8x8_c(MPVEncContext *const s, const uint8_t *src1,
const uint8_t *src2, ptrdiff_t stride, int h) const uint8_t *src2, ptrdiff_t stride, int h)
{ {
LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]); LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
int16_t *const bak = temp + 64; int16_t *const bak = temp + 64;
int sum = 0, i; int sum = 0, i;
s->mb_intra = 0; s->c.mb_intra = 0;
s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride); s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
memcpy(bak, temp, 64 * sizeof(int16_t)); memcpy(bak, temp, 64 * sizeof(int16_t));
s->block_last_index[0 /* FIXME */] = s->c.block_last_index[0 /* FIXME */] =
s->dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i); s->dct_quantize(s, temp, 0 /* FIXME */, s->c.qscale, &i);
s->dct_unquantize_inter(s, temp, 0, s->qscale); s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
ff_simple_idct_int16_8bit(temp); // FIXME ff_simple_idct_int16_8bit(temp); // FIXME
for (i = 0; i < 64; i++) for (i = 0; i < 64; i++)
@ -749,10 +749,10 @@ static int quant_psnr8x8_c(MpegEncContext *s, const uint8_t *src1,
return sum; return sum;
} }
static int rd8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, static int rd8x8_c(MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
const uint8_t *scantable = s->intra_scantable.permutated; const uint8_t *scantable = s->c.intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]); LOCAL_ALIGNED_16(int16_t, temp, [64]);
LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]); LOCAL_ALIGNED_16(uint8_t, lsrc1, [64]);
LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]); LOCAL_ALIGNED_16(uint8_t, lsrc2, [64]);
@ -765,13 +765,13 @@ static int rd8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2,
s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8); s->pdsp.diff_pixels(temp, lsrc1, lsrc2, 8);
s->block_last_index[0 /* FIXME */] = s->c.block_last_index[0 /* FIXME */] =
last = last =
s->dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i); s->dct_quantize(s, temp, 0 /* FIXME */, s->c.qscale, &i);
bits = 0; bits = 0;
if (s->mb_intra) { if (s->c.mb_intra) {
start_i = 1; start_i = 1;
length = s->intra_ac_vlc_length; length = s->intra_ac_vlc_length;
last_length = s->intra_ac_vlc_last_length; last_length = s->intra_ac_vlc_last_length;
@ -811,23 +811,23 @@ static int rd8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2,
} }
if (last >= 0) { if (last >= 0) {
if (s->mb_intra) if (s->c.mb_intra)
s->dct_unquantize_intra(s, temp, 0, s->qscale); s->c.dct_unquantize_intra(&s->c, temp, 0, s->c.qscale);
else else
s->dct_unquantize_inter(s, temp, 0, s->qscale); s->c.dct_unquantize_inter(&s->c, temp, 0, s->c.qscale);
} }
s->idsp.idct_add(lsrc2, 8, temp); s->c.idsp.idct_add(lsrc2, 8, temp);
distortion = s->sse_cmp[1](NULL, lsrc2, lsrc1, 8, 8); distortion = s->sse_cmp[1](NULL, lsrc2, lsrc1, 8, 8);
return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7); return distortion + ((bits * s->c.qscale * s->c.qscale * 109 + 64) >> 7);
} }
static int bit8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2, static int bit8x8_c(MPVEncContext *const s, const uint8_t *src1, const uint8_t *src2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
const uint8_t *scantable = s->intra_scantable.permutated; const uint8_t *scantable = s->c.intra_scantable.permutated;
LOCAL_ALIGNED_16(int16_t, temp, [64]); LOCAL_ALIGNED_16(int16_t, temp, [64]);
int i, last, run, bits, level, start_i; int i, last, run, bits, level, start_i;
const int esc_length = s->ac_esc_length; const int esc_length = s->ac_esc_length;
@ -835,13 +835,13 @@ static int bit8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2,
s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride); s->pdsp.diff_pixels_unaligned(temp, src1, src2, stride);
s->block_last_index[0 /* FIXME */] = s->c.block_last_index[0 /* FIXME */] =
last = last =
s->dct_quantize(s, temp, 0 /* FIXME */, s->qscale, &i); s->dct_quantize(s, temp, 0 /* FIXME */, s->c.qscale, &i);
bits = 0; bits = 0;
if (s->mb_intra) { if (s->c.mb_intra) {
start_i = 1; start_i = 1;
length = s->intra_ac_vlc_length; length = s->intra_ac_vlc_length;
last_length = s->intra_ac_vlc_last_length; last_length = s->intra_ac_vlc_last_length;
@ -884,7 +884,7 @@ static int bit8x8_c(MpegEncContext *s, const uint8_t *src1, const uint8_t *src2,
} }
#define VSAD_INTRA(size) \ #define VSAD_INTRA(size) \
static int vsad_intra ## size ## _c(MpegEncContext *c, \ static int vsad_intra ## size ## _c(MPVEncContext *unused, \
const uint8_t *s, const uint8_t *dummy, \ const uint8_t *s, const uint8_t *dummy, \
ptrdiff_t stride, int h) \ ptrdiff_t stride, int h) \
{ \ { \
@ -906,7 +906,7 @@ VSAD_INTRA(8)
VSAD_INTRA(16) VSAD_INTRA(16)
#define VSAD(size) \ #define VSAD(size) \
static int vsad ## size ## _c(MpegEncContext *c, \ static int vsad ## size ## _c(MPVEncContext *unused, \
const uint8_t *s1, const uint8_t *s2, \ const uint8_t *s1, const uint8_t *s2, \
ptrdiff_t stride, int h) \ ptrdiff_t stride, int h) \
{ \ { \
@ -926,7 +926,7 @@ VSAD(16)
#define SQ(a) ((a) * (a)) #define SQ(a) ((a) * (a))
#define VSSE_INTRA(size) \ #define VSSE_INTRA(size) \
static int vsse_intra ## size ## _c(MpegEncContext *c, \ static int vsse_intra ## size ## _c(MPVEncContext *unused, \
const uint8_t *s, const uint8_t *dummy, \ const uint8_t *s, const uint8_t *dummy, \
ptrdiff_t stride, int h) \ ptrdiff_t stride, int h) \
{ \ { \
@ -948,8 +948,8 @@ VSSE_INTRA(8)
VSSE_INTRA(16) VSSE_INTRA(16)
#define VSSE(size) \ #define VSSE(size) \
static int vsse ## size ## _c(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, \ static int vsse ## size ## _c(MPVEncContext *unused, const uint8_t *s1, \
ptrdiff_t stride, int h) \ const uint8_t *s2, ptrdiff_t stride, int h) \
{ \ { \
int score = 0, x, y; \ int score = 0, x, y; \
\ \
@ -966,8 +966,8 @@ VSSE(8)
VSSE(16) VSSE(16)
#define WRAPPER8_16_SQ(name8, name16) \ #define WRAPPER8_16_SQ(name8, name16) \
static int name16(MpegEncContext *s, const uint8_t *dst, const uint8_t *src, \ static int name16(MPVEncContext *const s, const uint8_t *dst, \
ptrdiff_t stride, int h) \ const uint8_t *src, ptrdiff_t stride, int h) \
{ \ { \
int score = 0; \ int score = 0; \
\ \

View File

@ -41,13 +41,13 @@ EXTERN const uint32_t ff_square_tab[512];
* !future video codecs might need functions with less strict alignment * !future video codecs might need functions with less strict alignment
*/ */
struct MpegEncContext; typedef struct MPVEncContext MPVEncContext;
/* Motion estimation: /* Motion estimation:
* h is limited to { width / 2, width, 2 * width }, * h is limited to { width / 2, width, 2 * width },
* but never larger than 16 and never smaller than 2. * but never larger than 16 and never smaller than 2.
* Although currently h < 4 is not used as functions with * Although currently h < 4 is not used as functions with
* width < 8 are neither used nor implemented. */ * width < 8 are neither used nor implemented. */
typedef int (*me_cmp_func)(struct MpegEncContext *c, typedef int (*me_cmp_func)(MPVEncContext *c,
const uint8_t *blk1 /* align width (8 or 16) */, const uint8_t *blk1 /* align width (8 or 16) */,
const uint8_t *blk2 /* align 1 */, ptrdiff_t stride, const uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
int h); int h);
@ -86,7 +86,7 @@ void ff_me_cmp_init_mips(MECmpContext *c, AVCodecContext *avctx);
* Fill the function pointer array cmp[6] with me_cmp_funcs from * Fill the function pointer array cmp[6] with me_cmp_funcs from
* c based upon type. If mpvenc is not set, an error is returned * c based upon type. If mpvenc is not set, an error is returned
* if the type of comparison functions requires an initialized * if the type of comparison functions requires an initialized
* MpegEncContext. * MPVEncContext.
*/ */
int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp, int ff_set_cmp(const MECmpContext *c, me_cmp_func *cmp,
int type, int mpvenc); int type, int mpvenc);

View File

@ -21,38 +21,38 @@
#ifndef AVCODEC_MIPS_ME_CMP_MIPS_H #ifndef AVCODEC_MIPS_ME_CMP_MIPS_H
#define AVCODEC_MIPS_ME_CMP_MIPS_H #define AVCODEC_MIPS_ME_CMP_MIPS_H
#include "../mpegvideo.h" #include "../mpegvideoenc.h"
#include "libavcodec/bit_depth_template.c" #include "libavcodec/bit_depth_template.c"
int ff_hadamard8_diff8x8_msa(MpegEncContext *s, const uint8_t *dst, const uint8_t *src, int ff_hadamard8_diff8x8_msa(MPVEncContext *s, const uint8_t *dst, const uint8_t *src,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_hadamard8_intra8x8_msa(MpegEncContext *s, const uint8_t *dst, const uint8_t *src, int ff_hadamard8_intra8x8_msa(MPVEncContext *s, const uint8_t *dst, const uint8_t *src,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_hadamard8_diff16_msa(MpegEncContext *s, const uint8_t *dst, const uint8_t *src, int ff_hadamard8_diff16_msa(MPVEncContext *s, const uint8_t *dst, const uint8_t *src,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_hadamard8_intra16_msa(MpegEncContext *s, const uint8_t *dst, const uint8_t *src, int ff_hadamard8_intra16_msa(MPVEncContext *s, const uint8_t *dst, const uint8_t *src,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_x2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_x2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_y2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_y2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_xy2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_xy2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_x2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_x2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_y2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_y2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_xy2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_xy2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sse16_msa(MpegEncContext *v, const uint8_t *pu8Src, const uint8_t *pu8Ref, int ff_sse16_msa(MPVEncContext *v, const uint8_t *pu8Src, const uint8_t *pu8Ref,
ptrdiff_t stride, int i32Height); ptrdiff_t stride, int i32Height);
int ff_sse8_msa(MpegEncContext *v, const uint8_t *pu8Src, const uint8_t *pu8Ref, int ff_sse8_msa(MPVEncContext *v, const uint8_t *pu8Src, const uint8_t *pu8Ref,
ptrdiff_t stride, int i32Height); ptrdiff_t stride, int i32Height);
int ff_sse4_msa(MpegEncContext *v, const uint8_t *pu8Src, const uint8_t *pu8Ref, int ff_sse4_msa(MPVEncContext *v, const uint8_t *pu8Src, const uint8_t *pu8Ref,
ptrdiff_t stride, int i32Height); ptrdiff_t stride, int i32Height);
void ff_add_pixels8_msa(const uint8_t *restrict pixels, int16_t *block, void ff_add_pixels8_msa(const uint8_t *restrict pixels, int16_t *block,
ptrdiff_t stride); ptrdiff_t stride);

View File

@ -732,79 +732,79 @@ static int32_t hadamard_intra_8x8_msa(const uint8_t *src, int32_t src_stride,
return sum_res; return sum_res;
} }
int ff_pix_abs16_msa(MpegEncContext *v, const uint8_t *src, const uint8_t *ref, int ff_pix_abs16_msa(MPVEncContext *v, const uint8_t *src, const uint8_t *ref,
ptrdiff_t stride, int height) ptrdiff_t stride, int height)
{ {
return sad_16width_msa(src, stride, ref, stride, height); return sad_16width_msa(src, stride, ref, stride, height);
} }
int ff_pix_abs8_msa(MpegEncContext *v, const uint8_t *src, const uint8_t *ref, int ff_pix_abs8_msa(MPVEncContext *v, const uint8_t *src, const uint8_t *ref,
ptrdiff_t stride, int height) ptrdiff_t stride, int height)
{ {
return sad_8width_msa(src, stride, ref, stride, height); return sad_8width_msa(src, stride, ref, stride, height);
} }
int ff_pix_abs16_x2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_x2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
return sad_horiz_bilinear_filter_16width_msa(pix1, stride, pix2, stride, h); return sad_horiz_bilinear_filter_16width_msa(pix1, stride, pix2, stride, h);
} }
int ff_pix_abs16_y2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_y2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
return sad_vert_bilinear_filter_16width_msa(pix1, stride, pix2, stride, h); return sad_vert_bilinear_filter_16width_msa(pix1, stride, pix2, stride, h);
} }
int ff_pix_abs16_xy2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_xy2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
return sad_hv_bilinear_filter_16width_msa(pix1, stride, pix2, stride, h); return sad_hv_bilinear_filter_16width_msa(pix1, stride, pix2, stride, h);
} }
int ff_pix_abs8_x2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_x2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
return sad_horiz_bilinear_filter_8width_msa(pix1, stride, pix2, stride, h); return sad_horiz_bilinear_filter_8width_msa(pix1, stride, pix2, stride, h);
} }
int ff_pix_abs8_y2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_y2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
return sad_vert_bilinear_filter_8width_msa(pix1, stride, pix2, stride, h); return sad_vert_bilinear_filter_8width_msa(pix1, stride, pix2, stride, h);
} }
int ff_pix_abs8_xy2_msa(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_xy2_msa(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
return sad_hv_bilinear_filter_8width_msa(pix1, stride, pix2, stride, h); return sad_hv_bilinear_filter_8width_msa(pix1, stride, pix2, stride, h);
} }
int ff_sse16_msa(MpegEncContext *v, const uint8_t *src, const uint8_t *ref, int ff_sse16_msa(MPVEncContext *v, const uint8_t *src, const uint8_t *ref,
ptrdiff_t stride, int height) ptrdiff_t stride, int height)
{ {
return sse_16width_msa(src, stride, ref, stride, height); return sse_16width_msa(src, stride, ref, stride, height);
} }
int ff_sse8_msa(MpegEncContext *v, const uint8_t *src, const uint8_t *ref, int ff_sse8_msa(MPVEncContext *v, const uint8_t *src, const uint8_t *ref,
ptrdiff_t stride, int height) ptrdiff_t stride, int height)
{ {
return sse_8width_msa(src, stride, ref, stride, height); return sse_8width_msa(src, stride, ref, stride, height);
} }
int ff_sse4_msa(MpegEncContext *v, const uint8_t *src, const uint8_t *ref, int ff_sse4_msa(MPVEncContext *v, const uint8_t *src, const uint8_t *ref,
ptrdiff_t stride, int height) ptrdiff_t stride, int height)
{ {
return sse_4width_msa(src, stride, ref, stride, height); return sse_4width_msa(src, stride, ref, stride, height);
} }
int ff_hadamard8_diff8x8_msa(MpegEncContext *s, const uint8_t *dst, const uint8_t *src, int ff_hadamard8_diff8x8_msa(MPVEncContext *s, const uint8_t *dst, const uint8_t *src,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
return hadamard_diff_8x8_msa(src, stride, dst, stride); return hadamard_diff_8x8_msa(src, stride, dst, stride);
} }
int ff_hadamard8_intra8x8_msa(MpegEncContext *s, const uint8_t *src, const uint8_t *dummy, int ff_hadamard8_intra8x8_msa(MPVEncContext *s, const uint8_t *src, const uint8_t *dummy,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
return hadamard_intra_8x8_msa(src, stride, dummy, stride); return hadamard_intra_8x8_msa(src, stride, dummy, stride);
@ -812,7 +812,7 @@ int ff_hadamard8_intra8x8_msa(MpegEncContext *s, const uint8_t *src, const uint8
/* Hadamard Transform functions */ /* Hadamard Transform functions */
#define WRAPPER8_16_SQ(name8, name16) \ #define WRAPPER8_16_SQ(name8, name16) \
int name16(MpegEncContext *s, const uint8_t *dst, const uint8_t *src, \ int name16(MPVEncContext *s, const uint8_t *dst, const uint8_t *src, \
ptrdiff_t stride, int h) \ ptrdiff_t stride, int h) \
{ \ { \
int score = 0; \ int score = 0; \

View File

@ -22,6 +22,7 @@
#define AVCODEC_MIPS_MPEGVIDEO_MIPS_H #define AVCODEC_MIPS_MPEGVIDEO_MIPS_H
#include "libavcodec/mpegvideo.h" #include "libavcodec/mpegvideo.h"
#include "libavcodec/mpegvideoenc.h"
void ff_dct_unquantize_h263_intra_mmi(MpegEncContext *s, int16_t *block, void ff_dct_unquantize_h263_intra_mmi(MpegEncContext *s, int16_t *block,
int n, int qscale); int n, int qscale);
@ -33,6 +34,6 @@ void ff_dct_unquantize_mpeg1_inter_mmi(MpegEncContext *s, int16_t *block,
int n, int qscale); int n, int qscale);
void ff_dct_unquantize_mpeg2_intra_mmi(MpegEncContext *s, int16_t *block, void ff_dct_unquantize_mpeg2_intra_mmi(MpegEncContext *s, int16_t *block,
int n, int qscale); int n, int qscale);
void ff_denoise_dct_mmi(MpegEncContext *s, int16_t *block); void ff_denoise_dct_mmi(MPVEncContext *s, int16_t *block);
#endif /* AVCODEC_MIPS_MPEGVIDEO_MIPS_H */ #endif /* AVCODEC_MIPS_MPEGVIDEO_MIPS_H */

View File

@ -23,7 +23,7 @@
#include "libavcodec/mpegvideoenc.h" #include "libavcodec/mpegvideoenc.h"
#include "mpegvideo_mips.h" #include "mpegvideo_mips.h"
av_cold void ff_mpvenc_dct_init_mips(MpegEncContext *s) av_cold void ff_mpvenc_dct_init_mips(MPVEncContext *s)
{ {
int cpu_flags = av_get_cpu_flags(); int cpu_flags = av_get_cpu_flags();

View File

@ -25,9 +25,9 @@
#include "mpegvideo_mips.h" #include "mpegvideo_mips.h"
#include "libavutil/mips/mmiutils.h" #include "libavutil/mips/mmiutils.h"
void ff_denoise_dct_mmi(MpegEncContext *s, int16_t *block) void ff_denoise_dct_mmi(MPVEncContext *s, int16_t *block)
{ {
const int intra = s->mb_intra; const int intra = s->c.mb_intra;
int *sum = s->dct_error_sum[intra]; int *sum = s->dct_error_sum[intra];
uint16_t *offset = s->dct_offset[intra]; uint16_t *offset = s->dct_offset[intra];
double ftmp[8]; double ftmp[8];

View File

@ -21,6 +21,7 @@
#include "libavutil/attributes.h" #include "libavutil/attributes.h"
#include "libavutil/mips/cpu.h" #include "libavutil/mips/cpu.h"
#include "libavcodec/bit_depth_template.c" #include "libavcodec/bit_depth_template.c"
#include "libavcodec/mpegvideoencdsp.h"
#include "h263dsp_mips.h" #include "h263dsp_mips.h"
av_cold void ff_mpegvideoencdsp_init_mips(MpegvideoEncDSPContext *c, av_cold void ff_mpegvideoencdsp_init_mips(MpegvideoEncDSPContext *c,

View File

@ -20,6 +20,7 @@
*/ */
#include "libavutil/mips/cpu.h" #include "libavutil/mips/cpu.h"
#include "libavcodec/pixblockdsp.h"
#include "pixblockdsp_mips.h" #include "pixblockdsp_mips.h"
void ff_pixblockdsp_init_mips(PixblockDSPContext *c, AVCodecContext *avctx, void ff_pixblockdsp_init_mips(PixblockDSPContext *c, AVCodecContext *avctx,

View File

@ -22,7 +22,8 @@
#ifndef AVCODEC_MIPS_PIXBLOCKDSP_MIPS_H #ifndef AVCODEC_MIPS_PIXBLOCKDSP_MIPS_H
#define AVCODEC_MIPS_PIXBLOCKDSP_MIPS_H #define AVCODEC_MIPS_PIXBLOCKDSP_MIPS_H
#include "../mpegvideo.h" #include <stdint.h>
#include <stddef.h>
void ff_diff_pixels_msa(int16_t *restrict block, const uint8_t *src1, void ff_diff_pixels_msa(int16_t *restrict block, const uint8_t *src1,
const uint8_t *src2, ptrdiff_t stride); const uint8_t *src2, ptrdiff_t stride);

View File

@ -61,8 +61,8 @@ typedef struct MJpegHuffmanCode {
/* The following is the private context of MJPEG/AMV decoder. /* The following is the private context of MJPEG/AMV decoder.
* Note that when using slice threading only the main thread's * Note that when using slice threading only the main thread's
* MpegEncContext is followed by a MjpegContext; the other threads * MPVEncContext is followed by a MjpegContext; the other threads
* can access this shared context via MpegEncContext.mjpeg. */ * can access this shared context via MPVEncContext.mjpeg. */
typedef struct MJPEGEncContext { typedef struct MJPEGEncContext {
MPVMainEncContext mpeg; MPVMainEncContext mpeg;
MJpegContext mjpeg; MJpegContext mjpeg;
@ -92,22 +92,22 @@ static av_cold void init_uni_ac_vlc(const uint8_t huff_size_ac[256],
} }
} }
static void mjpeg_encode_picture_header(MpegEncContext *s) static void mjpeg_encode_picture_header(MPVEncContext *const s)
{ {
ff_mjpeg_encode_picture_header(s->avctx, &s->pb, s->cur_pic.ptr->f, s->mjpeg_ctx, ff_mjpeg_encode_picture_header(s->c.avctx, &s->pb, s->c.cur_pic.ptr->f, s->mjpeg_ctx,
s->intra_scantable.permutated, 0, s->c.intra_scantable.permutated, 0,
s->intra_matrix, s->chroma_intra_matrix, s->c.intra_matrix, s->c.chroma_intra_matrix,
s->slice_context_count > 1); s->c.slice_context_count > 1);
s->esc_pos = put_bytes_count(&s->pb, 0); s->esc_pos = put_bytes_count(&s->pb, 0);
for (int i = 1; i < s->slice_context_count; i++) for (int i = 1; i < s->c.slice_context_count; i++)
s->thread_context[i]->esc_pos = 0; s->c.enc_contexts[i]->esc_pos = 0;
} }
static int mjpeg_amv_encode_picture_header(MPVMainEncContext *const m) static int mjpeg_amv_encode_picture_header(MPVMainEncContext *const m)
{ {
MJPEGEncContext *const m2 = (MJPEGEncContext*)m; MJPEGEncContext *const m2 = (MJPEGEncContext*)m;
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
av_assert2(s->mjpeg_ctx == &m2->mjpeg); av_assert2(s->mjpeg_ctx == &m2->mjpeg);
/* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */ /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
if (!CONFIG_MJPEG_ENCODER || m2->mjpeg.huffman != HUFFMAN_TABLE_OPTIMAL) if (!CONFIG_MJPEG_ENCODER || m2->mjpeg.huffman != HUFFMAN_TABLE_OPTIMAL)
@ -120,11 +120,11 @@ static int mjpeg_amv_encode_picture_header(MPVMainEncContext *const m)
/** /**
* Encodes and outputs the entire frame in the JPEG format. * Encodes and outputs the entire frame in the JPEG format.
* *
* @param s The MpegEncContext. * @param main The MPVMainEncContext.
*/ */
static void mjpeg_encode_picture_frame(MPVMainEncContext *const main) static void mjpeg_encode_picture_frame(MPVMainEncContext *const main)
{ {
MpegEncContext *const s = &main->s; MPVEncContext *const s = &main->s;
int nbits, code, table_id; int nbits, code, table_id;
MJpegContext *m = s->mjpeg_ctx; MJpegContext *m = s->mjpeg_ctx;
uint8_t *huff_size[4] = { m->huff_size_dc_luminance, uint8_t *huff_size[4] = { m->huff_size_dc_luminance,
@ -232,14 +232,14 @@ static void mjpeg_build_optimal_huffman(MJpegContext *m)
* *
* Header + values + stuffing. * Header + values + stuffing.
* *
* @param s The MpegEncContext. * @param s The MPVEncContext.
* @return int Error code, 0 if successful. * @return int Error code, 0 if successful.
*/ */
int ff_mjpeg_encode_stuffing(MpegEncContext *s) int ff_mjpeg_encode_stuffing(MPVEncContext *const s)
{ {
MJpegContext *const m = s->mjpeg_ctx; MJpegContext *const m = s->mjpeg_ctx;
PutBitContext *pbc = &s->pb; PutBitContext *pbc = &s->pb;
int mb_y = s->mb_y - !s->mb_x; int mb_y = s->c.mb_y - !s->c.mb_x;
int ret; int ret;
#if CONFIG_MJPEG_ENCODER #if CONFIG_MJPEG_ENCODER
@ -267,19 +267,19 @@ int ff_mjpeg_encode_stuffing(MpegEncContext *s)
ret = ff_mpv_reallocate_putbitbuffer(s, put_bits_count(&s->pb) / 8 + 100, ret = ff_mpv_reallocate_putbitbuffer(s, put_bits_count(&s->pb) / 8 + 100,
put_bits_count(&s->pb) / 4 + 1000); put_bits_count(&s->pb) / 4 + 1000);
if (ret < 0) { if (ret < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Buffer reallocation failed\n"); av_log(s->c.avctx, AV_LOG_ERROR, "Buffer reallocation failed\n");
goto fail; goto fail;
} }
ff_mjpeg_escape_FF(pbc, s->esc_pos); ff_mjpeg_escape_FF(pbc, s->esc_pos);
if (s->slice_context_count > 1 && mb_y < s->mb_height - 1) if (s->c.slice_context_count > 1 && mb_y < s->c.mb_height - 1)
put_marker(pbc, RST0 + (mb_y&7)); put_marker(pbc, RST0 + (mb_y&7));
s->esc_pos = put_bytes_count(pbc, 0); s->esc_pos = put_bytes_count(pbc, 0);
fail: fail:
for (int i = 0; i < 3; i++) for (int i = 0; i < 3; i++)
s->last_dc[i] = 128 << s->intra_dc_precision; s->c.last_dc[i] = 128 << s->c.intra_dc_precision;
return ret; return ret;
} }
@ -287,14 +287,14 @@ fail:
static int alloc_huffman(MJPEGEncContext *const m2) static int alloc_huffman(MJPEGEncContext *const m2)
{ {
MJpegContext *const m = &m2->mjpeg; MJpegContext *const m = &m2->mjpeg;
MpegEncContext *const s = &m2->mpeg.s; MPVEncContext *const s = &m2->mpeg.s;
static const char blocks_per_mb[] = { static const char blocks_per_mb[] = {
[CHROMA_420] = 6, [CHROMA_422] = 8, [CHROMA_444] = 12 [CHROMA_420] = 6, [CHROMA_422] = 8, [CHROMA_444] = 12
}; };
size_t num_blocks, num_codes; size_t num_blocks, num_codes;
// Make sure we have enough space to hold this frame. // Make sure we have enough space to hold this frame.
num_blocks = s->mb_num * blocks_per_mb[s->chroma_format]; num_blocks = s->c.mb_num * blocks_per_mb[s->c.chroma_format];
num_codes = num_blocks * 64; num_codes = num_blocks * 64;
m->huff_buffer = av_malloc_array(num_codes, m->huff_buffer = av_malloc_array(num_codes,
@ -358,11 +358,11 @@ static void mjpeg_encode_coef(MJpegContext *s, uint8_t table_id, int val, int ru
/** /**
* Add the block's data into the JPEG buffer. * Add the block's data into the JPEG buffer.
* *
* @param s The MpegEncContext that contains the JPEG buffer. * @param s The MPVEncContext that contains the JPEG buffer.
* @param block The block. * @param block The block.
* @param n The block's index or number. * @param n The block's index or number.
*/ */
static void record_block(MpegEncContext *s, int16_t *block, int n) static void record_block(MPVEncContext *const s, int16_t block[], int n)
{ {
int i, j, table_id; int i, j, table_id;
int component, dc, last_index, val, run; int component, dc, last_index, val, run;
@ -372,20 +372,20 @@ static void record_block(MpegEncContext *s, int16_t *block, int n)
component = (n <= 3 ? 0 : (n&1) + 1); component = (n <= 3 ? 0 : (n&1) + 1);
table_id = (n <= 3 ? 0 : 1); table_id = (n <= 3 ? 0 : 1);
dc = block[0]; /* overflow is impossible */ dc = block[0]; /* overflow is impossible */
val = dc - s->last_dc[component]; val = dc - s->c.last_dc[component];
mjpeg_encode_coef(m, table_id, val, 0); mjpeg_encode_coef(m, table_id, val, 0);
s->last_dc[component] = dc; s->c.last_dc[component] = dc;
/* AC coefs */ /* AC coefs */
run = 0; run = 0;
last_index = s->block_last_index[n]; last_index = s->c.block_last_index[n];
table_id |= 2; table_id |= 2;
for(i=1;i<=last_index;i++) { for(i=1;i<=last_index;i++) {
j = s->intra_scantable.permutated[i]; j = s->c.intra_scantable.permutated[i];
val = block[j]; val = block[j];
if (val == 0) { if (val == 0) {
@ -405,7 +405,7 @@ static void record_block(MpegEncContext *s, int16_t *block, int n)
mjpeg_encode_code(m, table_id, 0); mjpeg_encode_code(m, table_id, 0);
} }
static void encode_block(MpegEncContext *s, int16_t *block, int n) static void encode_block(MPVEncContext *const s, int16_t block[], int n)
{ {
int mant, nbits, code, i, j; int mant, nbits, code, i, j;
int component, dc, run, last_index, val; int component, dc, run, last_index, val;
@ -416,7 +416,7 @@ static void encode_block(MpegEncContext *s, int16_t *block, int n)
/* DC coef */ /* DC coef */
component = (n <= 3 ? 0 : (n&1) + 1); component = (n <= 3 ? 0 : (n&1) + 1);
dc = block[0]; /* overflow is impossible */ dc = block[0]; /* overflow is impossible */
val = dc - s->last_dc[component]; val = dc - s->c.last_dc[component];
if (n < 4) { if (n < 4) {
ff_mjpeg_encode_dc(&s->pb, val, m->huff_size_dc_luminance, m->huff_code_dc_luminance); ff_mjpeg_encode_dc(&s->pb, val, m->huff_size_dc_luminance, m->huff_code_dc_luminance);
huff_size_ac = m->huff_size_ac_luminance; huff_size_ac = m->huff_size_ac_luminance;
@ -426,14 +426,14 @@ static void encode_block(MpegEncContext *s, int16_t *block, int n)
huff_size_ac = m->huff_size_ac_chrominance; huff_size_ac = m->huff_size_ac_chrominance;
huff_code_ac = m->huff_code_ac_chrominance; huff_code_ac = m->huff_code_ac_chrominance;
} }
s->last_dc[component] = dc; s->c.last_dc[component] = dc;
/* AC coefs */ /* AC coefs */
run = 0; run = 0;
last_index = s->block_last_index[n]; last_index = s->c.block_last_index[n];
for(i=1;i<=last_index;i++) { for(i=1;i<=last_index;i++) {
j = s->intra_scantable.permutated[i]; j = s->c.intra_scantable.permutated[i];
val = block[j]; val = block[j];
if (val == 0) { if (val == 0) {
run++; run++;
@ -463,10 +463,10 @@ static void encode_block(MpegEncContext *s, int16_t *block, int n)
put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]); put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]);
} }
static void mjpeg_record_mb(MpegEncContext *const s, int16_t block[][64], static void mjpeg_record_mb(MPVEncContext *const s, int16_t block[][64],
int unused_x, int unused_y) int unused_x, int unused_y)
{ {
if (s->chroma_format == CHROMA_444) { if (s->c.chroma_format == CHROMA_444) {
record_block(s, block[0], 0); record_block(s, block[0], 0);
record_block(s, block[2], 2); record_block(s, block[2], 2);
record_block(s, block[4], 4); record_block(s, block[4], 4);
@ -474,7 +474,7 @@ static void mjpeg_record_mb(MpegEncContext *const s, int16_t block[][64],
record_block(s, block[5], 5); record_block(s, block[5], 5);
record_block(s, block[9], 9); record_block(s, block[9], 9);
if (16*s->mb_x+8 < s->width) { if (16*s->c.mb_x+8 < s->c.width) {
record_block(s, block[1], 1); record_block(s, block[1], 1);
record_block(s, block[3], 3); record_block(s, block[3], 3);
record_block(s, block[6], 6); record_block(s, block[6], 6);
@ -485,7 +485,7 @@ static void mjpeg_record_mb(MpegEncContext *const s, int16_t block[][64],
} else { } else {
for (int i = 0; i < 5; i++) for (int i = 0; i < 5; i++)
record_block(s, block[i], i); record_block(s, block[i], i);
if (s->chroma_format == CHROMA_420) { if (s->c.chroma_format == CHROMA_420) {
record_block(s, block[5], 5); record_block(s, block[5], 5);
} else { } else {
record_block(s, block[6], 6); record_block(s, block[6], 6);
@ -495,10 +495,10 @@ static void mjpeg_record_mb(MpegEncContext *const s, int16_t block[][64],
} }
} }
static void mjpeg_encode_mb(MpegEncContext *const s, int16_t block[][64], static void mjpeg_encode_mb(MPVEncContext *const s, int16_t block[][64],
int unused_x, int unused_y) int unused_x, int unused_y)
{ {
if (s->chroma_format == CHROMA_444) { if (s->c.chroma_format == CHROMA_444) {
encode_block(s, block[0], 0); encode_block(s, block[0], 0);
encode_block(s, block[2], 2); encode_block(s, block[2], 2);
encode_block(s, block[4], 4); encode_block(s, block[4], 4);
@ -506,7 +506,7 @@ static void mjpeg_encode_mb(MpegEncContext *const s, int16_t block[][64],
encode_block(s, block[5], 5); encode_block(s, block[5], 5);
encode_block(s, block[9], 9); encode_block(s, block[9], 9);
if (16 * s->mb_x + 8 < s->width) { if (16 * s->c.mb_x + 8 < s->c.width) {
encode_block(s, block[1], 1); encode_block(s, block[1], 1);
encode_block(s, block[3], 3); encode_block(s, block[3], 3);
encode_block(s, block[6], 6); encode_block(s, block[6], 6);
@ -517,7 +517,7 @@ static void mjpeg_encode_mb(MpegEncContext *const s, int16_t block[][64],
} else { } else {
for (int i = 0; i < 5; i++) for (int i = 0; i < 5; i++)
encode_block(s, block[i], i); encode_block(s, block[i], i);
if (s->chroma_format == CHROMA_420) { if (s->c.chroma_format == CHROMA_420) {
encode_block(s, block[5], 5); encode_block(s, block[5], 5);
} else { } else {
encode_block(s, block[6], 6); encode_block(s, block[6], 6);
@ -533,7 +533,7 @@ static av_cold int mjpeg_encode_init(AVCodecContext *avctx)
{ {
MJPEGEncContext *const m2 = avctx->priv_data; MJPEGEncContext *const m2 = avctx->priv_data;
MJpegContext *const m = &m2->mjpeg; MJpegContext *const m = &m2->mjpeg;
MpegEncContext *const s = &m2->mpeg.s; MPVEncContext *const s = &m2->mpeg.s;
int ret; int ret;
s->mjpeg_ctx = m; s->mjpeg_ctx = m;
@ -597,7 +597,7 @@ static av_cold int mjpeg_encode_init(AVCodecContext *avctx)
// Buffers start out empty. // Buffers start out empty.
m->huff_ncode = 0; m->huff_ncode = 0;
if (s->slice_context_count > 1) if (s->c.slice_context_count > 1)
m->huffman = HUFFMAN_TABLE_DEFAULT; m->huffman = HUFFMAN_TABLE_DEFAULT;
if (m->huffman == HUFFMAN_TABLE_OPTIMAL) { if (m->huffman == HUFFMAN_TABLE_OPTIMAL) {
@ -615,7 +615,7 @@ static av_cold int mjpeg_encode_init(AVCodecContext *avctx)
static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *pic_arg, int *got_packet) const AVFrame *pic_arg, int *got_packet)
{ {
MpegEncContext *s = avctx->priv_data; MPVEncContext *const s = avctx->priv_data;
AVFrame *pic; AVFrame *pic;
int i, ret; int i, ret;
int chroma_v_shift = 1; /* AMV is 420-only */ int chroma_v_shift = 1; /* AMV is 420-only */
@ -635,7 +635,7 @@ static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
//picture should be flipped upside-down //picture should be flipped upside-down
for(i=0; i < 3; i++) { for(i=0; i < 3; i++) {
int vsample = i ? 2 >> chroma_v_shift : 2; int vsample = i ? 2 >> chroma_v_shift : 2;
pic->data[i] += pic->linesize[i] * (vsample * s->height / V_MAX - 1); pic->data[i] += pic->linesize[i] * (vsample * s->c.height / V_MAX - 1);
pic->linesize[i] *= -1; pic->linesize[i] *= -1;
} }
ret = ff_mpv_encode_picture(avctx, pkt, pic, got_packet); ret = ff_mpv_encode_picture(avctx, pkt, pic, got_packet);

View File

@ -56,9 +56,9 @@ typedef struct MJpegContext {
uint8_t huff_size_ac_chrominance[256]; ///< AC chrominance Huffman table size. uint8_t huff_size_ac_chrominance[256]; ///< AC chrominance Huffman table size.
uint16_t huff_code_ac_chrominance[256]; ///< AC chrominance Huffman table codes. uint16_t huff_code_ac_chrominance[256]; ///< AC chrominance Huffman table codes.
/** Storage for AC luminance VLC (in MpegEncContext) */ /** Storage for AC luminance VLC */
uint8_t uni_ac_vlc_len[64 * 64 * 2]; uint8_t uni_ac_vlc_len[64 * 64 * 2];
/** Storage for AC chrominance VLC (in MpegEncContext) */ /** Storage for AC chrominance VLC */
uint8_t uni_chroma_ac_vlc_len[64 * 64 * 2]; uint8_t uni_chroma_ac_vlc_len[64 * 64 * 2];
// Default DC tables have exactly 12 values // Default DC tables have exactly 12 values
@ -92,8 +92,8 @@ static inline void put_marker(PutBitContext *p, enum JpegMarker code)
put_bits(p, 8, code); put_bits(p, 8, code);
} }
typedef struct MpegEncContext MpegEncContext; typedef struct MPVEncContext MPVEncContext;
int ff_mjpeg_encode_stuffing(MpegEncContext *s); int ff_mjpeg_encode_stuffing(MPVEncContext *s);
#endif /* AVCODEC_MJPEGENC_H */ #endif /* AVCODEC_MJPEGENC_H */

File diff suppressed because it is too large Load Diff

View File

@ -28,7 +28,7 @@
#include "me_cmp.h" #include "me_cmp.h"
#include "qpeldsp.h" #include "qpeldsp.h"
struct MpegEncContext; typedef struct MPVEncContext MPVEncContext;
typedef struct MPVMainEncContext MPVMainEncContext; typedef struct MPVMainEncContext MPVMainEncContext;
#if ARCH_IA64 // Limit static arrays to avoid gcc failing "short data segment overflowed" #if ARCH_IA64 // Limit static arrays to avoid gcc failing "short data segment overflowed"
@ -100,7 +100,7 @@ typedef struct MotionEstContext {
qpel_mc_func(*qpel_avg)[16]; qpel_mc_func(*qpel_avg)[16];
const uint8_t (*mv_penalty)[MAX_DMV * 2 + 1]; ///< bit amount needed to encode a MV const uint8_t (*mv_penalty)[MAX_DMV * 2 + 1]; ///< bit amount needed to encode a MV
const uint8_t *current_mv_penalty; const uint8_t *current_mv_penalty;
int (*sub_motion_search)(struct MpegEncContext *s, int (*sub_motion_search)(MPVEncContext *s,
int *mx_ptr, int *my_ptr, int dmin, int *mx_ptr, int *my_ptr, int dmin,
int src_index, int ref_index, int src_index, int ref_index,
int size, int h); int size, int h);
@ -122,27 +122,27 @@ static inline int ff_h263_round_chroma(int x)
int ff_me_init(MotionEstContext *c, struct AVCodecContext *avctx, int ff_me_init(MotionEstContext *c, struct AVCodecContext *avctx,
const struct MECmpContext *mecc, int mpvenc); const struct MECmpContext *mecc, int mpvenc);
void ff_me_init_pic(struct MpegEncContext *s); void ff_me_init_pic(MPVEncContext *s);
void ff_estimate_p_frame_motion(struct MpegEncContext *s, int mb_x, int mb_y); void ff_estimate_p_frame_motion(MPVEncContext *s, int mb_x, int mb_y);
void ff_estimate_b_frame_motion(struct MpegEncContext *s, int mb_x, int mb_y); void ff_estimate_b_frame_motion(MPVEncContext *s, int mb_x, int mb_y);
int ff_pre_estimate_p_frame_motion(struct MpegEncContext *s, int ff_pre_estimate_p_frame_motion(MPVEncContext *s,
int mb_x, int mb_y); int mb_x, int mb_y);
int ff_epzs_motion_search(struct MpegEncContext *s, int *mx_ptr, int *my_ptr, int ff_epzs_motion_search(MPVEncContext *s, int *mx_ptr, int *my_ptr,
int P[10][2], int src_index, int ref_index, int P[10][2], int src_index, int ref_index,
const int16_t (*last_mv)[2], int ref_mv_scale, const int16_t (*last_mv)[2], int ref_mv_scale,
int size, int h); int size, int h);
int ff_get_mb_score(struct MpegEncContext *s, int mx, int my, int src_index, int ff_get_mb_score(MPVEncContext *s, int mx, int my, int src_index,
int ref_index, int size, int h, int add_rate); int ref_index, int size, int h, int add_rate);
int ff_get_best_fcode(MPVMainEncContext *m, int ff_get_best_fcode(MPVMainEncContext *m,
const int16_t (*mv_table)[2], int type); const int16_t (*mv_table)[2], int type);
void ff_fix_long_p_mvs(struct MpegEncContext *s, int type); void ff_fix_long_p_mvs(MPVEncContext *s, int type);
void ff_fix_long_mvs(struct MpegEncContext *s, uint8_t *field_select_table, void ff_fix_long_mvs(MPVEncContext *s, uint8_t *field_select_table,
int field_select, int16_t (*mv_table)[2], int f_code, int field_select, int16_t (*mv_table)[2], int f_code,
int type, int truncate); int type, int truncate);

View File

@ -25,7 +25,7 @@
*/ */
#include "libavutil/qsort.h" #include "libavutil/qsort.h"
#include "mpegvideo.h" #include "mpegvideoenc.h"
//Let us hope gcc will remove the unused vars ...(gcc 3.2.2 seems to do it ...) //Let us hope gcc will remove the unused vars ...(gcc 3.2.2 seems to do it ...)
#define LOAD_COMMON\ #define LOAD_COMMON\
@ -47,12 +47,12 @@
COPY3_IF_LT(dmin, d, bx, hx, by, hy)\ COPY3_IF_LT(dmin, d, bx, hx, by, hy)\
} }
static int hpel_motion_search(MpegEncContext * s, static int hpel_motion_search(MPVEncContext *const s,
int *mx_ptr, int *my_ptr, int dmin, int *mx_ptr, int *my_ptr, int dmin,
int src_index, int ref_index, int src_index, int ref_index,
int size, int h) int size, int h)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
const int mx = *mx_ptr; const int mx = *mx_ptr;
const int my = *my_ptr; const int my = *my_ptr;
const int penalty_factor= c->sub_penalty_factor; const int penalty_factor= c->sub_penalty_factor;
@ -152,7 +152,7 @@ static int hpel_motion_search(MpegEncContext * s,
return dmin; return dmin;
} }
static int no_sub_motion_search(MpegEncContext * s, static int no_sub_motion_search(MPVEncContext *const s,
int *mx_ptr, int *my_ptr, int dmin, int *mx_ptr, int *my_ptr, int dmin,
int src_index, int ref_index, int src_index, int ref_index,
int size, int h) int size, int h)
@ -162,11 +162,11 @@ static int no_sub_motion_search(MpegEncContext * s,
return dmin; return dmin;
} }
static inline int get_mb_score(MpegEncContext *s, int mx, int my, static inline int get_mb_score(MPVEncContext *const s, int mx, int my,
int src_index, int ref_index, int size, int src_index, int ref_index, int size,
int h, int add_rate) int h, int add_rate)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
const int penalty_factor= c->mb_penalty_factor; const int penalty_factor= c->mb_penalty_factor;
const int flags= c->mb_flags; const int flags= c->mb_flags;
const int qpel= flags & FLAG_QPEL; const int qpel= flags & FLAG_QPEL;
@ -189,7 +189,7 @@ static inline int get_mb_score(MpegEncContext *s, int mx, int my,
return d; return d;
} }
int ff_get_mb_score(MpegEncContext *s, int mx, int my, int src_index, int ff_get_mb_score(MPVEncContext *const s, int mx, int my, int src_index,
int ref_index, int size, int h, int add_rate) int ref_index, int size, int h, int add_rate)
{ {
return get_mb_score(s, mx, my, src_index, ref_index, size, h, add_rate); return get_mb_score(s, mx, my, src_index, ref_index, size, h, add_rate);
@ -204,12 +204,12 @@ int ff_get_mb_score(MpegEncContext *s, int mx, int my, int src_index,
COPY3_IF_LT(dmin, d, bx, hx, by, hy)\ COPY3_IF_LT(dmin, d, bx, hx, by, hy)\
} }
static int qpel_motion_search(MpegEncContext * s, static int qpel_motion_search(MPVEncContext *const s,
int *mx_ptr, int *my_ptr, int dmin, int *mx_ptr, int *my_ptr, int dmin,
int src_index, int ref_index, int src_index, int ref_index,
int size, int h) int size, int h)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
const int mx = *mx_ptr; const int mx = *mx_ptr;
const int my = *my_ptr; const int my = *my_ptr;
const int penalty_factor= c->sub_penalty_factor; const int penalty_factor= c->sub_penalty_factor;
@ -256,7 +256,7 @@ static int qpel_motion_search(MpegEncContext * s,
int best_pos[8][2]; int best_pos[8][2];
memset(best, 64, sizeof(int)*8); memset(best, 64, sizeof(int)*8);
if(s->me.dia_size>=2){ if(s->c.me.dia_size>=2){
const int tl= score_map[(index-(1<<ME_MAP_SHIFT)-1)&(ME_MAP_SIZE-1)]; const int tl= score_map[(index-(1<<ME_MAP_SHIFT)-1)&(ME_MAP_SIZE-1)];
const int bl= score_map[(index+(1<<ME_MAP_SHIFT)-1)&(ME_MAP_SIZE-1)]; const int bl= score_map[(index+(1<<ME_MAP_SHIFT)-1)&(ME_MAP_SIZE-1)];
const int tr= score_map[(index-(1<<ME_MAP_SHIFT)+1)&(ME_MAP_SIZE-1)]; const int tr= score_map[(index-(1<<ME_MAP_SHIFT)+1)&(ME_MAP_SIZE-1)];
@ -403,21 +403,21 @@ static int qpel_motion_search(MpegEncContext * s,
} }
#define check(x,y,S,v)\ #define check(x,y,S,v)\
if( (x)<(xmin<<(S)) ) av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d xmin" #v, xmin, (x), (y), s->mb_x, s->mb_y);\ if( (x)<(xmin<<(S)) ) av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d xmin" #v, xmin, (x), (y), s->c.mb_x, s->c.mb_y);\
if( (x)>(xmax<<(S)) ) av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d xmax" #v, xmax, (x), (y), s->mb_x, s->mb_y);\ if( (x)>(xmax<<(S)) ) av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d xmax" #v, xmax, (x), (y), s->c.mb_x, s->c.mb_y);\
if( (y)<(ymin<<(S)) ) av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d ymin" #v, ymin, (x), (y), s->mb_x, s->mb_y);\ if( (y)<(ymin<<(S)) ) av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d ymin" #v, ymin, (x), (y), s->c.mb_x, s->c.mb_y);\
if( (y)>(ymax<<(S)) ) av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d ymax" #v, ymax, (x), (y), s->mb_x, s->mb_y);\ if( (y)>(ymax<<(S)) ) av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d ymax" #v, ymax, (x), (y), s->c.mb_x, s->c.mb_y);\
#define LOAD_COMMON2\ #define LOAD_COMMON2\
uint32_t *map= c->map;\ uint32_t *map= c->map;\
const int qpel= flags&FLAG_QPEL;\ const int qpel= flags&FLAG_QPEL;\
const int shift= 1+qpel;\ const int shift= 1+qpel;\
static av_always_inline int small_diamond_search(MpegEncContext * s, int *best, int dmin, static av_always_inline int small_diamond_search(MPVEncContext *const s, int *best, int dmin,
int src_index, int ref_index, const int penalty_factor, int src_index, int ref_index, const int penalty_factor,
int size, int h, int flags) int size, int h, int flags)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
int next_dir=-1; int next_dir=-1;
LOAD_COMMON LOAD_COMMON
@ -454,11 +454,11 @@ static av_always_inline int small_diamond_search(MpegEncContext * s, int *best,
} }
} }
static int funny_diamond_search(MpegEncContext * s, int *best, int dmin, static int funny_diamond_search(MPVEncContext *const s, int *best, int dmin,
int src_index, int ref_index, const int penalty_factor, int src_index, int ref_index, const int penalty_factor,
int size, int h, int flags) int size, int h, int flags)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
int dia_size; int dia_size;
LOAD_COMMON LOAD_COMMON
@ -496,11 +496,11 @@ static int funny_diamond_search(MpegEncContext * s, int *best, int dmin,
return dmin; return dmin;
} }
static int hex_search(MpegEncContext * s, int *best, int dmin, static int hex_search(MPVEncContext *const s, int *best, int dmin,
int src_index, int ref_index, const int penalty_factor, int src_index, int ref_index, const int penalty_factor,
int size, int h, int flags, int dia_size) int size, int h, int flags, int dia_size)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
LOAD_COMMON LOAD_COMMON
LOAD_COMMON2 LOAD_COMMON2
@ -530,11 +530,11 @@ static int hex_search(MpegEncContext * s, int *best, int dmin,
return dmin; return dmin;
} }
static int l2s_dia_search(MpegEncContext * s, int *best, int dmin, static int l2s_dia_search(MPVEncContext *const s, int *best, int dmin,
int src_index, int ref_index, const int penalty_factor, int src_index, int ref_index, const int penalty_factor,
int size, int h, int flags) int size, int h, int flags)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
LOAD_COMMON LOAD_COMMON
LOAD_COMMON2 LOAD_COMMON2
@ -568,11 +568,11 @@ static int l2s_dia_search(MpegEncContext * s, int *best, int dmin,
return dmin; return dmin;
} }
static int umh_search(MpegEncContext * s, int *best, int dmin, static int umh_search(MPVEncContext *const s, int *best, int dmin,
int src_index, int ref_index, const int penalty_factor, int src_index, int ref_index, const int penalty_factor,
int size, int h, int flags) int size, int h, int flags)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
LOAD_COMMON LOAD_COMMON
LOAD_COMMON2 LOAD_COMMON2
@ -615,11 +615,11 @@ static int umh_search(MpegEncContext * s, int *best, int dmin,
return hex_search(s, best, dmin, src_index, ref_index, penalty_factor, size, h, flags, 2); return hex_search(s, best, dmin, src_index, ref_index, penalty_factor, size, h, flags, 2);
} }
static int full_search(MpegEncContext * s, int *best, int dmin, static int full_search(MPVEncContext *const s, int *best, int dmin,
int src_index, int ref_index, const int penalty_factor, int src_index, int ref_index, const int penalty_factor,
int size, int h, int flags) int size, int h, int flags)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
LOAD_COMMON LOAD_COMMON
LOAD_COMMON2 LOAD_COMMON2
@ -678,11 +678,11 @@ static int full_search(MpegEncContext * s, int *best, int dmin,
} }
#define MAX_SAB_SIZE ME_MAP_SIZE #define MAX_SAB_SIZE ME_MAP_SIZE
static int sab_diamond_search(MpegEncContext * s, int *best, int dmin, static int sab_diamond_search(MPVEncContext *const s, int *best, int dmin,
int src_index, int ref_index, const int penalty_factor, int src_index, int ref_index, const int penalty_factor,
int size, int h, int flags) int size, int h, int flags)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
Minima minima[MAX_SAB_SIZE]; Minima minima[MAX_SAB_SIZE];
const int minima_count= FFABS(c->dia_size); const int minima_count= FFABS(c->dia_size);
@ -768,11 +768,11 @@ static int sab_diamond_search(MpegEncContext * s, int *best, int dmin,
return dmin; return dmin;
} }
static int var_diamond_search(MpegEncContext * s, int *best, int dmin, static int var_diamond_search(MPVEncContext *const s, int *best, int dmin,
int src_index, int ref_index, const int penalty_factor, int src_index, int ref_index, const int penalty_factor,
int size, int h, int flags) int size, int h, int flags)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
int dia_size; int dia_size;
LOAD_COMMON LOAD_COMMON
@ -829,10 +829,10 @@ static int var_diamond_search(MpegEncContext * s, int *best, int dmin,
return dmin; return dmin;
} }
static av_always_inline int diamond_search(MpegEncContext * s, int *best, int dmin, static av_always_inline int diamond_search(MPVEncContext *const s, int *best, int dmin,
int src_index, int ref_index, const int penalty_factor, int src_index, int ref_index, const int penalty_factor,
int size, int h, int flags){ int size, int h, int flags){
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
if(c->dia_size==-1) if(c->dia_size==-1)
return funny_diamond_search(s, best, dmin, src_index, ref_index, penalty_factor, size, h, flags); return funny_diamond_search(s, best, dmin, src_index, ref_index, penalty_factor, size, h, flags);
else if(c->dia_size<-1) else if(c->dia_size<-1)
@ -857,11 +857,11 @@ static av_always_inline int diamond_search(MpegEncContext * s, int *best, int dm
it takes fewer iterations. And it increases the chance that we find the it takes fewer iterations. And it increases the chance that we find the
optimal mv. optimal mv.
*/ */
static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int *mx_ptr, int *my_ptr, static av_always_inline int epzs_motion_search_internal(MPVEncContext *const s, int *mx_ptr, int *my_ptr,
int P[10][2], int src_index, int ref_index, const int16_t (*last_mv)[2], int P[10][2], int src_index, int ref_index, const int16_t (*last_mv)[2],
int ref_mv_scale, int flags, int size, int h) int ref_mv_scale, int flags, int size, int h)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
int best[2]={0, 0}; /**< x and y coordinates of the best motion vector. int best[2]={0, 0}; /**< x and y coordinates of the best motion vector.
i.e. the difference between the position of the i.e. the difference between the position of the
block currently being encoded and the position of block currently being encoded and the position of
@ -871,8 +871,8 @@ static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int
corresponding to the mv stored in best[]. */ corresponding to the mv stored in best[]. */
unsigned map_generation; unsigned map_generation;
int penalty_factor; int penalty_factor;
const int ref_mv_stride= s->mb_stride; //pass as arg FIXME const int ref_mv_stride= s->c.mb_stride; //pass as arg FIXME
const int ref_mv_xy = s->mb_x + s->mb_y * ref_mv_stride; // add to last_mv before passing FIXME const int ref_mv_xy = s->c.mb_x + s->c.mb_y * ref_mv_stride; // add to last_mv before passing FIXME
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
LOAD_COMMON LOAD_COMMON
@ -896,12 +896,12 @@ static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int
score_map[0]= dmin; score_map[0]= dmin;
//FIXME precalc first term below? //FIXME precalc first term below?
if ((s->pict_type == AV_PICTURE_TYPE_B && !(c->flags & FLAG_DIRECT)) || if ((s->c.pict_type == AV_PICTURE_TYPE_B && !(c->flags & FLAG_DIRECT)) ||
s->mpv_flags & FF_MPV_FLAG_MV0) s->mpv_flags & FF_MPV_FLAG_MV0)
dmin += (mv_penalty[pred_x] + mv_penalty[pred_y])*penalty_factor; dmin += (mv_penalty[pred_x] + mv_penalty[pred_y])*penalty_factor;
/* first line */ /* first line */
if (s->first_slice_line) { if (s->c.first_slice_line) {
CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift) CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift)
CHECK_CLIPPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16, CHECK_CLIPPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16) (last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16)
@ -930,13 +930,13 @@ static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int
if(c->pre_pass){ if(c->pre_pass){
CHECK_CLIPPED_MV((last_mv[ref_mv_xy-1][0]*ref_mv_scale + (1<<15))>>16, CHECK_CLIPPED_MV((last_mv[ref_mv_xy-1][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy-1][1]*ref_mv_scale + (1<<15))>>16) (last_mv[ref_mv_xy-1][1]*ref_mv_scale + (1<<15))>>16)
if(!s->first_slice_line) if(!s->c.first_slice_line)
CHECK_CLIPPED_MV((last_mv[ref_mv_xy-ref_mv_stride][0]*ref_mv_scale + (1<<15))>>16, CHECK_CLIPPED_MV((last_mv[ref_mv_xy-ref_mv_stride][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy-ref_mv_stride][1]*ref_mv_scale + (1<<15))>>16) (last_mv[ref_mv_xy-ref_mv_stride][1]*ref_mv_scale + (1<<15))>>16)
}else{ }else{
CHECK_CLIPPED_MV((last_mv[ref_mv_xy+1][0]*ref_mv_scale + (1<<15))>>16, CHECK_CLIPPED_MV((last_mv[ref_mv_xy+1][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy+1][1]*ref_mv_scale + (1<<15))>>16) (last_mv[ref_mv_xy+1][1]*ref_mv_scale + (1<<15))>>16)
if(s->mb_y+1<s->end_mb_y) //FIXME replace at least with last_slice_line if(s->c.mb_y+1<s->c.end_mb_y) //FIXME replace at least with last_slice_line
CHECK_CLIPPED_MV((last_mv[ref_mv_xy+ref_mv_stride][0]*ref_mv_scale + (1<<15))>>16, CHECK_CLIPPED_MV((last_mv[ref_mv_xy+ref_mv_stride][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy+ref_mv_stride][1]*ref_mv_scale + (1<<15))>>16) (last_mv[ref_mv_xy+ref_mv_stride][1]*ref_mv_scale + (1<<15))>>16)
} }
@ -944,10 +944,10 @@ static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int
if(c->avctx->last_predictor_count){ if(c->avctx->last_predictor_count){
const int count= c->avctx->last_predictor_count; const int count= c->avctx->last_predictor_count;
const int xstart= FFMAX(0, s->mb_x - count); const int xstart= FFMAX(0, s->c.mb_x - count);
const int ystart= FFMAX(0, s->mb_y - count); const int ystart= FFMAX(0, s->c.mb_y - count);
const int xend= FFMIN(s->mb_width , s->mb_x + count + 1); const int xend= FFMIN(s->c.mb_width , s->c.mb_x + count + 1);
const int yend= FFMIN(s->mb_height, s->mb_y + count + 1); const int yend= FFMIN(s->c.mb_height, s->c.mb_y + count + 1);
int mb_y; int mb_y;
for(mb_y=ystart; mb_y<yend; mb_y++){ for(mb_y=ystart; mb_y<yend; mb_y++){
@ -974,12 +974,12 @@ static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int
} }
//this function is dedicated to the brain damaged gcc //this function is dedicated to the brain damaged gcc
int ff_epzs_motion_search(MpegEncContext *s, int *mx_ptr, int *my_ptr, int ff_epzs_motion_search(MPVEncContext *const s, int *mx_ptr, int *my_ptr,
int P[10][2], int src_index, int ref_index, int P[10][2], int src_index, int ref_index,
const int16_t (*last_mv)[2], int ref_mv_scale, const int16_t (*last_mv)[2], int ref_mv_scale,
int size, int h) int size, int h)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
//FIXME convert other functions in the same way if faster //FIXME convert other functions in the same way if faster
if(c->flags==0 && h==16 && size==0){ if(c->flags==0 && h==16 && size==0){
return epzs_motion_search_internal(s, mx_ptr, my_ptr, P, src_index, ref_index, last_mv, ref_mv_scale, 0, 0, 16); return epzs_motion_search_internal(s, mx_ptr, my_ptr, P, src_index, ref_index, last_mv, ref_mv_scale, 0, 0, 16);
@ -990,19 +990,19 @@ int ff_epzs_motion_search(MpegEncContext *s, int *mx_ptr, int *my_ptr,
} }
} }
static int epzs_motion_search2(MpegEncContext * s, static int epzs_motion_search2(MPVEncContext *const s,
int *mx_ptr, int *my_ptr, int P[10][2], int *mx_ptr, int *my_ptr, int P[10][2],
int src_index, int ref_index, const int16_t (*last_mv)[2], int src_index, int ref_index, const int16_t (*last_mv)[2],
int ref_mv_scale, const int size) int ref_mv_scale, const int size)
{ {
MotionEstContext * const c= &s->me; MotionEstContext * const c= &s->c.me;
int best[2]={0, 0}; int best[2]={0, 0};
int d, dmin; int d, dmin;
unsigned map_generation; unsigned map_generation;
const int penalty_factor= c->penalty_factor; const int penalty_factor= c->penalty_factor;
const int h=8; const int h=8;
const int ref_mv_stride= s->mb_stride; const int ref_mv_stride= s->c.mb_stride;
const int ref_mv_xy= s->mb_x + s->mb_y *ref_mv_stride; const int ref_mv_xy= s->c.mb_x + s->c.mb_y *ref_mv_stride;
me_cmp_func cmpf, chroma_cmpf; me_cmp_func cmpf, chroma_cmpf;
LOAD_COMMON LOAD_COMMON
int flags= c->flags; int flags= c->flags;
@ -1016,7 +1016,7 @@ static int epzs_motion_search2(MpegEncContext * s,
dmin = 1000000; dmin = 1000000;
/* first line */ /* first line */
if (s->first_slice_line) { if (s->c.first_slice_line) {
CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift) CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift)
CHECK_CLIPPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16, CHECK_CLIPPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16) (last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16)
@ -1034,7 +1034,7 @@ static int epzs_motion_search2(MpegEncContext * s,
if(dmin>64*4){ if(dmin>64*4){
CHECK_CLIPPED_MV((last_mv[ref_mv_xy+1][0]*ref_mv_scale + (1<<15))>>16, CHECK_CLIPPED_MV((last_mv[ref_mv_xy+1][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy+1][1]*ref_mv_scale + (1<<15))>>16) (last_mv[ref_mv_xy+1][1]*ref_mv_scale + (1<<15))>>16)
if(s->mb_y+1<s->end_mb_y) //FIXME replace at least with last_slice_line if(s->c.mb_y+1<s->c.end_mb_y) //FIXME replace at least with last_slice_line
CHECK_CLIPPED_MV((last_mv[ref_mv_xy+ref_mv_stride][0]*ref_mv_scale + (1<<15))>>16, CHECK_CLIPPED_MV((last_mv[ref_mv_xy+ref_mv_stride][0]*ref_mv_scale + (1<<15))>>16,
(last_mv[ref_mv_xy+ref_mv_stride][1]*ref_mv_scale + (1<<15))>>16) (last_mv[ref_mv_xy+ref_mv_stride][1]*ref_mv_scale + (1<<15))>>16)
} }

View File

@ -137,7 +137,7 @@ av_cold void ff_mpeg1_init_uni_ac_vlc(const int8_t max_level[],
} }
#if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
static void put_header(MpegEncContext *s, uint32_t header) static void put_header(MPVEncContext *const s, uint32_t header)
{ {
align_put_bits(&s->pb); align_put_bits(&s->pb);
put_bits32(&s->pb, header); put_bits32(&s->pb, header);
@ -146,16 +146,16 @@ static void put_header(MpegEncContext *s, uint32_t header)
/* put sequence header if needed */ /* put sequence header if needed */
static void mpeg1_encode_sequence_header(MPEG12EncContext *mpeg12) static void mpeg1_encode_sequence_header(MPEG12EncContext *mpeg12)
{ {
MpegEncContext *const s = &mpeg12->mpeg.s; MPVEncContext *const s = &mpeg12->mpeg.s;
unsigned int vbv_buffer_size, fps, v; unsigned int vbv_buffer_size, fps, v;
int constraint_parameter_flag; int constraint_parameter_flag;
AVRational framerate = ff_mpeg12_frame_rate_tab[mpeg12->frame_rate_index]; AVRational framerate = ff_mpeg12_frame_rate_tab[mpeg12->frame_rate_index];
uint64_t time_code; uint64_t time_code;
int64_t best_aspect_error = INT64_MAX; int64_t best_aspect_error = INT64_MAX;
AVRational aspect_ratio = s->avctx->sample_aspect_ratio; AVRational aspect_ratio = s->c.avctx->sample_aspect_ratio;
int aspect_ratio_info; int aspect_ratio_info;
if (!(s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY)) if (!(s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_KEY))
return; return;
if (aspect_ratio.num == 0 || aspect_ratio.den == 0) if (aspect_ratio.num == 0 || aspect_ratio.den == 0)
@ -164,15 +164,15 @@ static void mpeg1_encode_sequence_header(MPEG12EncContext *mpeg12)
/* MPEG-1 header repeated every GOP */ /* MPEG-1 header repeated every GOP */
put_header(s, SEQ_START_CODE); put_header(s, SEQ_START_CODE);
put_sbits(&s->pb, 12, s->width & 0xFFF); put_sbits(&s->pb, 12, s->c.width & 0xFFF);
put_sbits(&s->pb, 12, s->height & 0xFFF); put_sbits(&s->pb, 12, s->c.height & 0xFFF);
for (int i = 1; i < 15; i++) { for (int i = 1; i < 15; i++) {
int64_t error = aspect_ratio.num * (1LL<<32) / aspect_ratio.den; int64_t error = aspect_ratio.num * (1LL<<32) / aspect_ratio.den;
if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO || i <= 1) if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO || i <= 1)
error -= (1LL<<32) / ff_mpeg1_aspect[i]; error -= (1LL<<32) / ff_mpeg1_aspect[i];
else else
error -= (1LL<<32)*ff_mpeg2_aspect[i].num * s->height / s->width / ff_mpeg2_aspect[i].den; error -= (1LL<<32)*ff_mpeg2_aspect[i].num * s->c.height / s->c.width / ff_mpeg2_aspect[i].den;
error = FFABS(error); error = FFABS(error);
@ -185,16 +185,16 @@ static void mpeg1_encode_sequence_header(MPEG12EncContext *mpeg12)
put_bits(&s->pb, 4, aspect_ratio_info); put_bits(&s->pb, 4, aspect_ratio_info);
put_bits(&s->pb, 4, mpeg12->frame_rate_index); put_bits(&s->pb, 4, mpeg12->frame_rate_index);
if (s->avctx->rc_max_rate) { if (s->c.avctx->rc_max_rate) {
v = (s->avctx->rc_max_rate + 399) / 400; v = (s->c.avctx->rc_max_rate + 399) / 400;
if (v > 0x3ffff && s->codec_id == AV_CODEC_ID_MPEG1VIDEO) if (v > 0x3ffff && s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO)
v = 0x3ffff; v = 0x3ffff;
} else { } else {
v = 0x3FFFF; v = 0x3FFFF;
} }
if (s->avctx->rc_buffer_size) if (s->c.avctx->rc_buffer_size)
vbv_buffer_size = s->avctx->rc_buffer_size; vbv_buffer_size = s->c.avctx->rc_buffer_size;
else else
/* VBV calculation: Scaled so that a VCD has the proper /* VBV calculation: Scaled so that a VCD has the proper
* VBV size of 40 kilobytes */ * VBV size of 40 kilobytes */
@ -206,48 +206,48 @@ static void mpeg1_encode_sequence_header(MPEG12EncContext *mpeg12)
put_sbits(&s->pb, 10, vbv_buffer_size); put_sbits(&s->pb, 10, vbv_buffer_size);
constraint_parameter_flag = constraint_parameter_flag =
s->width <= 768 && s->c.width <= 768 &&
s->height <= 576 && s->c.height <= 576 &&
s->mb_width * s->mb_height <= 396 && s->c.mb_width * s->c.mb_height <= 396 &&
s->mb_width * s->mb_height * framerate.num <= 396 * 25 * framerate.den && s->c.mb_width * s->c.mb_height * framerate.num <= 396 * 25 * framerate.den &&
framerate.num <= framerate.den * 30 && framerate.num <= framerate.den * 30 &&
s->avctx->me_range && s->c.avctx->me_range &&
s->avctx->me_range < 128 && s->c.avctx->me_range < 128 &&
vbv_buffer_size <= 20 && vbv_buffer_size <= 20 &&
v <= 1856000 / 400 && v <= 1856000 / 400 &&
s->codec_id == AV_CODEC_ID_MPEG1VIDEO; s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO;
put_bits(&s->pb, 1, constraint_parameter_flag); put_bits(&s->pb, 1, constraint_parameter_flag);
ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix); ff_write_quant_matrix(&s->pb, s->c.avctx->intra_matrix);
ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix); ff_write_quant_matrix(&s->pb, s->c.avctx->inter_matrix);
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO) {
const AVFrameSideData *side_data; const AVFrameSideData *side_data;
int width = s->width; int width = s->c.width;
int height = s->height; int height = s->c.height;
int use_seq_disp_ext; int use_seq_disp_ext;
put_header(s, EXT_START_CODE); put_header(s, EXT_START_CODE);
put_bits(&s->pb, 4, 1); // seq ext put_bits(&s->pb, 4, 1); // seq ext
put_bits(&s->pb, 1, s->avctx->profile == AV_PROFILE_MPEG2_422); // escx 1 for 4:2:2 profile put_bits(&s->pb, 1, s->c.avctx->profile == AV_PROFILE_MPEG2_422); // escx 1 for 4:2:2 profile
put_bits(&s->pb, 3, s->avctx->profile); // profile put_bits(&s->pb, 3, s->c.avctx->profile); // profile
put_bits(&s->pb, 4, s->avctx->level); // level put_bits(&s->pb, 4, s->c.avctx->level); // level
put_bits(&s->pb, 1, s->progressive_sequence); put_bits(&s->pb, 1, s->c.progressive_sequence);
put_bits(&s->pb, 2, s->chroma_format); put_bits(&s->pb, 2, s->c.chroma_format);
put_bits(&s->pb, 2, s->width >> 12); put_bits(&s->pb, 2, s->c.width >> 12);
put_bits(&s->pb, 2, s->height >> 12); put_bits(&s->pb, 2, s->c.height >> 12);
put_bits(&s->pb, 12, v >> 18); // bitrate ext put_bits(&s->pb, 12, v >> 18); // bitrate ext
put_bits(&s->pb, 1, 1); // marker put_bits(&s->pb, 1, 1); // marker
put_bits(&s->pb, 8, vbv_buffer_size >> 10); // vbv buffer ext put_bits(&s->pb, 8, vbv_buffer_size >> 10); // vbv buffer ext
put_bits(&s->pb, 1, s->low_delay); put_bits(&s->pb, 1, s->c.low_delay);
put_bits(&s->pb, 2, mpeg12->frame_rate_ext.num-1); // frame_rate_ext_n put_bits(&s->pb, 2, mpeg12->frame_rate_ext.num-1); // frame_rate_ext_n
put_bits(&s->pb, 5, mpeg12->frame_rate_ext.den-1); // frame_rate_ext_d put_bits(&s->pb, 5, mpeg12->frame_rate_ext.den-1); // frame_rate_ext_d
side_data = av_frame_get_side_data(s->cur_pic.ptr->f, AV_FRAME_DATA_PANSCAN); side_data = av_frame_get_side_data(s->c.cur_pic.ptr->f, AV_FRAME_DATA_PANSCAN);
if (side_data) { if (side_data) {
const AVPanScan *pan_scan = (AVPanScan *)side_data->data; const AVPanScan *pan_scan = (AVPanScan *)side_data->data;
if (pan_scan->width && pan_scan->height) { if (pan_scan->width && pan_scan->height) {
@ -256,11 +256,11 @@ static void mpeg1_encode_sequence_header(MPEG12EncContext *mpeg12)
} }
} }
use_seq_disp_ext = (width != s->width || use_seq_disp_ext = (width != s->c.width ||
height != s->height || height != s->c.height ||
s->avctx->color_primaries != AVCOL_PRI_UNSPECIFIED || s->c.avctx->color_primaries != AVCOL_PRI_UNSPECIFIED ||
s->avctx->color_trc != AVCOL_TRC_UNSPECIFIED || s->c.avctx->color_trc != AVCOL_TRC_UNSPECIFIED ||
s->avctx->colorspace != AVCOL_SPC_UNSPECIFIED || s->c.avctx->colorspace != AVCOL_SPC_UNSPECIFIED ||
mpeg12->video_format != VIDEO_FORMAT_UNSPECIFIED); mpeg12->video_format != VIDEO_FORMAT_UNSPECIFIED);
if (mpeg12->seq_disp_ext == 1 || if (mpeg12->seq_disp_ext == 1 ||
@ -269,9 +269,9 @@ static void mpeg1_encode_sequence_header(MPEG12EncContext *mpeg12)
put_bits(&s->pb, 4, 2); // sequence display extension put_bits(&s->pb, 4, 2); // sequence display extension
put_bits(&s->pb, 3, mpeg12->video_format); // video_format put_bits(&s->pb, 3, mpeg12->video_format); // video_format
put_bits(&s->pb, 1, 1); // colour_description put_bits(&s->pb, 1, 1); // colour_description
put_bits(&s->pb, 8, s->avctx->color_primaries); // colour_primaries put_bits(&s->pb, 8, s->c.avctx->color_primaries); // colour_primaries
put_bits(&s->pb, 8, s->avctx->color_trc); // transfer_characteristics put_bits(&s->pb, 8, s->c.avctx->color_trc); // transfer_characteristics
put_bits(&s->pb, 8, s->avctx->colorspace); // matrix_coefficients put_bits(&s->pb, 8, s->c.avctx->colorspace); // matrix_coefficients
put_bits(&s->pb, 14, width); // display_horizontal_size put_bits(&s->pb, 14, width); // display_horizontal_size
put_bits(&s->pb, 1, 1); // marker_bit put_bits(&s->pb, 1, 1); // marker_bit
put_bits(&s->pb, 14, height); // display_vertical_size put_bits(&s->pb, 14, height); // display_vertical_size
@ -284,10 +284,10 @@ static void mpeg1_encode_sequence_header(MPEG12EncContext *mpeg12)
/* time code: we must convert from the real frame rate to a /* time code: we must convert from the real frame rate to a
* fake MPEG frame rate in case of low frame rate */ * fake MPEG frame rate in case of low frame rate */
fps = (framerate.num + framerate.den / 2) / framerate.den; fps = (framerate.num + framerate.den / 2) / framerate.den;
time_code = s->cur_pic.ptr->coded_picture_number + time_code = s->c.cur_pic.ptr->coded_picture_number +
mpeg12->timecode_frame_start; mpeg12->timecode_frame_start;
mpeg12->gop_picture_number = s->cur_pic.ptr->coded_picture_number; mpeg12->gop_picture_number = s->c.cur_pic.ptr->coded_picture_number;
av_assert0(mpeg12->drop_frame_timecode == !!(mpeg12->tc.flags & AV_TIMECODE_FLAG_DROPFRAME)); av_assert0(mpeg12->drop_frame_timecode == !!(mpeg12->tc.flags & AV_TIMECODE_FLAG_DROPFRAME));
if (mpeg12->drop_frame_timecode) if (mpeg12->drop_frame_timecode)
@ -298,12 +298,12 @@ static void mpeg1_encode_sequence_header(MPEG12EncContext *mpeg12)
put_bits(&s->pb, 1, 1); put_bits(&s->pb, 1, 1);
put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60)); put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60));
put_bits(&s->pb, 6, (uint32_t)((time_code % fps))); put_bits(&s->pb, 6, (uint32_t)((time_code % fps)));
put_bits(&s->pb, 1, !!(s->avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) || put_bits(&s->pb, 1, !!(s->c.avctx->flags & AV_CODEC_FLAG_CLOSED_GOP) ||
mpeg12->mpeg.intra_only || !mpeg12->gop_picture_number); mpeg12->mpeg.intra_only || !mpeg12->gop_picture_number);
put_bits(&s->pb, 1, 0); // broken link put_bits(&s->pb, 1, 0); // broken link
} }
static inline void encode_mb_skip_run(MpegEncContext *s, int run) static inline void encode_mb_skip_run(MPVEncContext *const s, int run)
{ {
while (run >= 33) { while (run >= 33) {
put_bits(&s->pb, 11, 0x008); put_bits(&s->pb, 11, 0x008);
@ -313,20 +313,20 @@ static inline void encode_mb_skip_run(MpegEncContext *s, int run)
ff_mpeg12_mbAddrIncrTable[run][0]); ff_mpeg12_mbAddrIncrTable[run][0]);
} }
static av_always_inline void put_qscale(MpegEncContext *s) static av_always_inline void put_qscale(MPVEncContext *const s)
{ {
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->c.qscale);
} }
void ff_mpeg1_encode_slice_header(MpegEncContext *s) void ff_mpeg1_encode_slice_header(MPVEncContext *const s)
{ {
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->height > 2800) { if (s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO && s->c.height > 2800) {
put_header(s, SLICE_MIN_START_CODE + (s->mb_y & 127)); put_header(s, SLICE_MIN_START_CODE + (s->c.mb_y & 127));
/* slice_vertical_position_extension */ /* slice_vertical_position_extension */
put_bits(&s->pb, 3, s->mb_y >> 7); put_bits(&s->pb, 3, s->c.mb_y >> 7);
} else { } else {
av_assert1(s->mb_y <= SLICE_MAX_START_CODE - SLICE_MIN_START_CODE); av_assert1(s->c.mb_y <= SLICE_MAX_START_CODE - SLICE_MIN_START_CODE);
put_header(s, SLICE_MIN_START_CODE + s->mb_y); put_header(s, SLICE_MIN_START_CODE + s->c.mb_y);
} }
put_qscale(s); put_qscale(s);
/* slice extra information */ /* slice extra information */
@ -336,7 +336,7 @@ void ff_mpeg1_encode_slice_header(MpegEncContext *s)
static int mpeg1_encode_picture_header(MPVMainEncContext *const m) static int mpeg1_encode_picture_header(MPVMainEncContext *const m)
{ {
MPEG12EncContext *const mpeg12 = (MPEG12EncContext*)m; MPEG12EncContext *const mpeg12 = (MPEG12EncContext*)m;
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
const AVFrameSideData *side_data; const AVFrameSideData *side_data;
mpeg1_encode_sequence_header(mpeg12); mpeg1_encode_sequence_header(mpeg12);
@ -345,74 +345,74 @@ static int mpeg1_encode_picture_header(MPVMainEncContext *const m)
put_header(s, PICTURE_START_CODE); put_header(s, PICTURE_START_CODE);
/* temporal reference */ /* temporal reference */
// RAL: s->picture_number instead of s->fake_picture_number // RAL: s->c.picture_number instead of s->fake_picture_number
put_bits(&s->pb, 10, put_bits(&s->pb, 10,
(s->picture_number - mpeg12->gop_picture_number) & 0x3ff); (s->c.picture_number - mpeg12->gop_picture_number) & 0x3ff);
put_bits(&s->pb, 3, s->pict_type); put_bits(&s->pb, 3, s->c.pict_type);
m->vbv_delay_pos = put_bytes_count(&s->pb, 0); m->vbv_delay_pos = put_bytes_count(&s->pb, 0);
put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */ put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */
// RAL: Forward f_code also needed for B-frames // RAL: Forward f_code also needed for B-frames
if (s->pict_type == AV_PICTURE_TYPE_P || if (s->c.pict_type == AV_PICTURE_TYPE_P ||
s->pict_type == AV_PICTURE_TYPE_B) { s->c.pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 1, 0); /* half pel coordinates */ put_bits(&s->pb, 1, 0); /* half pel coordinates */
if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO)
put_bits(&s->pb, 3, s->f_code); /* forward_f_code */ put_bits(&s->pb, 3, s->c.f_code); /* forward_f_code */
else else
put_bits(&s->pb, 3, 7); /* forward_f_code */ put_bits(&s->pb, 3, 7); /* forward_f_code */
} }
// RAL: Backward f_code necessary for B-frames // RAL: Backward f_code necessary for B-frames
if (s->pict_type == AV_PICTURE_TYPE_B) { if (s->c.pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 1, 0); /* half pel coordinates */ put_bits(&s->pb, 1, 0); /* half pel coordinates */
if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO)
put_bits(&s->pb, 3, s->b_code); /* backward_f_code */ put_bits(&s->pb, 3, s->c.b_code); /* backward_f_code */
else else
put_bits(&s->pb, 3, 7); /* backward_f_code */ put_bits(&s->pb, 3, 7); /* backward_f_code */
} }
put_bits(&s->pb, 1, 0); /* extra bit picture */ put_bits(&s->pb, 1, 0); /* extra bit picture */
s->frame_pred_frame_dct = 1; s->c.frame_pred_frame_dct = 1;
if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO) { if (s->c.codec_id == AV_CODEC_ID_MPEG2VIDEO) {
put_header(s, EXT_START_CODE); put_header(s, EXT_START_CODE);
put_bits(&s->pb, 4, 8); /* pic ext */ put_bits(&s->pb, 4, 8); /* pic ext */
if (s->pict_type == AV_PICTURE_TYPE_P || if (s->c.pict_type == AV_PICTURE_TYPE_P ||
s->pict_type == AV_PICTURE_TYPE_B) { s->c.pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 4, s->f_code); put_bits(&s->pb, 4, s->c.f_code);
put_bits(&s->pb, 4, s->f_code); put_bits(&s->pb, 4, s->c.f_code);
} else { } else {
put_bits(&s->pb, 8, 255); put_bits(&s->pb, 8, 255);
} }
if (s->pict_type == AV_PICTURE_TYPE_B) { if (s->c.pict_type == AV_PICTURE_TYPE_B) {
put_bits(&s->pb, 4, s->b_code); put_bits(&s->pb, 4, s->c.b_code);
put_bits(&s->pb, 4, s->b_code); put_bits(&s->pb, 4, s->c.b_code);
} else { } else {
put_bits(&s->pb, 8, 255); put_bits(&s->pb, 8, 255);
} }
put_bits(&s->pb, 2, s->intra_dc_precision); put_bits(&s->pb, 2, s->c.intra_dc_precision);
av_assert0(s->picture_structure == PICT_FRAME); av_assert0(s->c.picture_structure == PICT_FRAME);
put_bits(&s->pb, 2, s->picture_structure); put_bits(&s->pb, 2, s->c.picture_structure);
if (s->progressive_sequence) if (s->c.progressive_sequence)
put_bits(&s->pb, 1, 0); /* no repeat */ put_bits(&s->pb, 1, 0); /* no repeat */
else else
put_bits(&s->pb, 1, !!(s->cur_pic.ptr->f->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST)); put_bits(&s->pb, 1, !!(s->c.cur_pic.ptr->f->flags & AV_FRAME_FLAG_TOP_FIELD_FIRST));
/* XXX: optimize the generation of this flag with entropy measures */ /* XXX: optimize the generation of this flag with entropy measures */
s->frame_pred_frame_dct = s->progressive_sequence; s->c.frame_pred_frame_dct = s->c.progressive_sequence;
put_bits(&s->pb, 1, s->frame_pred_frame_dct); put_bits(&s->pb, 1, s->c.frame_pred_frame_dct);
put_bits(&s->pb, 1, s->concealment_motion_vectors); put_bits(&s->pb, 1, s->c.concealment_motion_vectors);
put_bits(&s->pb, 1, s->q_scale_type); put_bits(&s->pb, 1, s->c.q_scale_type);
put_bits(&s->pb, 1, s->intra_vlc_format); put_bits(&s->pb, 1, s->c.intra_vlc_format);
put_bits(&s->pb, 1, s->alternate_scan); put_bits(&s->pb, 1, s->c.alternate_scan);
put_bits(&s->pb, 1, s->repeat_first_field); put_bits(&s->pb, 1, s->c.repeat_first_field);
s->progressive_frame = s->progressive_sequence; s->c.progressive_frame = s->c.progressive_sequence;
/* chroma_420_type */ /* chroma_420_type */
put_bits(&s->pb, 1, s->chroma_format == put_bits(&s->pb, 1, s->c.chroma_format ==
CHROMA_420 ? s->progressive_frame : 0); CHROMA_420 ? s->c.progressive_frame : 0);
put_bits(&s->pb, 1, s->progressive_frame); put_bits(&s->pb, 1, s->c.progressive_frame);
put_bits(&s->pb, 1, 0); /* composite_display_flag */ put_bits(&s->pb, 1, 0); /* composite_display_flag */
} }
if (mpeg12->scan_offset) { if (mpeg12->scan_offset) {
@ -422,7 +422,7 @@ static int mpeg1_encode_picture_header(MPVMainEncContext *const m)
for (i = 0; i < sizeof(svcd_scan_offset_placeholder); i++) for (i = 0; i < sizeof(svcd_scan_offset_placeholder); i++)
put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]); put_bits(&s->pb, 8, svcd_scan_offset_placeholder[i]);
} }
side_data = av_frame_get_side_data(s->cur_pic.ptr->f, side_data = av_frame_get_side_data(s->c.cur_pic.ptr->f,
AV_FRAME_DATA_STEREO3D); AV_FRAME_DATA_STEREO3D);
if (side_data) { if (side_data) {
const AVStereo3D *stereo = (AVStereo3D *)side_data->data; const AVStereo3D *stereo = (AVStereo3D *)side_data->data;
@ -460,7 +460,7 @@ static int mpeg1_encode_picture_header(MPVMainEncContext *const m)
} }
if (CONFIG_MPEG2VIDEO_ENCODER && mpeg12->a53_cc) { if (CONFIG_MPEG2VIDEO_ENCODER && mpeg12->a53_cc) {
side_data = av_frame_get_side_data(s->cur_pic.ptr->f, side_data = av_frame_get_side_data(s->c.cur_pic.ptr->f,
AV_FRAME_DATA_A53_CC); AV_FRAME_DATA_A53_CC);
if (side_data) { if (side_data) {
if (side_data->size <= A53_MAX_CC_COUNT * 3 && side_data->size % 3 == 0) { if (side_data->size <= A53_MAX_CC_COUNT * 3 && side_data->size % 3 == 0) {
@ -476,33 +476,33 @@ static int mpeg1_encode_picture_header(MPVMainEncContext *const m)
put_bits(&s->pb, 8, 0xff); // marker_bits put_bits(&s->pb, 8, 0xff); // marker_bits
} else { } else {
av_log(s->avctx, AV_LOG_WARNING, av_log(s->c.avctx, AV_LOG_WARNING,
"Closed Caption size (%"SIZE_SPECIFIER") can not exceed " "Closed Caption size (%"SIZE_SPECIFIER") can not exceed "
"93 bytes and must be a multiple of 3\n", side_data->size); "93 bytes and must be a multiple of 3\n", side_data->size);
} }
} }
} }
s->mb_y = 0; s->c.mb_y = 0;
ff_mpeg1_encode_slice_header(s); ff_mpeg1_encode_slice_header(s);
return 0; return 0;
} }
static inline void put_mb_modes(MpegEncContext *s, int n, int bits, static inline void put_mb_modes(MPVEncContext *const s, int n, int bits,
int has_mv, int field_motion) int has_mv, int field_motion)
{ {
put_bits(&s->pb, n, bits); put_bits(&s->pb, n, bits);
if (!s->frame_pred_frame_dct) { if (!s->c.frame_pred_frame_dct) {
if (has_mv) if (has_mv)
/* motion_type: frame/field */ /* motion_type: frame/field */
put_bits(&s->pb, 2, 2 - field_motion); put_bits(&s->pb, 2, 2 - field_motion);
put_bits(&s->pb, 1, s->interlaced_dct); put_bits(&s->pb, 1, s->c.interlaced_dct);
} }
} }
// RAL: Parameter added: f_or_b_code // RAL: Parameter added: f_or_b_code
static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code) static void mpeg1_encode_motion(MPVEncContext *const s, int val, int f_or_b_code)
{ {
if (val == 0) { if (val == 0) {
/* zero vector, corresponds to ff_mpeg12_mbMotionVectorTable[0] */ /* zero vector, corresponds to ff_mpeg12_mbMotionVectorTable[0] */
@ -539,7 +539,7 @@ static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code)
} }
} }
static inline void encode_dc(MpegEncContext *s, int diff, int component) static inline void encode_dc(MPVEncContext *const s, int diff, int component)
{ {
unsigned int diff_u = diff + 255; unsigned int diff_u = diff + 255;
if (diff_u >= 511) { if (diff_u >= 511) {
@ -573,23 +573,23 @@ static inline void encode_dc(MpegEncContext *s, int diff, int component)
} }
} }
static void mpeg1_encode_block(MpegEncContext *s, const int16_t *block, int n) static void mpeg1_encode_block(MPVEncContext *const s, const int16_t block[], int n)
{ {
int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign; int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign;
int code, component; int code, component;
const uint16_t (*table_vlc)[2] = ff_mpeg1_vlc_table; const uint16_t (*table_vlc)[2] = ff_mpeg1_vlc_table;
last_index = s->block_last_index[n]; last_index = s->c.block_last_index[n];
/* DC coef */ /* DC coef */
if (s->mb_intra) { if (s->c.mb_intra) {
component = (n <= 3 ? 0 : (n & 1) + 1); component = (n <= 3 ? 0 : (n & 1) + 1);
dc = block[0]; /* overflow is impossible */ dc = block[0]; /* overflow is impossible */
diff = dc - s->last_dc[component]; diff = dc - s->c.last_dc[component];
encode_dc(s, diff, component); encode_dc(s, diff, component);
s->last_dc[component] = dc; s->c.last_dc[component] = dc;
i = 1; i = 1;
if (s->intra_vlc_format) if (s->c.intra_vlc_format)
table_vlc = ff_mpeg2_vlc_table; table_vlc = ff_mpeg2_vlc_table;
} else { } else {
/* encode the first coefficient: needs to be done here because /* encode the first coefficient: needs to be done here because
@ -610,7 +610,7 @@ static void mpeg1_encode_block(MpegEncContext *s, const int16_t *block, int n)
last_non_zero = i - 1; last_non_zero = i - 1;
for (; i <= last_index; i++) { for (; i <= last_index; i++) {
j = s->intra_scantable.permutated[i]; j = s->c.intra_scantable.permutated[i];
level = block[j]; level = block[j];
next_coef: next_coef:
@ -634,7 +634,7 @@ next_coef:
put_bits(&s->pb, 6, 0x01); put_bits(&s->pb, 6, 0x01);
/* escape: only clip in this case */ /* escape: only clip in this case */
put_bits(&s->pb, 6, run); put_bits(&s->pb, 6, run);
if (s->codec_id == AV_CODEC_ID_MPEG1VIDEO) { if (s->c.codec_id == AV_CODEC_ID_MPEG1VIDEO) {
if (alevel < 128) { if (alevel < 128) {
put_sbits(&s->pb, 8, level); put_sbits(&s->pb, 8, level);
} else { } else {
@ -654,55 +654,55 @@ next_coef:
put_bits(&s->pb, table_vlc[112][1], table_vlc[112][0]); put_bits(&s->pb, table_vlc[112][1], table_vlc[112][0]);
} }
static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s, static av_always_inline void mpeg1_encode_mb_internal(MPVEncContext *const s,
const int16_t block[8][64], const int16_t block[8][64],
int motion_x, int motion_y, int motion_x, int motion_y,
int mb_block_count, int mb_block_count,
int chroma_y_shift) int chroma_y_shift)
{ {
/* MPEG-1 is always 420. */ /* MPEG-1 is always 420. */
#define IS_MPEG1(s) (chroma_y_shift == 1 && (s)->codec_id == AV_CODEC_ID_MPEG1VIDEO) #define IS_MPEG1(s) (chroma_y_shift == 1 && (s)->c.codec_id == AV_CODEC_ID_MPEG1VIDEO)
int i, cbp; int i, cbp;
const int mb_x = s->mb_x; const int mb_x = s->c.mb_x;
const int mb_y = s->mb_y; const int mb_y = s->c.mb_y;
const int first_mb = mb_x == s->resync_mb_x && mb_y == s->resync_mb_y; const int first_mb = mb_x == s->c.resync_mb_x && mb_y == s->c.resync_mb_y;
/* compute cbp */ /* compute cbp */
cbp = 0; cbp = 0;
for (i = 0; i < mb_block_count; i++) for (i = 0; i < mb_block_count; i++)
if (s->block_last_index[i] >= 0) if (s->c.block_last_index[i] >= 0)
cbp |= 1 << (mb_block_count - 1 - i); cbp |= 1 << (mb_block_count - 1 - i);
if (cbp == 0 && !first_mb && s->mv_type == MV_TYPE_16X16 && if (cbp == 0 && !first_mb && s->c.mv_type == MV_TYPE_16X16 &&
(mb_x != s->mb_width - 1 || (mb_x != s->c.mb_width - 1 ||
(mb_y != s->end_mb_y - 1 && IS_MPEG1(s))) && (mb_y != s->c.end_mb_y - 1 && IS_MPEG1(s))) &&
((s->pict_type == AV_PICTURE_TYPE_P && (motion_x | motion_y) == 0) || ((s->c.pict_type == AV_PICTURE_TYPE_P && (motion_x | motion_y) == 0) ||
(s->pict_type == AV_PICTURE_TYPE_B && s->mv_dir == s->last_mv_dir && (s->c.pict_type == AV_PICTURE_TYPE_B && s->c.mv_dir == s->last_mv_dir &&
(((s->mv_dir & MV_DIR_FORWARD) (((s->c.mv_dir & MV_DIR_FORWARD)
? ((s->mv[0][0][0] - s->last_mv[0][0][0]) | ? ((s->c.mv[0][0][0] - s->c.last_mv[0][0][0]) |
(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) | (s->c.mv[0][0][1] - s->c.last_mv[0][0][1])) : 0) |
((s->mv_dir & MV_DIR_BACKWARD) ((s->c.mv_dir & MV_DIR_BACKWARD)
? ((s->mv[1][0][0] - s->last_mv[1][0][0]) | ? ((s->c.mv[1][0][0] - s->c.last_mv[1][0][0]) |
(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) { (s->c.mv[1][0][1] - s->c.last_mv[1][0][1])) : 0)) == 0))) {
s->mb_skip_run++; s->c.mb_skip_run++;
s->qscale -= s->dquant; s->c.qscale -= s->dquant;
s->misc_bits++; s->misc_bits++;
s->last_bits++; s->last_bits++;
if (s->pict_type == AV_PICTURE_TYPE_P) { if (s->c.pict_type == AV_PICTURE_TYPE_P) {
s->last_mv[0][0][0] = s->c.last_mv[0][0][0] =
s->last_mv[0][0][1] = s->c.last_mv[0][0][1] =
s->last_mv[0][1][0] = s->c.last_mv[0][1][0] =
s->last_mv[0][1][1] = 0; s->c.last_mv[0][1][1] = 0;
} }
} else { } else {
if (first_mb) { if (first_mb) {
av_assert0(s->mb_skip_run == 0); av_assert0(s->c.mb_skip_run == 0);
encode_mb_skip_run(s, s->mb_x); encode_mb_skip_run(s, s->c.mb_x);
} else { } else {
encode_mb_skip_run(s, s->mb_skip_run); encode_mb_skip_run(s, s->c.mb_skip_run);
} }
if (s->pict_type == AV_PICTURE_TYPE_I) { if (s->c.pict_type == AV_PICTURE_TYPE_I) {
if (s->dquant && cbp) { if (s->dquant && cbp) {
/* macroblock_type: macroblock_quant = 1 */ /* macroblock_type: macroblock_quant = 1 */
put_mb_modes(s, 2, 1, 0, 0); put_mb_modes(s, 2, 1, 0, 0);
@ -710,23 +710,23 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
} else { } else {
/* macroblock_type: macroblock_quant = 0 */ /* macroblock_type: macroblock_quant = 0 */
put_mb_modes(s, 1, 1, 0, 0); put_mb_modes(s, 1, 1, 0, 0);
s->qscale -= s->dquant; s->c.qscale -= s->dquant;
} }
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
s->i_count++; s->i_count++;
} else if (s->mb_intra) { } else if (s->c.mb_intra) {
if (s->dquant && cbp) { if (s->dquant && cbp) {
put_mb_modes(s, 6, 0x01, 0, 0); put_mb_modes(s, 6, 0x01, 0, 0);
put_qscale(s); put_qscale(s);
} else { } else {
put_mb_modes(s, 5, 0x03, 0, 0); put_mb_modes(s, 5, 0x03, 0, 0);
s->qscale -= s->dquant; s->c.qscale -= s->dquant;
} }
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
s->i_count++; s->i_count++;
memset(s->last_mv, 0, sizeof(s->last_mv)); memset(s->c.last_mv, 0, sizeof(s->c.last_mv));
} else if (s->pict_type == AV_PICTURE_TYPE_P) { } else if (s->c.pict_type == AV_PICTURE_TYPE_P) {
if (s->mv_type == MV_TYPE_16X16) { if (s->c.mv_type == MV_TYPE_16X16) {
if (cbp != 0) { if (cbp != 0) {
if ((motion_x | motion_y) == 0) { if ((motion_x | motion_y) == 0) {
if (s->dquant) { if (s->dquant) {
@ -748,34 +748,34 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
// RAL: f_code parameter added // RAL: f_code parameter added
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
motion_x - s->last_mv[0][0][0], motion_x - s->c.last_mv[0][0][0],
s->f_code); s->c.f_code);
// RAL: f_code parameter added // RAL: f_code parameter added
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
motion_y - s->last_mv[0][0][1], motion_y - s->c.last_mv[0][0][1],
s->f_code); s->c.f_code);
s->mv_bits += get_bits_diff(s); s->mv_bits += get_bits_diff(s);
} }
} else { } else {
put_bits(&s->pb, 3, 1); /* motion only */ put_bits(&s->pb, 3, 1); /* motion only */
if (!s->frame_pred_frame_dct) if (!s->c.frame_pred_frame_dct)
put_bits(&s->pb, 2, 2); /* motion_type: frame */ put_bits(&s->pb, 2, 2); /* motion_type: frame */
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
// RAL: f_code parameter added // RAL: f_code parameter added
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
motion_x - s->last_mv[0][0][0], motion_x - s->c.last_mv[0][0][0],
s->f_code); s->c.f_code);
// RAL: f_code parameter added // RAL: f_code parameter added
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
motion_y - s->last_mv[0][0][1], motion_y - s->c.last_mv[0][0][1],
s->f_code); s->c.f_code);
s->qscale -= s->dquant; s->c.qscale -= s->dquant;
s->mv_bits += get_bits_diff(s); s->mv_bits += get_bits_diff(s);
} }
s->last_mv[0][1][0] = s->last_mv[0][0][0] = motion_x; s->c.last_mv[0][1][0] = s->c.last_mv[0][0][0] = motion_x;
s->last_mv[0][1][1] = s->last_mv[0][0][1] = motion_y; s->c.last_mv[0][1][1] = s->c.last_mv[0][0][1] = motion_y;
} else { } else {
av_assert2(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD); av_assert2(!s->c.frame_pred_frame_dct && s->c.mv_type == MV_TYPE_FIELD);
if (cbp) { if (cbp) {
if (s->dquant) { if (s->dquant) {
@ -787,19 +787,19 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
} else { } else {
put_bits(&s->pb, 3, 1); /* motion only */ put_bits(&s->pb, 3, 1); /* motion only */
put_bits(&s->pb, 2, 1); /* motion_type: field */ put_bits(&s->pb, 2, 1); /* motion_type: field */
s->qscale -= s->dquant; s->c.qscale -= s->dquant;
} }
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
put_bits(&s->pb, 1, s->field_select[0][i]); put_bits(&s->pb, 1, s->c.field_select[0][i]);
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[0][i][0] - s->last_mv[0][i][0], s->c.mv[0][i][0] - s->c.last_mv[0][i][0],
s->f_code); s->c.f_code);
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[0][i][1] - (s->last_mv[0][i][1] >> 1), s->c.mv[0][i][1] - (s->c.last_mv[0][i][1] >> 1),
s->f_code); s->c.f_code);
s->last_mv[0][i][0] = s->mv[0][i][0]; s->c.last_mv[0][i][0] = s->c.mv[0][i][0];
s->last_mv[0][i][1] = 2 * s->mv[0][i][1]; s->c.last_mv[0][i][1] = 2 * s->c.mv[0][i][1];
} }
s->mv_bits += get_bits_diff(s); s->mv_bits += get_bits_diff(s);
} }
@ -816,91 +816,91 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
} }
} }
} else { } else {
if (s->mv_type == MV_TYPE_16X16) { if (s->c.mv_type == MV_TYPE_16X16) {
if (cbp) { // With coded bloc pattern if (cbp) { // With coded bloc pattern
if (s->dquant) { if (s->dquant) {
if (s->mv_dir == MV_DIR_FORWARD) if (s->c.mv_dir == MV_DIR_FORWARD)
put_mb_modes(s, 6, 3, 1, 0); put_mb_modes(s, 6, 3, 1, 0);
else else
put_mb_modes(s, 8 - s->mv_dir, 2, 1, 0); put_mb_modes(s, 8 - s->c.mv_dir, 2, 1, 0);
put_qscale(s); put_qscale(s);
} else { } else {
put_mb_modes(s, 5 - s->mv_dir, 3, 1, 0); put_mb_modes(s, 5 - s->c.mv_dir, 3, 1, 0);
} }
} else { // No coded bloc pattern } else { // No coded bloc pattern
put_bits(&s->pb, 5 - s->mv_dir, 2); put_bits(&s->pb, 5 - s->c.mv_dir, 2);
if (!s->frame_pred_frame_dct) if (!s->c.frame_pred_frame_dct)
put_bits(&s->pb, 2, 2); /* motion_type: frame */ put_bits(&s->pb, 2, 2); /* motion_type: frame */
s->qscale -= s->dquant; s->c.qscale -= s->dquant;
} }
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
if (s->mv_dir & MV_DIR_FORWARD) { if (s->c.mv_dir & MV_DIR_FORWARD) {
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[0][0][0] - s->last_mv[0][0][0], s->c.mv[0][0][0] - s->c.last_mv[0][0][0],
s->f_code); s->c.f_code);
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[0][0][1] - s->last_mv[0][0][1], s->c.mv[0][0][1] - s->c.last_mv[0][0][1],
s->f_code); s->c.f_code);
s->last_mv[0][0][0] = s->c.last_mv[0][0][0] =
s->last_mv[0][1][0] = s->mv[0][0][0]; s->c.last_mv[0][1][0] = s->c.mv[0][0][0];
s->last_mv[0][0][1] = s->c.last_mv[0][0][1] =
s->last_mv[0][1][1] = s->mv[0][0][1]; s->c.last_mv[0][1][1] = s->c.mv[0][0][1];
} }
if (s->mv_dir & MV_DIR_BACKWARD) { if (s->c.mv_dir & MV_DIR_BACKWARD) {
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[1][0][0] - s->last_mv[1][0][0], s->c.mv[1][0][0] - s->c.last_mv[1][0][0],
s->b_code); s->c.b_code);
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[1][0][1] - s->last_mv[1][0][1], s->c.mv[1][0][1] - s->c.last_mv[1][0][1],
s->b_code); s->c.b_code);
s->last_mv[1][0][0] = s->c.last_mv[1][0][0] =
s->last_mv[1][1][0] = s->mv[1][0][0]; s->c.last_mv[1][1][0] = s->c.mv[1][0][0];
s->last_mv[1][0][1] = s->c.last_mv[1][0][1] =
s->last_mv[1][1][1] = s->mv[1][0][1]; s->c.last_mv[1][1][1] = s->c.mv[1][0][1];
} }
} else { } else {
av_assert2(s->mv_type == MV_TYPE_FIELD); av_assert2(s->c.mv_type == MV_TYPE_FIELD);
av_assert2(!s->frame_pred_frame_dct); av_assert2(!s->c.frame_pred_frame_dct);
if (cbp) { // With coded bloc pattern if (cbp) { // With coded bloc pattern
if (s->dquant) { if (s->dquant) {
if (s->mv_dir == MV_DIR_FORWARD) if (s->c.mv_dir == MV_DIR_FORWARD)
put_mb_modes(s, 6, 3, 1, 1); put_mb_modes(s, 6, 3, 1, 1);
else else
put_mb_modes(s, 8 - s->mv_dir, 2, 1, 1); put_mb_modes(s, 8 - s->c.mv_dir, 2, 1, 1);
put_qscale(s); put_qscale(s);
} else { } else {
put_mb_modes(s, 5 - s->mv_dir, 3, 1, 1); put_mb_modes(s, 5 - s->c.mv_dir, 3, 1, 1);
} }
} else { // No coded bloc pattern } else { // No coded bloc pattern
put_bits(&s->pb, 5 - s->mv_dir, 2); put_bits(&s->pb, 5 - s->c.mv_dir, 2);
put_bits(&s->pb, 2, 1); /* motion_type: field */ put_bits(&s->pb, 2, 1); /* motion_type: field */
s->qscale -= s->dquant; s->c.qscale -= s->dquant;
} }
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
if (s->mv_dir & MV_DIR_FORWARD) { if (s->c.mv_dir & MV_DIR_FORWARD) {
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
put_bits(&s->pb, 1, s->field_select[0][i]); put_bits(&s->pb, 1, s->c.field_select[0][i]);
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[0][i][0] - s->last_mv[0][i][0], s->c.mv[0][i][0] - s->c.last_mv[0][i][0],
s->f_code); s->c.f_code);
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[0][i][1] - (s->last_mv[0][i][1] >> 1), s->c.mv[0][i][1] - (s->c.last_mv[0][i][1] >> 1),
s->f_code); s->c.f_code);
s->last_mv[0][i][0] = s->mv[0][i][0]; s->c.last_mv[0][i][0] = s->c.mv[0][i][0];
s->last_mv[0][i][1] = s->mv[0][i][1] * 2; s->c.last_mv[0][i][1] = s->c.mv[0][i][1] * 2;
} }
} }
if (s->mv_dir & MV_DIR_BACKWARD) { if (s->c.mv_dir & MV_DIR_BACKWARD) {
for (i = 0; i < 2; i++) { for (i = 0; i < 2; i++) {
put_bits(&s->pb, 1, s->field_select[1][i]); put_bits(&s->pb, 1, s->c.field_select[1][i]);
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[1][i][0] - s->last_mv[1][i][0], s->c.mv[1][i][0] - s->c.last_mv[1][i][0],
s->b_code); s->c.b_code);
mpeg1_encode_motion(s, mpeg1_encode_motion(s,
s->mv[1][i][1] - (s->last_mv[1][i][1] >> 1), s->c.mv[1][i][1] - (s->c.last_mv[1][i][1] >> 1),
s->b_code); s->c.b_code);
s->last_mv[1][i][0] = s->mv[1][i][0]; s->c.last_mv[1][i][0] = s->c.mv[1][i][0];
s->last_mv[1][i][1] = s->mv[1][i][1] * 2; s->c.last_mv[1][i][1] = s->c.mv[1][i][1] * 2;
} }
} }
} }
@ -921,20 +921,20 @@ static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
for (i = 0; i < mb_block_count; i++) for (i = 0; i < mb_block_count; i++)
if (cbp & (1 << (mb_block_count - 1 - i))) if (cbp & (1 << (mb_block_count - 1 - i)))
mpeg1_encode_block(s, block[i], i); mpeg1_encode_block(s, block[i], i);
s->mb_skip_run = 0; s->c.mb_skip_run = 0;
if (s->mb_intra) if (s->c.mb_intra)
s->i_tex_bits += get_bits_diff(s); s->i_tex_bits += get_bits_diff(s);
else else
s->p_tex_bits += get_bits_diff(s); s->p_tex_bits += get_bits_diff(s);
} }
} }
static void mpeg12_encode_mb(MpegEncContext *s, int16_t block[][64], static void mpeg12_encode_mb(MPVEncContext *const s, int16_t block[][64],
int motion_x, int motion_y) int motion_x, int motion_y)
{ {
if (!s->mb_intra) if (!s->c.mb_intra)
s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128 << s->intra_dc_precision; s->c.last_dc[0] = s->c.last_dc[1] = s->c.last_dc[2] = 128 << s->c.intra_dc_precision;
if (s->chroma_format == CHROMA_420) if (s->c.chroma_format == CHROMA_420)
mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 6, 1); mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 6, 1);
else else
mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 8, 0); mpeg1_encode_mb_internal(s, block, motion_x, motion_y, 8, 0);
@ -1048,7 +1048,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
static AVOnce init_static_once = AV_ONCE_INIT; static AVOnce init_static_once = AV_ONCE_INIT;
MPEG12EncContext *const mpeg12 = avctx->priv_data; MPEG12EncContext *const mpeg12 = avctx->priv_data;
MPVMainEncContext *const m = &mpeg12->mpeg; MPVMainEncContext *const m = &mpeg12->mpeg;
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
int ret; int ret;
int max_size = avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 16383 : 4095; int max_size = avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 16383 : 4095;
@ -1071,7 +1071,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
} }
} }
if (s->q_scale_type == 1) { if (s->c.q_scale_type == 1) {
if (avctx->qmax > 28) { if (avctx->qmax > 28) {
av_log(avctx, AV_LOG_ERROR, av_log(avctx, AV_LOG_ERROR,
"non linear quant only supports qmax <= 28 currently\n"); "non linear quant only supports qmax <= 28 currently\n");
@ -1113,7 +1113,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
m->encode_picture_header = mpeg1_encode_picture_header; m->encode_picture_header = mpeg1_encode_picture_header;
s->encode_mb = mpeg12_encode_mb; s->encode_mb = mpeg12_encode_mb;
s->me.mv_penalty = mv_penalty; s->c.me.mv_penalty = mv_penalty;
m->fcode_tab = fcode_tab + MAX_MV; m->fcode_tab = fcode_tab + MAX_MV;
if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) { if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
s->min_qcoeff = -255; s->min_qcoeff = -255;
@ -1121,9 +1121,9 @@ static av_cold int encode_init(AVCodecContext *avctx)
} else { } else {
s->min_qcoeff = -2047; s->min_qcoeff = -2047;
s->max_qcoeff = 2047; s->max_qcoeff = 2047;
s->mpeg_quant = 1; s->c.mpeg_quant = 1;
} }
if (s->intra_vlc_format) { if (s->c.intra_vlc_format) {
s->intra_ac_vlc_length = s->intra_ac_vlc_length =
s->intra_ac_vlc_last_length = uni_mpeg2_ac_vlc_len; s->intra_ac_vlc_last_length = uni_mpeg2_ac_vlc_len;
} else { } else {
@ -1138,7 +1138,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
return ret; return ret;
if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO && if (avctx->codec_id == AV_CODEC_ID_MPEG1VIDEO &&
s->thread_context[s->slice_context_count - 1]->start_mb_y > s->c.thread_context[s->c.slice_context_count - 1]->start_mb_y >
SLICE_MAX_START_CODE - SLICE_MIN_START_CODE) { SLICE_MAX_START_CODE - SLICE_MIN_START_CODE) {
// MPEG-1 slices must not start at a MB row number that would make // MPEG-1 slices must not start at a MB row number that would make
// their start code > SLICE_MAX_START_CODE. So make the last slice // their start code > SLICE_MAX_START_CODE. So make the last slice
@ -1148,15 +1148,15 @@ static av_cold int encode_init(AVCodecContext *avctx)
"the case in which there is no work to do for some " "the case in which there is no work to do for some "
"slice contexts."); "slice contexts.");
const int mb_height = SLICE_MAX_START_CODE - SLICE_MIN_START_CODE; const int mb_height = SLICE_MAX_START_CODE - SLICE_MIN_START_CODE;
const int nb_slices = s->slice_context_count - 1; const int nb_slices = s->c.slice_context_count - 1;
s->thread_context[nb_slices]->start_mb_y = mb_height; s->c.thread_context[nb_slices]->start_mb_y = mb_height;
av_assert1(nb_slices >= 1); av_assert1(nb_slices >= 1);
for (int i = 0; i < nb_slices; i++) { for (int i = 0; i < nb_slices; i++) {
s->thread_context[i]->start_mb_y = s->c.thread_context[i]->start_mb_y =
(mb_height * (i ) + nb_slices / 2) / nb_slices; (mb_height * (i ) + nb_slices / 2) / nb_slices;
s->thread_context[i]->end_mb_y = s->c.thread_context[i]->end_mb_y =
(mb_height * (i + 1) + nb_slices / 2) / nb_slices; (mb_height * (i + 1) + nb_slices / 2) / nb_slices;
} }
} }
@ -1229,9 +1229,9 @@ static const AVOption mpeg1_options[] = {
static const AVOption mpeg2_options[] = { static const AVOption mpeg2_options[] = {
COMMON_OPTS COMMON_OPTS
{ "intra_vlc", "Use MPEG-2 intra VLC table.", { "intra_vlc", "Use MPEG-2 intra VLC table.",
FF_MPV_OFFSET(intra_vlc_format), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, FF_MPV_OFFSET(c.intra_vlc_format), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
{ "non_linear_quant", "Use nonlinear quantizer.", FF_MPV_OFFSET(q_scale_type), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, { "non_linear_quant", "Use nonlinear quantizer.", FF_MPV_OFFSET(c.q_scale_type), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
{ "alternate_scan", "Enable alternate scantable.", FF_MPV_OFFSET(alternate_scan), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, { "alternate_scan", "Enable alternate scantable.", FF_MPV_OFFSET(c.alternate_scan), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
{ "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE }, { "a53cc", "Use A53 Closed Captions (if available)", OFFSET(a53_cc), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE },
{ "seq_disp_ext", "Write sequence_display_extension blocks.", OFFSET(seq_disp_ext), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE, .unit = "seq_disp_ext" }, { "seq_disp_ext", "Write sequence_display_extension blocks.", OFFSET(seq_disp_ext), AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1, VE, .unit = "seq_disp_ext" },
{ "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "seq_disp_ext" }, { "auto", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = -1}, 0, 0, VE, .unit = "seq_disp_ext" },

View File

@ -24,16 +24,16 @@
#include <stdint.h> #include <stdint.h>
#include "mpegvideo.h" #include "mpegvideoenc.h"
#include "mpegvideodata.h" #include "mpegvideodata.h"
void ff_mpeg1_encode_slice_header(MpegEncContext *s); void ff_mpeg1_encode_slice_header(MPVEncContext *s);
// Must not be called before intra_dc_precision has been sanitized in ff_mpv_encode_init() // Must not be called before intra_dc_precision has been sanitized in ff_mpv_encode_init()
static inline void ff_mpeg1_encode_init(MpegEncContext *s) static inline void ff_mpeg1_encode_init(MPVEncContext *s)
{ {
s->y_dc_scale_table = s->c.y_dc_scale_table =
s->c_dc_scale_table = ff_mpeg12_dc_scale_table[s->intra_dc_precision]; s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[s->c.intra_dc_precision];
} }
#endif /* AVCODEC_MPEG12ENC_H */ #endif /* AVCODEC_MPEG12ENC_H */

File diff suppressed because it is too large Load Diff

View File

@ -27,14 +27,14 @@
#include "put_bits.h" #include "put_bits.h"
typedef struct MpegEncContext MpegEncContext; typedef struct MPVEncContext MPVEncContext;
void ff_set_mpeg4_time(MpegEncContext *s); void ff_set_mpeg4_time(MPVEncContext *s);
void ff_mpeg4_encode_video_packet_header(MpegEncContext *s); void ff_mpeg4_encode_video_packet_header(MPVEncContext *s);
void ff_mpeg4_stuffing(PutBitContext *pbc); void ff_mpeg4_stuffing(PutBitContext *pbc);
void ff_mpeg4_init_partitions(MpegEncContext *s); void ff_mpeg4_init_partitions(MPVEncContext *s);
void ff_mpeg4_merge_partitions(MpegEncContext *s); void ff_mpeg4_merge_partitions(MPVEncContext *s);
void ff_clean_mpeg4_qscales(MpegEncContext *s); void ff_clean_mpeg4_qscales(MPVEncContext *s);
#endif #endif

View File

@ -435,9 +435,6 @@ static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
COPY(start_mb_y); COPY(start_mb_y);
COPY(end_mb_y); COPY(end_mb_y);
COPY(me.map_generation); COPY(me.map_generation);
COPY(dct_error_sum);
COPY(dct_count[0]);
COPY(dct_count[1]);
COPY(ac_val_base); COPY(ac_val_base);
COPY(ac_val[0]); COPY(ac_val[0]);
COPY(ac_val[1]); COPY(ac_val[1]);

View File

@ -30,18 +30,13 @@
#include "blockdsp.h" #include "blockdsp.h"
#include "error_resilience.h" #include "error_resilience.h"
#include "fdctdsp.h"
#include "get_bits.h" #include "get_bits.h"
#include "h264chroma.h" #include "h264chroma.h"
#include "h263dsp.h" #include "h263dsp.h"
#include "hpeldsp.h" #include "hpeldsp.h"
#include "idctdsp.h" #include "idctdsp.h"
#include "me_cmp.h"
#include "motion_est.h" #include "motion_est.h"
#include "mpegpicture.h" #include "mpegpicture.h"
#include "mpegvideoencdsp.h"
#include "pixblockdsp.h"
#include "put_bits.h"
#include "qpeldsp.h" #include "qpeldsp.h"
#include "videodsp.h" #include "videodsp.h"
@ -105,8 +100,6 @@ typedef struct MpegEncContext {
enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
int encoding; ///< true if we are encoding (vs decoding) int encoding; ///< true if we are encoding (vs decoding)
int luma_elim_threshold;
int chroma_elim_threshold;
int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically int workaround_bugs; ///< workaround bugs in encoders which cannot be detected automatically
int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag int codec_tag; ///< internal codec_tag upper case converted from avctx codec_tag
/* the following fields are managed internally by the encoder */ /* the following fields are managed internally by the encoder */
@ -125,12 +118,12 @@ typedef struct MpegEncContext {
BufferPoolContext buffer_pools; BufferPoolContext buffer_pools;
/** bit output */
PutBitContext pb;
int start_mb_y; ///< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) int start_mb_y; ///< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
int end_mb_y; ///< end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y) int end_mb_y; ///< end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
struct MpegEncContext *thread_context[MAX_THREADS]; union {
struct MpegEncContext *thread_context[MAX_THREADS];
struct MPVEncContext *enc_contexts[MAX_THREADS];
};
int slice_context_count; ///< number of used thread_contexts int slice_context_count; ///< number of used thread_contexts
/** /**
@ -145,12 +138,6 @@ typedef struct MpegEncContext {
*/ */
MPVWorkPicture next_pic; MPVWorkPicture next_pic;
/**
* Reference to the source picture for encoding.
* note, linesize & data, might not match the source picture (for field pictures)
*/
AVFrame *new_pic;
/** /**
* copy of the current picture structure. * copy of the current picture structure.
* note, linesize & data, might not match the current picture (for field pictures) * note, linesize & data, might not match the current picture (for field pictures)
@ -181,46 +168,24 @@ typedef struct MpegEncContext {
int chroma_qscale; ///< chroma QP int chroma_qscale; ///< chroma QP
unsigned int lambda; ///< Lagrange multiplier used in rate distortion unsigned int lambda; ///< Lagrange multiplier used in rate distortion
unsigned int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT unsigned int lambda2; ///< (lambda*lambda) >> FF_LAMBDA_SHIFT
int *lambda_table;
int adaptive_quant; ///< use adaptive quantization
int dquant; ///< qscale difference to prev qscale
int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ... int pict_type; ///< AV_PICTURE_TYPE_I, AV_PICTURE_TYPE_P, AV_PICTURE_TYPE_B, ...
int droppable; int droppable;
int skipdct; ///< skip dct and code zero residual
/* motion compensation */ /* motion compensation */
int unrestricted_mv; ///< mv can point outside of the coded picture int unrestricted_mv; ///< mv can point outside of the coded picture
int h263_long_vectors; ///< use horrible H.263v1 long vector mode int h263_long_vectors; ///< use horrible H.263v1 long vector mode
BlockDSPContext bdsp; BlockDSPContext bdsp;
FDCTDSPContext fdsp;
H264ChromaContext h264chroma; H264ChromaContext h264chroma;
HpelDSPContext hdsp; HpelDSPContext hdsp;
IDCTDSPContext idsp; IDCTDSPContext idsp;
MpegvideoEncDSPContext mpvencdsp;
PixblockDSPContext pdsp;
QpelDSPContext qdsp; QpelDSPContext qdsp;
VideoDSPContext vdsp; VideoDSPContext vdsp;
H263DSPContext h263dsp; H263DSPContext h263dsp;
int f_code; ///< forward MV resolution int f_code; ///< forward MV resolution
int b_code; ///< backward MV resolution for B-frames (MPEG-4) int b_code; ///< backward MV resolution for B-frames (MPEG-4)
int16_t (*p_field_mv_table_base)[2]; int16_t (*p_field_mv_table_base)[2];
int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) P-frame encoding
int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode B-frame encoding
int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode B-frame encoding
int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode B-frame encoding
int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode B-frame encoding
int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode B-frame encoding
int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced P-frame encoding int16_t (*p_field_mv_table[2][2])[2]; ///< MV table (2MV per MB) interlaced P-frame encoding
int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced B-frame encoding
uint8_t (*p_field_select_table[2]); ///< Only the first element is allocated
uint8_t (*b_field_select_table[2][2]); ///< allocated jointly with p_field_select_table
/* The following fields are encoder-only */
uint16_t *mb_var; ///< Table for MB variances
uint16_t *mc_mb_var; ///< Table for motion compensated MB variances
uint8_t *mb_mean; ///< Table for MB luminance
uint64_t encoding_error[MPV_MAX_PLANES];
int mv_dir; int mv_dir;
#define MV_DIR_FORWARD 1 #define MV_DIR_FORWARD 1
@ -251,7 +216,6 @@ typedef struct MpegEncContext {
int mb_x, mb_y; int mb_x, mb_y;
int mb_skip_run; int mb_skip_run;
int mb_intra; int mb_intra;
uint16_t *mb_type; ///< Table for candidate MB types for encoding (defines in mpegvideoenc.h)
int block_index[6]; ///< index to current MB in block based arrays with edges int block_index[6]; ///< index to current MB in block based arrays with edges
int block_wrap[6]; int block_wrap[6];
@ -265,43 +229,6 @@ typedef struct MpegEncContext {
uint16_t inter_matrix[64]; uint16_t inter_matrix[64];
uint16_t chroma_inter_matrix[64]; uint16_t chroma_inter_matrix[64];
int intra_quant_bias; ///< bias for the quantizer
int inter_quant_bias; ///< bias for the quantizer
int min_qcoeff; ///< minimum encodable coefficient
int max_qcoeff; ///< maximum encodable coefficient
int ac_esc_length; ///< num of bits needed to encode the longest esc
uint8_t *intra_ac_vlc_length;
uint8_t *intra_ac_vlc_last_length;
uint8_t *intra_chroma_ac_vlc_length;
uint8_t *intra_chroma_ac_vlc_last_length;
uint8_t *inter_ac_vlc_length;
uint8_t *inter_ac_vlc_last_length;
uint8_t *luma_dc_vlc_length;
int coded_score[12];
/** precomputed matrix (combine qscale and DCT renorm) */
int (*q_intra_matrix)[64];
int (*q_chroma_intra_matrix)[64];
int (*q_inter_matrix)[64];
/** identical to the above but for MMX & these are not permutated, second 64 entries are bias*/
uint16_t (*q_intra_matrix16)[2][64];
uint16_t (*q_chroma_intra_matrix16)[2][64];
uint16_t (*q_inter_matrix16)[2][64];
/* noise reduction */
int (*dct_error_sum)[64];
int dct_count[2];
uint16_t (*dct_offset)[64];
/* statistics, used for 2-pass encoding */
int mv_bits;
int i_tex_bits;
int p_tex_bits;
int i_count;
int misc_bits; ///< cbp, mb_type
int last_bits; ///< temp var used for calculating the above vars
/* error concealment / resync */ /* error concealment / resync */
int resync_mb_x; ///< x position of last resync marker int resync_mb_x; ///< x position of last resync marker
int resync_mb_y; ///< y position of last resync marker int resync_mb_y; ///< y position of last resync marker
@ -311,10 +238,6 @@ typedef struct MpegEncContext {
/* H.263 specific */ /* H.263 specific */
int gob_index; int gob_index;
int obmc; ///< overlapped block motion compensation int obmc; ///< overlapped block motion compensation
int mb_info; ///< interval for outputting info about mb offsets as side data
int prev_mb_info, last_mb_info;
uint8_t *mb_info_ptr;
int mb_info_size;
int ehc_mode; int ehc_mode;
/* H.263+ specific */ /* H.263+ specific */
@ -342,8 +265,6 @@ typedef struct MpegEncContext {
int data_partitioning; ///< data partitioning flag from header int data_partitioning; ///< data partitioning flag from header
int partitioned_frame; ///< is current frame partitioned int partitioned_frame; ///< is current frame partitioned
int low_delay; ///< no reordering needed / has no B-frames int low_delay; ///< no reordering needed / has no B-frames
PutBitContext tex_pb; ///< used for data partitioned VOPs
PutBitContext pb2; ///< used for data partitioned VOPs
int mpeg_quant; int mpeg_quant;
int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG-4 int padding_bug_score; ///< used to detect the VERY common padding bug in MPEG-4
@ -354,10 +275,6 @@ typedef struct MpegEncContext {
int rv10_version; ///< RV10 version: 0 or 3 int rv10_version; ///< RV10 version: 0 or 3
int rv10_first_dc_coded[3]; int rv10_first_dc_coded[3];
/* MJPEG specific */
struct MJpegContext *mjpeg_ctx;
int esc_pos;
/* MSMPEG4 specific */ /* MSMPEG4 specific */
int slice_height; ///< in macroblocks int slice_height; ///< in macroblocks
int first_slice_line; ///< used in MPEG-4 too to handle resync markers int first_slice_line; ///< used in MPEG-4 too to handle resync markers
@ -371,16 +288,12 @@ typedef struct MpegEncContext {
MSMP4_WMV2, MSMP4_WMV2,
MSMP4_VC1, ///< for VC1 (image), WMV3 (image) and MSS2. MSMP4_VC1, ///< for VC1 (image), WMV3 (image) and MSS2.
} msmpeg4_version; } msmpeg4_version;
int esc3_level_length;
int inter_intra_pred; int inter_intra_pred;
int mspel; int mspel;
/* decompression specific */ /* decompression specific */
GetBitContext gb; GetBitContext gb;
/* MPEG-1 specific */
int last_mv_dir; ///< last mv_dir, used for B-frame encoding
/* MPEG-2-specific - I wished not to have to support this mess. */ /* MPEG-2-specific - I wished not to have to support this mess. */
int progressive_sequence; int progressive_sequence;
int mpeg_f_code[2][2]; int mpeg_f_code[2][2];
@ -409,19 +322,9 @@ typedef struct MpegEncContext {
int interlaced_dct; int interlaced_dct;
int first_field; ///< is 1 for the first field of a field picture 0 otherwise int first_field; ///< is 1 for the first field of a field picture 0 otherwise
/* RTP specific */
int rtp_mode;
int rtp_payload_size;
uint8_t *ptr_lastgob;
int16_t (*block)[64]; ///< points to one of the following blocks int16_t (*block)[64]; ///< points to one of the following blocks
int16_t (*blocks)[12][64]; // for HQ mode we need to keep the best block int16_t (*blocks)[12][64]; // for HQ mode we need to keep the best block
union {
int (*decode_mb)(struct MpegEncContext *s, int16_t block[12][64]); // used by some codecs to avoid a switch() int (*decode_mb)(struct MpegEncContext *s, int16_t block[12][64]); // used by some codecs to avoid a switch()
void (*encode_mb)(struct MpegEncContext *s, int16_t block[][64],
int motion_x, int motion_y);
};
#define SLICE_OK 0 #define SLICE_OK 0
#define SLICE_ERROR -1 #define SLICE_ERROR -1
@ -444,20 +347,6 @@ typedef struct MpegEncContext {
int16_t *block/*align 16*/, int n, int qscale); int16_t *block/*align 16*/, int n, int qscale);
void (*dct_unquantize_inter)(struct MpegEncContext *s, // unquantizer to use (MPEG-4 can use both) void (*dct_unquantize_inter)(struct MpegEncContext *s, // unquantizer to use (MPEG-4 can use both)
int16_t *block/*align 16*/, int n, int qscale); int16_t *block/*align 16*/, int n, int qscale);
int (*dct_quantize)(struct MpegEncContext *s, int16_t *block/*align 16*/, int n, int qscale, int *overflow);
void (*denoise_dct)(struct MpegEncContext *s, int16_t *block);
int mpv_flags; ///< flags set by private options
int quantizer_noise_shaping;
me_cmp_func ildct_cmp[2]; ///< 0 = intra, 1 = non-intra
me_cmp_func n_sse_cmp[2]; ///< either SSE or NSSE cmp func
me_cmp_func sad_cmp[2];
me_cmp_func sse_cmp[2];
int (*sum_abs_dctelem)(const int16_t *block);
/// Bitfield containing information which frames to reconstruct.
int frame_reconstruction_bitfield;
/* flag to indicate a reinitialization is required, e.g. after /* flag to indicate a reinitialization is required, e.g. after
* a frame size change */ * a frame size change */
@ -467,10 +356,6 @@ typedef struct MpegEncContext {
unsigned slice_ctx_size; unsigned slice_ctx_size;
ERContext er; ERContext er;
int error_rate;
int intra_penalty;
} MpegEncContext; } MpegEncContext;

View File

@ -136,7 +136,7 @@ int ff_mpeg_update_thread_context(AVCodecContext *dst,
// MPEG-2/interlacing info // MPEG-2/interlacing info
memcpy(&s->progressive_sequence, &s1->progressive_sequence, memcpy(&s->progressive_sequence, &s1->progressive_sequence,
(char *) &s1->rtp_mode - (char *) &s1->progressive_sequence); (char *) &s1->first_field + sizeof(s1->first_field) - (char *) &s1->progressive_sequence);
return 0; return 0;
} }

File diff suppressed because it is too large Load Diff

View File

@ -32,13 +32,141 @@
#include "libavutil/avassert.h" #include "libavutil/avassert.h"
#include "libavutil/opt.h" #include "libavutil/opt.h"
#include "fdctdsp.h"
#include "mpegvideo.h" #include "mpegvideo.h"
#include "mpegvideoencdsp.h"
#include "pixblockdsp.h"
#include "put_bits.h"
#include "ratecontrol.h" #include "ratecontrol.h"
#define MPVENC_MAX_B_FRAMES 16 #define MPVENC_MAX_B_FRAMES 16
typedef struct MPVEncContext {
MpegEncContext c; ///< the common base context
/** bit output */
PutBitContext pb;
int *lambda_table;
int adaptive_quant; ///< use adaptive quantization
int dquant; ///< qscale difference to prev qscale
int skipdct; ///< skip dct and code zero residual
int quantizer_noise_shaping;
int luma_elim_threshold;
int chroma_elim_threshold;
int mpv_flags; ///< flags set by private options
/// Bitfield containing information which frames to reconstruct.
int frame_reconstruction_bitfield;
/**
* Reference to the source picture.
*/
AVFrame *new_pic;
FDCTDSPContext fdsp;
MpegvideoEncDSPContext mpvencdsp;
PixblockDSPContext pdsp;
int16_t (*p_mv_table)[2]; ///< MV table (1MV per MB) P-frame
int16_t (*b_forw_mv_table)[2]; ///< MV table (1MV per MB) forward mode B-frame
int16_t (*b_back_mv_table)[2]; ///< MV table (1MV per MB) backward mode B-frame
int16_t (*b_bidir_forw_mv_table)[2]; ///< MV table (1MV per MB) bidir mode B-frame
int16_t (*b_bidir_back_mv_table)[2]; ///< MV table (1MV per MB) bidir mode B-frame
int16_t (*b_direct_mv_table)[2]; ///< MV table (1MV per MB) direct mode B-frame
int16_t (*b_field_mv_table[2][2][2])[2];///< MV table (4MV per MB) interlaced B-frame
uint8_t (*p_field_select_table[2]); ///< Only the first element is allocated
uint8_t (*b_field_select_table[2][2]); ///< allocated jointly with p_field_select_table
uint16_t *mb_type; ///< Table for candidate MB types
uint16_t *mb_var; ///< Table for MB variances
uint16_t *mc_mb_var; ///< Table for motion compensated MB variances
uint8_t *mb_mean; ///< Table for MB luminance
uint64_t encoding_error[MPV_MAX_PLANES];
int intra_quant_bias; ///< bias for the quantizer
int inter_quant_bias; ///< bias for the quantizer
int min_qcoeff; ///< minimum encodable coefficient
int max_qcoeff; ///< maximum encodable coefficient
int ac_esc_length; ///< num of bits needed to encode the longest esc
uint8_t *intra_ac_vlc_length;
uint8_t *intra_ac_vlc_last_length;
uint8_t *intra_chroma_ac_vlc_length;
uint8_t *intra_chroma_ac_vlc_last_length;
uint8_t *inter_ac_vlc_length;
uint8_t *inter_ac_vlc_last_length;
uint8_t *luma_dc_vlc_length;
int coded_score[12];
/** precomputed matrix (combine qscale and DCT renorm) */
int (*q_intra_matrix)[64];
int (*q_chroma_intra_matrix)[64];
int (*q_inter_matrix)[64];
/** identical to the above but for MMX & these are not permutated, second 64 entries are bias*/
uint16_t (*q_intra_matrix16)[2][64];
uint16_t (*q_chroma_intra_matrix16)[2][64];
uint16_t (*q_inter_matrix16)[2][64];
/* noise reduction */
void (*denoise_dct)(struct MPVEncContext *s, int16_t *block);
int (*dct_error_sum)[64];
int dct_count[2];
uint16_t (*dct_offset)[64];
/* statistics, used for 2-pass encoding */
int mv_bits;
int i_tex_bits;
int p_tex_bits;
int i_count;
int misc_bits; ///< cbp, mb_type
int last_bits; ///< temp var used for calculating the above vars
/* H.263 specific */
int mb_info; ///< interval for outputting info about mb offsets as side data
int prev_mb_info, last_mb_info;
int mb_info_size;
uint8_t *mb_info_ptr;
/* MJPEG specific */
struct MJpegContext *mjpeg_ctx;
int esc_pos;
/* MPEG-1 specific */
int last_mv_dir; ///< last mv_dir, used for B-frame encoding
/* MPEG-4 specific */
PutBitContext tex_pb; ///< used for data partitioned VOPs
PutBitContext pb2; ///< used for data partitioned VOPs
/* MSMPEG4 specific */
int esc3_level_length;
/* RTP specific */
int rtp_mode;
int rtp_payload_size;
int error_rate;
uint8_t *ptr_lastgob;
void (*encode_mb)(struct MPVEncContext *s, int16_t block[][64],
int motion_x, int motion_y);
int (*dct_quantize)(struct MPVEncContext *s, int16_t *block/*align 16*/, int n, int qscale, int *overflow);
me_cmp_func ildct_cmp[2]; ///< 0 = intra, 1 = non-intra
me_cmp_func n_sse_cmp[2]; ///< either SSE or NSSE cmp func
me_cmp_func sad_cmp[2];
me_cmp_func sse_cmp[2];
int (*sum_abs_dctelem)(const int16_t *block);
int intra_penalty;
} MPVEncContext;
typedef struct MPVMainEncContext { typedef struct MPVMainEncContext {
MpegEncContext s; ///< The main slicecontext MPVEncContext s; ///< The main slicecontext
int intra_only; ///< if true, only intra pictures are generated int intra_only; ///< if true, only intra pictures are generated
int gop_size; int gop_size;
@ -112,14 +240,14 @@ typedef struct MPVMainEncContext {
int16_t (*mv_table_base)[2]; int16_t (*mv_table_base)[2];
} MPVMainEncContext; } MPVMainEncContext;
static inline const MPVMainEncContext *slice_to_mainenc(const MpegEncContext *s) static inline const MPVMainEncContext *slice_to_mainenc(const MPVEncContext *s)
{ {
#ifdef NO_SLICE_THREADING_HERE #ifdef NO_SLICE_THREADING_HERE
av_assert2(s->slice_context_count <= 1 && av_assert2(s->c.slice_context_count <= 1 &&
!(s->avctx->codec->capabilities & AV_CODEC_CAP_SLICE_THREADS)); !(s->c.avctx->codec->capabilities & AV_CODEC_CAP_SLICE_THREADS));
return (const MPVMainEncContext*)s; return (const MPVMainEncContext*)s;
#else #else
return s->encparent; return s->c.encparent;
#endif #endif
} }
@ -170,7 +298,7 @@ static inline const MPVMainEncContext *slice_to_mainenc(const MpegEncContext *s)
{ "chroma", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_CHROMA }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, .unit = "cmp_func" }, \ { "chroma", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_CHROMA }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, .unit = "cmp_func" }, \
{ "msad", "Sum of absolute differences, median predicted", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_MEDIAN_SAD }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, .unit = "cmp_func" } { "msad", "Sum of absolute differences, median predicted", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_MEDIAN_SAD }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, .unit = "cmp_func" }
#define FF_MPV_OFFSET(x) offsetof(MpegEncContext, x) #define FF_MPV_OFFSET(x) offsetof(MPVEncContext, x)
#define FF_MPV_MAIN_OFFSET(x) offsetof(MPVMainEncContext, x) #define FF_MPV_MAIN_OFFSET(x) offsetof(MPVMainEncContext, x)
#define FF_RC_OFFSET(x) offsetof(MPVMainEncContext, rc_context.x) #define FF_RC_OFFSET(x) offsetof(MPVMainEncContext, rc_context.x)
#define FF_MPV_OPT_FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM) #define FF_MPV_OPT_FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
@ -217,7 +345,7 @@ FF_MPV_OPT_CMP_FUNC, \
#define FF_MPV_COMMON_MOTION_EST_OPTS \ #define FF_MPV_COMMON_MOTION_EST_OPTS \
{ "mv0", "always try a mb with mv=<0,0>", 0, AV_OPT_TYPE_CONST, { .i64 = FF_MPV_FLAG_MV0 }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "mpv_flags" },\ { "mv0", "always try a mb with mv=<0,0>", 0, AV_OPT_TYPE_CONST, { .i64 = FF_MPV_FLAG_MV0 }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "mpv_flags" },\
{"motion_est", "motion estimation algorithm", FF_MPV_OFFSET(me.motion_est), AV_OPT_TYPE_INT, {.i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_XONE, FF_MPV_OPT_FLAGS, .unit = "motion_est" }, \ {"motion_est", "motion estimation algorithm", FF_MPV_OFFSET(c.me.motion_est), AV_OPT_TYPE_INT, {.i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_XONE, FF_MPV_OPT_FLAGS, .unit = "motion_est" }, \
{ "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion_est" }, \ { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion_est" }, \
{ "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion_est" }, \ { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion_est" }, \
{ "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion_est" }, \ { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion_est" }, \
@ -233,21 +361,21 @@ int ff_mpv_encode_init(AVCodecContext *avctx);
int ff_mpv_encode_end(AVCodecContext *avctx); int ff_mpv_encode_end(AVCodecContext *avctx);
int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt, int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
const AVFrame *frame, int *got_packet); const AVFrame *frame, int *got_packet);
int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase); int ff_mpv_reallocate_putbitbuffer(MPVEncContext *s, size_t threshold, size_t size_increase);
void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix); void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
void ff_dct_encode_init(MpegEncContext *s); void ff_dct_encode_init(MPVEncContext *s);
void ff_mpvenc_dct_init_mips(MpegEncContext *s); void ff_mpvenc_dct_init_mips(MPVEncContext *s);
void ff_dct_encode_init_x86(MpegEncContext *s); void ff_dct_encode_init_x86(MPVEncContext *s);
void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[2][64], void ff_convert_matrix(MPVEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[2][64],
const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra); const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra);
void ff_block_permute(int16_t *block, const uint8_t *permutation, void ff_block_permute(int16_t *block, const uint8_t *permutation,
const uint8_t *scantable, int last); const uint8_t *scantable, int last);
static inline int get_bits_diff(MpegEncContext *s) static inline int get_bits_diff(MPVEncContext *s)
{ {
const int bits = put_bits_count(&s->pb); const int bits = put_bits_count(&s->pb);
const int last = s->last_bits; const int last = s->last_bits;

View File

@ -150,7 +150,7 @@ static av_cold void msmpeg4_encode_init_static(void)
static void find_best_tables(MSMPEG4EncContext *ms) static void find_best_tables(MSMPEG4EncContext *ms)
{ {
MpegEncContext *const s = &ms->m.s; MPVEncContext *const s = &ms->m.s;
int i; int i;
int best = 0, best_size = INT_MAX; int best = 0, best_size = INT_MAX;
int chroma_best = 0, best_chroma_size = INT_MAX; int chroma_best = 0, best_chroma_size = INT_MAX;
@ -174,7 +174,7 @@ static void find_best_tables(MSMPEG4EncContext *ms)
int intra_luma_count = ms->ac_stats[1][0][level][run][last]; int intra_luma_count = ms->ac_stats[1][0][level][run][last];
int intra_chroma_count= ms->ac_stats[1][1][level][run][last]; int intra_chroma_count= ms->ac_stats[1][1][level][run][last];
if(s->pict_type==AV_PICTURE_TYPE_I){ if (s->c.pict_type == AV_PICTURE_TYPE_I) {
size += intra_luma_count *rl_length[i ][level][run][last]; size += intra_luma_count *rl_length[i ][level][run][last];
chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last]; chroma_size+= intra_chroma_count*rl_length[i+3][level][run][last];
}else{ }else{
@ -196,16 +196,16 @@ static void find_best_tables(MSMPEG4EncContext *ms)
} }
} }
if(s->pict_type==AV_PICTURE_TYPE_P) chroma_best= best; if (s->c.pict_type == AV_PICTURE_TYPE_P) chroma_best = best;
memset(ms->ac_stats, 0, sizeof(ms->ac_stats)); memset(ms->ac_stats, 0, sizeof(ms->ac_stats));
ms->rl_table_index = best; ms->rl_table_index = best;
ms->rl_chroma_table_index = chroma_best; ms->rl_chroma_table_index = chroma_best;
if (s->pict_type != ms->m.last_non_b_pict_type) { if (s->c.pict_type != ms->m.last_non_b_pict_type) {
ms->rl_table_index= 2; ms->rl_table_index= 2;
if(s->pict_type==AV_PICTURE_TYPE_I) if (s->c.pict_type == AV_PICTURE_TYPE_I)
ms->rl_chroma_table_index = 1; ms->rl_chroma_table_index = 1;
else else
ms->rl_chroma_table_index = 2; ms->rl_chroma_table_index = 2;
@ -217,15 +217,15 @@ static void find_best_tables(MSMPEG4EncContext *ms)
static int msmpeg4_encode_picture_header(MPVMainEncContext *const m) static int msmpeg4_encode_picture_header(MPVMainEncContext *const m)
{ {
MSMPEG4EncContext *const ms = (MSMPEG4EncContext*)m; MSMPEG4EncContext *const ms = (MSMPEG4EncContext*)m;
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
find_best_tables(ms); find_best_tables(ms);
align_put_bits(&s->pb); align_put_bits(&s->pb);
put_bits(&s->pb, 2, s->pict_type - 1); put_bits(&s->pb, 2, s->c.pict_type - 1);
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->c.qscale);
if (s->msmpeg4_version <= MSMP4_V2) { if (s->c.msmpeg4_version <= MSMP4_V2) {
ms->rl_table_index = 2; ms->rl_table_index = 2;
ms->rl_chroma_table_index = 2; ms->rl_chroma_table_index = 2;
} }
@ -234,24 +234,24 @@ static int msmpeg4_encode_picture_header(MPVMainEncContext *const m)
ms->mv_table_index = 1; /* only if P-frame */ ms->mv_table_index = 1; /* only if P-frame */
ms->use_skip_mb_code = 1; /* only if P-frame */ ms->use_skip_mb_code = 1; /* only if P-frame */
ms->per_mb_rl_table = 0; ms->per_mb_rl_table = 0;
if (s->msmpeg4_version == MSMP4_WMV1) if (s->c.msmpeg4_version == MSMP4_WMV1)
s->inter_intra_pred = s->width * s->height < 320*240 && s->c.inter_intra_pred = s->c.width * s->c.height < 320*240 &&
m->bit_rate <= II_BITRATE && m->bit_rate <= II_BITRATE &&
s->pict_type == AV_PICTURE_TYPE_P; s->c.pict_type == AV_PICTURE_TYPE_P;
ff_dlog(s->avctx, "%d %"PRId64" %d %d %d\n", s->pict_type, m->bit_rate, ff_dlog(s->c.avctx, "%d %"PRId64" %d %d %d\n", s->c.pict_type, m->bit_rate,
s->inter_intra_pred, s->width, s->height); s->c.inter_intra_pred, s->c.width, s->c.height);
if (s->pict_type == AV_PICTURE_TYPE_I) { if (s->c.pict_type == AV_PICTURE_TYPE_I) {
s->slice_height= s->mb_height/1; s->c.slice_height = s->c.mb_height/1;
put_bits(&s->pb, 5, 0x16 + s->mb_height/s->slice_height); put_bits(&s->pb, 5, 0x16 + s->c.mb_height/s->c.slice_height);
if (s->msmpeg4_version == MSMP4_WMV1) { if (s->c.msmpeg4_version == MSMP4_WMV1) {
ff_msmpeg4_encode_ext_header(s); ff_msmpeg4_encode_ext_header(s);
if (m->bit_rate > MBAC_BITRATE) if (m->bit_rate > MBAC_BITRATE)
put_bits(&s->pb, 1, ms->per_mb_rl_table); put_bits(&s->pb, 1, ms->per_mb_rl_table);
} }
if (s->msmpeg4_version > MSMP4_V2) { if (s->c.msmpeg4_version > MSMP4_V2) {
if (!ms->per_mb_rl_table){ if (!ms->per_mb_rl_table){
ff_msmpeg4_code012(&s->pb, ms->rl_chroma_table_index); ff_msmpeg4_code012(&s->pb, ms->rl_chroma_table_index);
ff_msmpeg4_code012(&s->pb, ms->rl_table_index); ff_msmpeg4_code012(&s->pb, ms->rl_table_index);
@ -262,10 +262,10 @@ static int msmpeg4_encode_picture_header(MPVMainEncContext *const m)
} else { } else {
put_bits(&s->pb, 1, ms->use_skip_mb_code); put_bits(&s->pb, 1, ms->use_skip_mb_code);
if (s->msmpeg4_version == MSMP4_WMV1 && m->bit_rate > MBAC_BITRATE) if (s->c.msmpeg4_version == MSMP4_WMV1 && m->bit_rate > MBAC_BITRATE)
put_bits(&s->pb, 1, ms->per_mb_rl_table); put_bits(&s->pb, 1, ms->per_mb_rl_table);
if (s->msmpeg4_version > MSMP4_V2) { if (s->c.msmpeg4_version > MSMP4_V2) {
if (!ms->per_mb_rl_table) if (!ms->per_mb_rl_table)
ff_msmpeg4_code012(&s->pb, ms->rl_table_index); ff_msmpeg4_code012(&s->pb, ms->rl_table_index);
@ -281,18 +281,18 @@ static int msmpeg4_encode_picture_header(MPVMainEncContext *const m)
return 0; return 0;
} }
void ff_msmpeg4_encode_ext_header(MpegEncContext * s) void ff_msmpeg4_encode_ext_header(MPVEncContext *const s)
{ {
const MPVMainEncContext *const m = slice_to_mainenc(s); const MPVMainEncContext *const m = slice_to_mainenc(s);
unsigned fps; unsigned fps;
if (s->avctx->framerate.num > 0 && s->avctx->framerate.den > 0) if (s->c.avctx->framerate.num > 0 && s->c.avctx->framerate.den > 0)
fps = s->avctx->framerate.num / s->avctx->framerate.den; fps = s->c.avctx->framerate.num / s->c.avctx->framerate.den;
else { else {
FF_DISABLE_DEPRECATION_WARNINGS FF_DISABLE_DEPRECATION_WARNINGS
fps = s->avctx->time_base.den / s->avctx->time_base.num fps = s->c.avctx->time_base.den / s->c.avctx->time_base.num
#if FF_API_TICKS_PER_FRAME #if FF_API_TICKS_PER_FRAME
/ FFMAX(s->avctx->ticks_per_frame, 1) / FFMAX(s->c.avctx->ticks_per_frame, 1)
#endif #endif
; ;
FF_ENABLE_DEPRECATION_WARNINGS FF_ENABLE_DEPRECATION_WARNINGS
@ -302,16 +302,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
put_bits(&s->pb, 11, FFMIN(m->bit_rate / 1024, 2047)); put_bits(&s->pb, 11, FFMIN(m->bit_rate / 1024, 2047));
if (s->msmpeg4_version >= MSMP4_V3) if (s->c.msmpeg4_version >= MSMP4_V3)
put_bits(&s->pb, 1, s->flipflop_rounding); put_bits(&s->pb, 1, s->c.flipflop_rounding);
else else
av_assert0(!s->flipflop_rounding); av_assert0(!s->c.flipflop_rounding);
} }
void ff_msmpeg4_encode_motion(MSMPEG4EncContext *const ms, void ff_msmpeg4_encode_motion(MSMPEG4EncContext *const ms,
int mx, int my) int mx, int my)
{ {
MpegEncContext *const s = &ms->m.s; MPVEncContext *const s = &ms->m.s;
const uint32_t *const mv_vector_table = mv_vector_tables[ms->mv_table_index]; const uint32_t *const mv_vector_table = mv_vector_tables[ms->mv_table_index];
uint32_t code; uint32_t code;
@ -334,20 +334,20 @@ void ff_msmpeg4_encode_motion(MSMPEG4EncContext *const ms,
put_bits(&s->pb, code & 0xff, code >> 8); put_bits(&s->pb, code & 0xff, code >> 8);
} }
void ff_msmpeg4_handle_slices(MpegEncContext *s){ void ff_msmpeg4_handle_slices(MPVEncContext *const s)
if (s->mb_x == 0) { {
if (s->slice_height && (s->mb_y % s->slice_height) == 0) { if (s->c.mb_x == 0) {
if (s->msmpeg4_version < MSMP4_WMV1) { if (s->c.slice_height && (s->c.mb_y % s->c.slice_height) == 0) {
ff_mpeg4_clean_buffers(s); if (s->c.msmpeg4_version < MSMP4_WMV1)
} ff_mpeg4_clean_buffers(&s->c);
s->first_slice_line = 1; s->c.first_slice_line = 1;
} else { } else {
s->first_slice_line = 0; s->c.first_slice_line = 0;
} }
} }
} }
static void msmpeg4v2_encode_motion(MpegEncContext * s, int val) static void msmpeg4v2_encode_motion(MPVEncContext *const s, int val)
{ {
int range, bit_size, sign, code, bits; int range, bit_size, sign, code, bits;
@ -355,7 +355,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
/* zero vector; corresponds to ff_mvtab[0] */ /* zero vector; corresponds to ff_mvtab[0] */
put_bits(&s->pb, 1, 0x1); put_bits(&s->pb, 1, 0x1);
} else { } else {
bit_size = s->f_code - 1; bit_size = s->c.f_code - 1;
range = 1 << bit_size; range = 1 << bit_size;
if (val <= -64) if (val <= -64)
val += 64; val += 64;
@ -379,7 +379,7 @@ static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
} }
} }
static void msmpeg4_encode_mb(MpegEncContext *const s, static void msmpeg4_encode_mb(MPVEncContext *const s,
int16_t block[][64], int16_t block[][64],
int motion_x, int motion_y) int motion_x, int motion_y)
{ {
@ -389,11 +389,11 @@ static void msmpeg4_encode_mb(MpegEncContext *const s,
ff_msmpeg4_handle_slices(s); ff_msmpeg4_handle_slices(s);
if (!s->mb_intra) { if (!s->c.mb_intra) {
/* compute cbp */ /* compute cbp */
cbp = 0; cbp = 0;
for (i = 0; i < 6; i++) { for (i = 0; i < 6; i++) {
if (s->block_last_index[i] >= 0) if (s->c.block_last_index[i] >= 0)
cbp |= 1 << (5 - i); cbp |= 1 << (5 - i);
} }
if (ms->use_skip_mb_code && (cbp | motion_x | motion_y) == 0) { if (ms->use_skip_mb_code && (cbp | motion_x | motion_y) == 0) {
@ -407,7 +407,7 @@ static void msmpeg4_encode_mb(MpegEncContext *const s,
if (ms->use_skip_mb_code) if (ms->use_skip_mb_code)
put_bits(&s->pb, 1, 0); /* mb coded */ put_bits(&s->pb, 1, 0); /* mb coded */
if (s->msmpeg4_version <= MSMP4_V2) { if (s->c.msmpeg4_version <= MSMP4_V2) {
put_bits(&s->pb, put_bits(&s->pb,
ff_v2_mb_type[cbp&3][1], ff_v2_mb_type[cbp&3][1],
ff_v2_mb_type[cbp&3][0]); ff_v2_mb_type[cbp&3][0]);
@ -420,7 +420,7 @@ static void msmpeg4_encode_mb(MpegEncContext *const s,
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
msmpeg4v2_encode_motion(s, motion_x - pred_x); msmpeg4v2_encode_motion(s, motion_x - pred_x);
msmpeg4v2_encode_motion(s, motion_y - pred_y); msmpeg4v2_encode_motion(s, motion_y - pred_y);
}else{ }else{
@ -431,7 +431,7 @@ static void msmpeg4_encode_mb(MpegEncContext *const s,
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
/* motion vector */ /* motion vector */
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
ff_msmpeg4_encode_motion(ms, motion_x - pred_x, ff_msmpeg4_encode_motion(ms, motion_x - pred_x,
motion_y - pred_y); motion_y - pred_y);
} }
@ -446,11 +446,11 @@ static void msmpeg4_encode_mb(MpegEncContext *const s,
/* compute cbp */ /* compute cbp */
cbp = 0; cbp = 0;
for (int i = 0; i < 6; i++) { for (int i = 0; i < 6; i++) {
int val = (s->block_last_index[i] >= 1); int val = (s->c.block_last_index[i] >= 1);
cbp |= val << (5 - i); cbp |= val << (5 - i);
} }
if (s->msmpeg4_version <= MSMP4_V2) { if (s->c.msmpeg4_version <= MSMP4_V2) {
if (s->pict_type == AV_PICTURE_TYPE_I) { if (s->c.pict_type == AV_PICTURE_TYPE_I) {
put_bits(&s->pb, put_bits(&s->pb,
ff_v2_intra_cbpc[cbp&3][1], ff_v2_intra_cbpc[cbp&3][0]); ff_v2_intra_cbpc[cbp&3][1], ff_v2_intra_cbpc[cbp&3][0]);
} else { } else {
@ -465,14 +465,14 @@ static void msmpeg4_encode_mb(MpegEncContext *const s,
ff_h263_cbpy_tab[cbp>>2][1], ff_h263_cbpy_tab[cbp>>2][1],
ff_h263_cbpy_tab[cbp>>2][0]); ff_h263_cbpy_tab[cbp>>2][0]);
}else{ }else{
if (s->pict_type == AV_PICTURE_TYPE_I) { if (s->c.pict_type == AV_PICTURE_TYPE_I) {
/* compute coded_cbp; the 0x3 corresponds to chroma cbp; /* compute coded_cbp; the 0x3 corresponds to chroma cbp;
* luma coded_cbp are set in the loop below */ * luma coded_cbp are set in the loop below */
coded_cbp = cbp & 0x3; coded_cbp = cbp & 0x3;
for (int i = 0; i < 4; i++) { for (int i = 0; i < 4; i++) {
uint8_t *coded_block; uint8_t *coded_block;
int pred = ff_msmpeg4_coded_block_pred(s, i, &coded_block); int pred = ff_msmpeg4_coded_block_pred(&s->c, i, &coded_block);
int val = (s->block_last_index[i] >= 1); int val = (s->c.block_last_index[i] >= 1);
*coded_block = val; *coded_block = val;
val ^= pred; val ^= pred;
coded_cbp |= val << (5 - i); coded_cbp |= val << (5 - i);
@ -488,9 +488,10 @@ static void msmpeg4_encode_mb(MpegEncContext *const s,
ff_table_mb_non_intra[cbp][0]); ff_table_mb_non_intra[cbp][0]);
} }
put_bits(&s->pb, 1, 0); /* no AC prediction yet */ put_bits(&s->pb, 1, 0); /* no AC prediction yet */
if(s->inter_intra_pred){ if (s->c.inter_intra_pred) {
s->h263_aic_dir=0; s->c.h263_aic_dir = 0;
put_bits(&s->pb, ff_table_inter_intra[s->h263_aic_dir][1], ff_table_inter_intra[s->h263_aic_dir][0]); put_bits(&s->pb, ff_table_inter_intra[s->c.h263_aic_dir][1],
ff_table_inter_intra[s->c.h263_aic_dir][0]);
} }
} }
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
@ -505,24 +506,24 @@ static void msmpeg4_encode_mb(MpegEncContext *const s,
static void msmpeg4_encode_dc(MSMPEG4EncContext *const ms, int level, int n, int *dir_ptr) static void msmpeg4_encode_dc(MSMPEG4EncContext *const ms, int level, int n, int *dir_ptr)
{ {
MpegEncContext *const s = &ms->m.s; MPVEncContext *const s = &ms->m.s;
int sign, code; int sign, code;
int pred; int pred;
int16_t *dc_val; int16_t *dc_val;
pred = ff_msmpeg4_pred_dc(s, n, &dc_val, dir_ptr); pred = ff_msmpeg4_pred_dc(&s->c, n, &dc_val, dir_ptr);
/* update predictor */ /* update predictor */
if (n < 4) { if (n < 4) {
*dc_val = level * s->y_dc_scale; *dc_val = level * s->c.y_dc_scale;
} else { } else {
*dc_val = level * s->c_dc_scale; *dc_val = level * s->c.c_dc_scale;
} }
/* do the prediction */ /* do the prediction */
level -= pred; level -= pred;
if (s->msmpeg4_version <= MSMP4_V2) { if (s->c.msmpeg4_version <= MSMP4_V2) {
if (n < 4) { if (n < 4) {
put_bits(&s->pb, put_bits(&s->pb,
ff_v2_dc_lum_table[level + 256][1], ff_v2_dc_lum_table[level + 256][1],
@ -556,7 +557,7 @@ static void msmpeg4_encode_dc(MSMPEG4EncContext *const ms, int level, int n, int
/* Encoding of a block; very similar to MPEG-4 except for a different /* Encoding of a block; very similar to MPEG-4 except for a different
* escape coding (same as H.263) and more VLC tables. */ * escape coding (same as H.263) and more VLC tables. */
void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n) void ff_msmpeg4_encode_block(MPVEncContext *const s, int16_t * block, int n)
{ {
MSMPEG4EncContext *const ms = (MSMPEG4EncContext*)s; MSMPEG4EncContext *const ms = (MSMPEG4EncContext*)s;
int level, run, last, i, j, last_index; int level, run, last, i, j, last_index;
@ -565,7 +566,7 @@ void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n)
const RLTable *rl; const RLTable *rl;
const uint8_t *scantable; const uint8_t *scantable;
if (s->mb_intra) { if (s->c.mb_intra) {
msmpeg4_encode_dc(ms, block[0], n, &dc_pred_dir); msmpeg4_encode_dc(ms, block[0], n, &dc_pred_dir);
i = 1; i = 1;
if (n < 4) { if (n < 4) {
@ -573,23 +574,23 @@ void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n)
} else { } else {
rl = &ff_rl_table[3 + ms->rl_chroma_table_index]; rl = &ff_rl_table[3 + ms->rl_chroma_table_index];
} }
run_diff = s->msmpeg4_version >= MSMP4_WMV1; run_diff = s->c.msmpeg4_version >= MSMP4_WMV1;
scantable= s->intra_scantable.permutated; scantable = s->c.intra_scantable.permutated;
} else { } else {
i = 0; i = 0;
rl = &ff_rl_table[3 + ms->rl_table_index]; rl = &ff_rl_table[3 + ms->rl_table_index];
run_diff = s->msmpeg4_version > MSMP4_V2; run_diff = s->c.msmpeg4_version > MSMP4_V2;
scantable= s->inter_scantable.permutated; scantable = s->c.inter_scantable.permutated;
} }
/* recalculate block_last_index for M$ wmv1 */ /* recalculate block_last_index for M$ wmv1 */
if (s->msmpeg4_version >= MSMP4_WMV1 && s->block_last_index[n] > 0) { if (s->c.msmpeg4_version >= MSMP4_WMV1 && s->c.block_last_index[n] > 0) {
for(last_index=63; last_index>=0; last_index--){ for(last_index=63; last_index>=0; last_index--){
if(block[scantable[last_index]]) break; if(block[scantable[last_index]]) break;
} }
s->block_last_index[n]= last_index; s->c.block_last_index[n] = last_index;
}else }else
last_index = s->block_last_index[n]; last_index = s->c.block_last_index[n];
/* AC coefs */ /* AC coefs */
last_non_zero = i - 1; last_non_zero = i - 1;
for (; i <= last_index; i++) { for (; i <= last_index; i++) {
@ -606,10 +607,10 @@ void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n)
} }
if(level<=MAX_LEVEL && run<=MAX_RUN){ if(level<=MAX_LEVEL && run<=MAX_RUN){
ms->ac_stats[s->mb_intra][n>3][level][run][last]++; ms->ac_stats[s->c.mb_intra][n>3][level][run][last]++;
} }
ms->ac_stats[s->mb_intra][n > 3][40][63][0]++; //esc3 like ms->ac_stats[s->c.mb_intra][n > 3][40][63][0]++; //esc3 like
code = get_rl_index(rl, last, run, level); code = get_rl_index(rl, last, run, level);
put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]); put_bits(&s->pb, rl->table_vlc[code][1], rl->table_vlc[code][0]);
@ -629,7 +630,7 @@ void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n)
if (run1 < 0) if (run1 < 0)
goto esc3; goto esc3;
code = get_rl_index(rl, last, run1+1, level); code = get_rl_index(rl, last, run1+1, level);
if (s->msmpeg4_version == MSMP4_WMV1 && code == rl->n) if (s->c.msmpeg4_version == MSMP4_WMV1 && code == rl->n)
goto esc3; goto esc3;
code = get_rl_index(rl, last, run1, level); code = get_rl_index(rl, last, run1, level);
if (code == rl->n) { if (code == rl->n) {
@ -637,12 +638,12 @@ void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n)
/* third escape */ /* third escape */
put_bits(&s->pb, 1, 0); put_bits(&s->pb, 1, 0);
put_bits(&s->pb, 1, last); put_bits(&s->pb, 1, last);
if (s->msmpeg4_version >= MSMP4_WMV1) { if (s->c.msmpeg4_version >= MSMP4_WMV1) {
if (s->esc3_level_length == 0) { if (s->esc3_level_length == 0) {
s->esc3_level_length = 8; s->esc3_level_length = 8;
ms->esc3_run_length = 6; ms->esc3_run_length = 6;
//ESCLVLSZ + ESCRUNSZ //ESCLVLSZ + ESCRUNSZ
if(s->qscale<8) if (s->c.qscale < 8)
put_bits(&s->pb, 6, 3); put_bits(&s->pb, 6, 3);
else else
put_bits(&s->pb, 8, 3); put_bits(&s->pb, 8, 3);
@ -676,17 +677,17 @@ void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n)
av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m) av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *const m)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
static AVOnce init_static_once = AV_ONCE_INIT; static AVOnce init_static_once = AV_ONCE_INIT;
ff_msmpeg4_common_init(s); ff_msmpeg4_common_init(&s->c);
if (s->msmpeg4_version <= MSMP4_WMV1) { if (s->c.msmpeg4_version <= MSMP4_WMV1) {
m->encode_picture_header = msmpeg4_encode_picture_header; m->encode_picture_header = msmpeg4_encode_picture_header;
s->encode_mb = msmpeg4_encode_mb; s->encode_mb = msmpeg4_encode_mb;
} }
if (s->msmpeg4_version >= MSMP4_WMV1) { if (s->c.msmpeg4_version >= MSMP4_WMV1) {
s->min_qcoeff = -255; s->min_qcoeff = -255;
s->max_qcoeff = 255; s->max_qcoeff = 255;
} }

View File

@ -41,16 +41,16 @@ typedef struct MSMPEG4EncContext {
unsigned ac_stats[2][2][MAX_LEVEL + 1][MAX_RUN + 1][2]; unsigned ac_stats[2][2][MAX_LEVEL + 1][MAX_RUN + 1][2];
} MSMPEG4EncContext; } MSMPEG4EncContext;
static inline MSMPEG4EncContext *mpv_to_msmpeg4(MpegEncContext *s) static inline MSMPEG4EncContext *mpv_to_msmpeg4(MPVEncContext *s)
{ {
// Only legal because no MSMPEG-4 decoder uses slice-threading. // Only legal because no MSMPEG-4 decoder uses slice-threading.
return (MSMPEG4EncContext*)s; return (MSMPEG4EncContext*)s;
} }
void ff_msmpeg4_encode_init(MPVMainEncContext *m); void ff_msmpeg4_encode_init(MPVMainEncContext *m);
void ff_msmpeg4_encode_ext_header(MpegEncContext *s); void ff_msmpeg4_encode_ext_header(MPVEncContext *s);
void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n); void ff_msmpeg4_encode_block(MPVEncContext * s, int16_t * block, int n);
void ff_msmpeg4_handle_slices(MpegEncContext *s); void ff_msmpeg4_handle_slices(MPVEncContext *s);
void ff_msmpeg4_encode_motion(MSMPEG4EncContext *ms, int mx, int my); void ff_msmpeg4_encode_motion(MSMPEG4EncContext *ms, int mx, int my);
void ff_msmpeg4_code012(PutBitContext *pb, int n); void ff_msmpeg4_code012(PutBitContext *pb, int n);

View File

@ -51,7 +51,7 @@
iv = vec_vsx_ld(1, pix);\ iv = vec_vsx_ld(1, pix);\
} }
#endif #endif
static int sad16_x2_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sad16_x2_altivec(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int i; int i;
@ -91,7 +91,7 @@ static int sad16_x2_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_
return s; return s;
} }
static int sad16_y2_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sad16_y2_altivec(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int i; int i;
@ -141,7 +141,7 @@ static int sad16_y2_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_
return s; return s;
} }
static int sad16_xy2_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sad16_xy2_altivec(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int i; int i;
@ -230,7 +230,7 @@ static int sad16_xy2_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8
return s; return s;
} }
static int sad16_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sad16_altivec(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int i; int i;
@ -265,7 +265,7 @@ static int sad16_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *
return s; return s;
} }
static int sad8_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sad8_altivec(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int i; int i;
@ -309,7 +309,7 @@ static int sad8_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *p
/* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced. /* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
* It's the sad8_altivec code above w/ squaring added. */ * It's the sad8_altivec code above w/ squaring added. */
static int sse8_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sse8_altivec(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int i; int i;
@ -354,7 +354,7 @@ static int sse8_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *p
/* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced. /* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
* It's the sad16_altivec code above w/ squaring added. */ * It's the sad16_altivec code above w/ squaring added. */
static int sse16_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, static int sse16_altivec(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int i; int i;
@ -392,7 +392,7 @@ static int sse16_altivec(MpegEncContext *v, const uint8_t *pix1, const uint8_t *
return s; return s;
} }
static int hadamard8_diff8x8_altivec(MpegEncContext *s, const uint8_t *dst, static int hadamard8_diff8x8_altivec(MPVEncContext *s, const uint8_t *dst,
const uint8_t *src, ptrdiff_t stride, int h) const uint8_t *src, ptrdiff_t stride, int h)
{ {
int __attribute__((aligned(16))) sum; int __attribute__((aligned(16))) sum;
@ -518,7 +518,7 @@ static int hadamard8_diff8x8_altivec(MpegEncContext *s, const uint8_t *dst,
* On the 970, the hand-made RA is still a win (around 690 vs. around 780), * On the 970, the hand-made RA is still a win (around 690 vs. around 780),
* but xlc goes to around 660 on the regular C code... * but xlc goes to around 660 on the regular C code...
*/ */
static int hadamard8_diff16x8_altivec(MpegEncContext *s, const uint8_t *dst, static int hadamard8_diff16x8_altivec(MPVEncContext *s, const uint8_t *dst,
const uint8_t *src, ptrdiff_t stride, int h) const uint8_t *src, ptrdiff_t stride, int h)
{ {
int __attribute__((aligned(16))) sum; int __attribute__((aligned(16))) sum;
@ -709,7 +709,7 @@ static int hadamard8_diff16x8_altivec(MpegEncContext *s, const uint8_t *dst,
return sum; return sum;
} }
static int hadamard8_diff16_altivec(MpegEncContext *s, const uint8_t *dst, static int hadamard8_diff16_altivec(MPVEncContext *s, const uint8_t *dst,
const uint8_t *src, ptrdiff_t stride, int h) const uint8_t *src, ptrdiff_t stride, int h)
{ {
int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8); int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);

View File

@ -37,20 +37,20 @@
void ff_write_pass1_stats(MPVMainEncContext *const m) void ff_write_pass1_stats(MPVMainEncContext *const m)
{ {
const MpegEncContext *const s = &m->s; const MPVEncContext *const s = &m->s;
snprintf(s->avctx->stats_out, 256, snprintf(s->c.avctx->stats_out, 256,
"in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d " "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
"fcode:%d bcode:%d mc-var:%"PRId64" var:%"PRId64" icount:%d hbits:%d;\n", "fcode:%d bcode:%d mc-var:%"PRId64" var:%"PRId64" icount:%d hbits:%d;\n",
s->cur_pic.ptr->display_picture_number, s->c.cur_pic.ptr->display_picture_number,
s->cur_pic.ptr->coded_picture_number, s->c.cur_pic.ptr->coded_picture_number,
s->pict_type, s->c.pict_type,
s->cur_pic.ptr->f->quality, s->c.cur_pic.ptr->f->quality,
s->i_tex_bits, s->i_tex_bits,
s->p_tex_bits, s->p_tex_bits,
s->mv_bits, s->mv_bits,
s->misc_bits, s->misc_bits,
s->f_code, s->c.f_code,
s->b_code, s->c.b_code,
m->mc_mb_var_sum, m->mc_mb_var_sum,
m->mb_var_sum, m->mb_var_sum,
s->i_count, s->i_count,
@ -104,9 +104,9 @@ static double bits2qp_cb(void *rce, double qp)
static double get_diff_limited_q(MPVMainEncContext *m, const RateControlEntry *rce, double q) static double get_diff_limited_q(MPVMainEncContext *m, const RateControlEntry *rce, double q)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
RateControlContext *const rcc = &m->rc_context; RateControlContext *const rcc = &m->rc_context;
AVCodecContext *a = s->avctx; AVCodecContext *const a = s->c.avctx;
const int pict_type = rce->new_pict_type; const int pict_type = rce->new_pict_type;
const double last_p_q = rcc->last_qscale_for[AV_PICTURE_TYPE_P]; const double last_p_q = rcc->last_qscale_for[AV_PICTURE_TYPE_P];
const double last_non_b_q = rcc->last_qscale_for[rcc->last_non_b_pict_type]; const double last_non_b_q = rcc->last_qscale_for[rcc->last_non_b_pict_type];
@ -144,7 +144,7 @@ static double get_diff_limited_q(MPVMainEncContext *m, const RateControlEntry *r
*/ */
static void get_qminmax(int *qmin_ret, int *qmax_ret, MPVMainEncContext *const m, int pict_type) static void get_qminmax(int *qmin_ret, int *qmax_ret, MPVMainEncContext *const m, int pict_type)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
int qmin = m->lmin; int qmin = m->lmin;
int qmax = m->lmax; int qmax = m->lmax;
@ -152,12 +152,12 @@ static void get_qminmax(int *qmin_ret, int *qmax_ret, MPVMainEncContext *const m
switch (pict_type) { switch (pict_type) {
case AV_PICTURE_TYPE_B: case AV_PICTURE_TYPE_B:
qmin = (int)(qmin * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset + 0.5); qmin = (int)(qmin * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset + 0.5);
qmax = (int)(qmax * FFABS(s->avctx->b_quant_factor) + s->avctx->b_quant_offset + 0.5); qmax = (int)(qmax * FFABS(s->c.avctx->b_quant_factor) + s->c.avctx->b_quant_offset + 0.5);
break; break;
case AV_PICTURE_TYPE_I: case AV_PICTURE_TYPE_I:
qmin = (int)(qmin * FFABS(s->avctx->i_quant_factor) + s->avctx->i_quant_offset + 0.5); qmin = (int)(qmin * FFABS(s->c.avctx->i_quant_factor) + s->c.avctx->i_quant_offset + 0.5);
qmax = (int)(qmax * FFABS(s->avctx->i_quant_factor) + s->avctx->i_quant_offset + 0.5); qmax = (int)(qmax * FFABS(s->c.avctx->i_quant_factor) + s->c.avctx->i_quant_offset + 0.5);
break; break;
} }
@ -174,12 +174,12 @@ static void get_qminmax(int *qmin_ret, int *qmax_ret, MPVMainEncContext *const m
static double modify_qscale(MPVMainEncContext *const m, const RateControlEntry *rce, static double modify_qscale(MPVMainEncContext *const m, const RateControlEntry *rce,
double q, int frame_num) double q, int frame_num)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
RateControlContext *const rcc = &m->rc_context; RateControlContext *const rcc = &m->rc_context;
const double buffer_size = s->avctx->rc_buffer_size; const double buffer_size = s->c.avctx->rc_buffer_size;
const double fps = get_fps(s->avctx); const double fps = get_fps(s->c.avctx);
const double min_rate = s->avctx->rc_min_rate / fps; const double min_rate = s->c.avctx->rc_min_rate / fps;
const double max_rate = s->avctx->rc_max_rate / fps; const double max_rate = s->c.avctx->rc_max_rate / fps;
const int pict_type = rce->new_pict_type; const int pict_type = rce->new_pict_type;
int qmin, qmax; int qmin, qmax;
@ -206,11 +206,11 @@ static double modify_qscale(MPVMainEncContext *const m, const RateControlEntry *
q_limit = bits2qp(rce, q_limit = bits2qp(rce,
FFMAX((min_rate - buffer_size + rcc->buffer_index) * FFMAX((min_rate - buffer_size + rcc->buffer_index) *
s->avctx->rc_min_vbv_overflow_use, 1)); s->c.avctx->rc_min_vbv_overflow_use, 1));
if (q > q_limit) { if (q > q_limit) {
if (s->avctx->debug & FF_DEBUG_RC) if (s->c.avctx->debug & FF_DEBUG_RC)
av_log(s->avctx, AV_LOG_DEBUG, av_log(s->c.avctx, AV_LOG_DEBUG,
"limiting QP %f -> %f\n", q, q_limit); "limiting QP %f -> %f\n", q, q_limit);
q = q_limit; q = q_limit;
} }
@ -226,17 +226,17 @@ static double modify_qscale(MPVMainEncContext *const m, const RateControlEntry *
q_limit = bits2qp(rce, q_limit = bits2qp(rce,
FFMAX(rcc->buffer_index * FFMAX(rcc->buffer_index *
s->avctx->rc_max_available_vbv_use, s->c.avctx->rc_max_available_vbv_use,
1)); 1));
if (q < q_limit) { if (q < q_limit) {
if (s->avctx->debug & FF_DEBUG_RC) if (s->c.avctx->debug & FF_DEBUG_RC)
av_log(s->avctx, AV_LOG_DEBUG, av_log(s->c.avctx, AV_LOG_DEBUG,
"limiting QP %f -> %f\n", q, q_limit); "limiting QP %f -> %f\n", q, q_limit);
q = q_limit; q = q_limit;
} }
} }
} }
ff_dlog(s->avctx, "q:%f max:%f min:%f size:%f index:%f agr:%f\n", ff_dlog(s->c.avctx, "q:%f max:%f min:%f size:%f index:%f agr:%f\n",
q, max_rate, min_rate, buffer_size, rcc->buffer_index, q, max_rate, min_rate, buffer_size, rcc->buffer_index,
rcc->buffer_aggressivity); rcc->buffer_aggressivity);
if (rcc->qsquish == 0.0 || qmin == qmax) { if (rcc->qsquish == 0.0 || qmin == qmax) {
@ -266,11 +266,11 @@ static double modify_qscale(MPVMainEncContext *const m, const RateControlEntry *
static double get_qscale(MPVMainEncContext *const m, RateControlEntry *rce, static double get_qscale(MPVMainEncContext *const m, RateControlEntry *rce,
double rate_factor, int frame_num) double rate_factor, int frame_num)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
RateControlContext *rcc = &m->rc_context; RateControlContext *rcc = &m->rc_context;
AVCodecContext *a = s->avctx; AVCodecContext *const avctx = s->c.avctx;
const int pict_type = rce->new_pict_type; const int pict_type = rce->new_pict_type;
const double mb_num = s->mb_num; const double mb_num = s->c.mb_num;
double q, bits; double q, bits;
int i; int i;
@ -289,7 +289,7 @@ static double get_qscale(MPVMainEncContext *const m, RateControlEntry *rce,
rce->pict_type == AV_PICTURE_TYPE_P, rce->pict_type == AV_PICTURE_TYPE_P,
rce->pict_type == AV_PICTURE_TYPE_B, rce->pict_type == AV_PICTURE_TYPE_B,
rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type], rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type],
a->qcompress, avctx->qcompress,
rcc->i_cplx_sum[AV_PICTURE_TYPE_I] / (double)rcc->frame_count[AV_PICTURE_TYPE_I], rcc->i_cplx_sum[AV_PICTURE_TYPE_I] / (double)rcc->frame_count[AV_PICTURE_TYPE_I],
rcc->i_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P], rcc->i_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
rcc->p_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P], rcc->p_cplx_sum[AV_PICTURE_TYPE_P] / (double)rcc->frame_count[AV_PICTURE_TYPE_P],
@ -300,7 +300,7 @@ static double get_qscale(MPVMainEncContext *const m, RateControlEntry *rce,
bits = av_expr_eval(rcc->rc_eq_eval, const_values, rce); bits = av_expr_eval(rcc->rc_eq_eval, const_values, rce);
if (isnan(bits)) { if (isnan(bits)) {
av_log(s->avctx, AV_LOG_ERROR, "Error evaluating rc_eq \"%s\"\n", rcc->rc_eq); av_log(avctx, AV_LOG_ERROR, "Error evaluating rc_eq \"%s\"\n", rcc->rc_eq);
return -1; return -1;
} }
@ -311,8 +311,8 @@ static double get_qscale(MPVMainEncContext *const m, RateControlEntry *rce,
bits += 1.0; // avoid 1/0 issues bits += 1.0; // avoid 1/0 issues
/* user override */ /* user override */
for (i = 0; i < s->avctx->rc_override_count; i++) { for (i = 0; i < avctx->rc_override_count; i++) {
RcOverride *rco = s->avctx->rc_override; RcOverride *rco = avctx->rc_override;
if (rco[i].start_frame > frame_num) if (rco[i].start_frame > frame_num)
continue; continue;
if (rco[i].end_frame < frame_num) if (rco[i].end_frame < frame_num)
@ -327,10 +327,10 @@ static double get_qscale(MPVMainEncContext *const m, RateControlEntry *rce,
q = bits2qp(rce, bits); q = bits2qp(rce, bits);
/* I/B difference */ /* I/B difference */
if (pict_type == AV_PICTURE_TYPE_I && s->avctx->i_quant_factor < 0.0) if (pict_type == AV_PICTURE_TYPE_I && avctx->i_quant_factor < 0.0)
q = -q * s->avctx->i_quant_factor + s->avctx->i_quant_offset; q = -q * avctx->i_quant_factor + avctx->i_quant_offset;
else if (pict_type == AV_PICTURE_TYPE_B && s->avctx->b_quant_factor < 0.0) else if (pict_type == AV_PICTURE_TYPE_B && avctx->b_quant_factor < 0.0)
q = -q * s->avctx->b_quant_factor + s->avctx->b_quant_offset; q = -q * avctx->b_quant_factor + avctx->b_quant_offset;
if (q < 1) if (q < 1)
q = 1; q = 1;
@ -340,10 +340,10 @@ static double get_qscale(MPVMainEncContext *const m, RateControlEntry *rce,
static int init_pass2(MPVMainEncContext *const m) static int init_pass2(MPVMainEncContext *const m)
{ {
RateControlContext *const rcc = &m->rc_context; RateControlContext *const rcc = &m->rc_context;
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
AVCodecContext *a = s->avctx; AVCodecContext *const avctx = s->c.avctx;
int i, toobig; int i, toobig;
AVRational fps = get_fpsQ(s->avctx); AVRational fps = get_fpsQ(avctx);
double complexity[5] = { 0 }; // approximate bits at quant=1 double complexity[5] = { 0 }; // approximate bits at quant=1
uint64_t const_bits[5] = { 0 }; // quantizer independent bits uint64_t const_bits[5] = { 0 }; // quantizer independent bits
uint64_t all_const_bits; uint64_t all_const_bits;
@ -352,7 +352,7 @@ static int init_pass2(MPVMainEncContext *const m)
fps); fps);
double rate_factor = 0; double rate_factor = 0;
double step; double step;
const int filter_size = (int)(a->qblur * 4) | 1; const int filter_size = (int)(avctx->qblur * 4) | 1;
double expected_bits = 0; // init to silence gcc warning double expected_bits = 0; // init to silence gcc warning
double *qscale, *blurred_qscale, qscale_sum; double *qscale, *blurred_qscale, qscale_sum;
@ -376,7 +376,7 @@ static int init_pass2(MPVMainEncContext *const m)
const_bits[AV_PICTURE_TYPE_B]; const_bits[AV_PICTURE_TYPE_B];
if (all_available_bits < all_const_bits) { if (all_available_bits < all_const_bits) {
av_log(s->avctx, AV_LOG_ERROR, "requested bitrate is too low\n"); av_log(avctx, AV_LOG_ERROR, "requested bitrate is too low\n");
return -1; return -1;
} }
@ -393,7 +393,7 @@ static int init_pass2(MPVMainEncContext *const m)
expected_bits = 0; expected_bits = 0;
rate_factor += step; rate_factor += step;
rcc->buffer_index = s->avctx->rc_buffer_size / 2; rcc->buffer_index = avctx->rc_buffer_size / 2;
/* find qscale */ /* find qscale */
for (i = 0; i < rcc->num_entries; i++) { for (i = 0; i < rcc->num_entries; i++) {
@ -427,7 +427,7 @@ static int init_pass2(MPVMainEncContext *const m)
for (j = 0; j < filter_size; j++) { for (j = 0; j < filter_size; j++) {
int index = i + j - filter_size / 2; int index = i + j - filter_size / 2;
double d = index - i; double d = index - i;
double coeff = a->qblur == 0 ? 1.0 : exp(-d * d / (a->qblur * a->qblur)); double coeff = avctx->qblur == 0 ? 1.0 : exp(-d * d / (avctx->qblur * avctx->qblur));
if (index < 0 || index >= rcc->num_entries) if (index < 0 || index >= rcc->num_entries)
continue; continue;
@ -453,7 +453,7 @@ static int init_pass2(MPVMainEncContext *const m)
expected_bits += bits; expected_bits += bits;
} }
ff_dlog(s->avctx, ff_dlog(avctx,
"expected_bits: %f all_available_bits: %d rate_factor: %f\n", "expected_bits: %f all_available_bits: %d rate_factor: %f\n",
expected_bits, (int)all_available_bits, rate_factor); expected_bits, (int)all_available_bits, rate_factor);
if (expected_bits > all_available_bits) { if (expected_bits > all_available_bits) {
@ -467,32 +467,32 @@ static int init_pass2(MPVMainEncContext *const m)
/* check bitrate calculations and print info */ /* check bitrate calculations and print info */
qscale_sum = 0.0; qscale_sum = 0.0;
for (i = 0; i < rcc->num_entries; i++) { for (i = 0; i < rcc->num_entries; i++) {
ff_dlog(s->avctx, "[lavc rc] entry[%d].new_qscale = %.3f qp = %.3f\n", ff_dlog(avctx, "[lavc rc] entry[%d].new_qscale = %.3f qp = %.3f\n",
i, i,
rcc->entry[i].new_qscale, rcc->entry[i].new_qscale,
rcc->entry[i].new_qscale / FF_QP2LAMBDA); rcc->entry[i].new_qscale / FF_QP2LAMBDA);
qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA, qscale_sum += av_clip(rcc->entry[i].new_qscale / FF_QP2LAMBDA,
s->avctx->qmin, s->avctx->qmax); avctx->qmin, avctx->qmax);
} }
av_assert0(toobig <= 40); av_assert0(toobig <= 40);
av_log(s->avctx, AV_LOG_DEBUG, av_log(avctx, AV_LOG_DEBUG,
"[lavc rc] requested bitrate: %"PRId64" bps expected bitrate: %"PRId64" bps\n", "[lavc rc] requested bitrate: %"PRId64" bps expected bitrate: %"PRId64" bps\n",
m->bit_rate, m->bit_rate,
(int64_t)(expected_bits / ((double)all_available_bits / m->bit_rate))); (int64_t)(expected_bits / ((double)all_available_bits / m->bit_rate)));
av_log(s->avctx, AV_LOG_DEBUG, av_log(avctx, AV_LOG_DEBUG,
"[lavc rc] estimated target average qp: %.3f\n", "[lavc rc] estimated target average qp: %.3f\n",
(float)qscale_sum / rcc->num_entries); (float)qscale_sum / rcc->num_entries);
if (toobig == 0) { if (toobig == 0) {
av_log(s->avctx, AV_LOG_INFO, av_log(avctx, AV_LOG_INFO,
"[lavc rc] Using all of requested bitrate is not " "[lavc rc] Using all of requested bitrate is not "
"necessary for this video with these parameters.\n"); "necessary for this video with these parameters.\n");
} else if (toobig == 40) { } else if (toobig == 40) {
av_log(s->avctx, AV_LOG_ERROR, av_log(avctx, AV_LOG_ERROR,
"[lavc rc] Error: bitrate too low for this video " "[lavc rc] Error: bitrate too low for this video "
"with these parameters.\n"); "with these parameters.\n");
return -1; return -1;
} else if (fabs(expected_bits / all_available_bits - 1.0) > 0.01) { } else if (fabs(expected_bits / all_available_bits - 1.0) > 0.01) {
av_log(s->avctx, AV_LOG_ERROR, av_log(avctx, AV_LOG_ERROR,
"[lavc rc] Error: 2pass curve failed to converge\n"); "[lavc rc] Error: 2pass curve failed to converge\n");
return -1; return -1;
} }
@ -502,8 +502,9 @@ static int init_pass2(MPVMainEncContext *const m)
av_cold int ff_rate_control_init(MPVMainEncContext *const m) av_cold int ff_rate_control_init(MPVMainEncContext *const m)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
RateControlContext *rcc = &m->rc_context; RateControlContext *rcc = &m->rc_context;
AVCodecContext *const avctx = s->c.avctx;
int i, res; int i, res;
static const char * const const_names[] = { static const char * const const_names[] = {
"PI", "PI",
@ -540,19 +541,19 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
}; };
emms_c(); emms_c();
if (!s->avctx->rc_max_available_vbv_use && s->avctx->rc_buffer_size) { if (!avctx->rc_max_available_vbv_use && avctx->rc_buffer_size) {
if (s->avctx->rc_max_rate) { if (avctx->rc_max_rate) {
s->avctx->rc_max_available_vbv_use = av_clipf(s->avctx->rc_max_rate/(s->avctx->rc_buffer_size*get_fps(s->avctx)), 1.0/3, 1.0); avctx->rc_max_available_vbv_use = av_clipf(avctx->rc_max_rate/(avctx->rc_buffer_size*get_fps(avctx)), 1.0/3, 1.0);
} else } else
s->avctx->rc_max_available_vbv_use = 1.0; avctx->rc_max_available_vbv_use = 1.0;
} }
res = av_expr_parse(&rcc->rc_eq_eval, res = av_expr_parse(&rcc->rc_eq_eval,
rcc->rc_eq ? rcc->rc_eq : "tex^qComp", rcc->rc_eq ? rcc->rc_eq : "tex^qComp",
const_names, func1_names, func1, const_names, func1_names, func1,
NULL, NULL, 0, s->avctx); NULL, NULL, 0, avctx);
if (res < 0) { if (res < 0) {
av_log(s->avctx, AV_LOG_ERROR, "Error parsing rc_eq \"%s\"\n", rcc->rc_eq); av_log(avctx, AV_LOG_ERROR, "Error parsing rc_eq \"%s\"\n", rcc->rc_eq);
return res; return res;
} }
@ -569,16 +570,16 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
rcc->last_qscale_for[i] = FF_QP2LAMBDA * 5; rcc->last_qscale_for[i] = FF_QP2LAMBDA * 5;
} }
rcc->buffer_index = s->avctx->rc_initial_buffer_occupancy; rcc->buffer_index = avctx->rc_initial_buffer_occupancy;
if (!rcc->buffer_index) if (!rcc->buffer_index)
rcc->buffer_index = s->avctx->rc_buffer_size * 3 / 4; rcc->buffer_index = avctx->rc_buffer_size * 3 / 4;
if (s->avctx->flags & AV_CODEC_FLAG_PASS2) { if (avctx->flags & AV_CODEC_FLAG_PASS2) {
int i; int i;
char *p; char *p;
/* find number of pics */ /* find number of pics */
p = s->avctx->stats_in; p = avctx->stats_in;
for (i = -1; p; i++) for (i = -1; p; i++)
p = strchr(p + 1, ';'); p = strchr(p + 1, ';');
i += m->max_b_frames; i += m->max_b_frames;
@ -596,12 +597,12 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
rce->pict_type = rce->new_pict_type = AV_PICTURE_TYPE_P; rce->pict_type = rce->new_pict_type = AV_PICTURE_TYPE_P;
rce->qscale = rce->new_qscale = FF_QP2LAMBDA * 2; rce->qscale = rce->new_qscale = FF_QP2LAMBDA * 2;
rce->misc_bits = s->mb_num + 10; rce->misc_bits = s->c.mb_num + 10;
rce->mb_var_sum = s->mb_num * 100; rce->mb_var_sum = s->c.mb_num * 100;
} }
/* read stats */ /* read stats */
p = s->avctx->stats_in; p = avctx->stats_in;
for (i = 0; i < rcc->num_entries - m->max_b_frames; i++) { for (i = 0; i < rcc->num_entries - m->max_b_frames; i++) {
RateControlEntry *rce; RateControlEntry *rce;
int picture_number; int picture_number;
@ -630,7 +631,7 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
&rce->mc_mb_var_sum, &rce->mb_var_sum, &rce->mc_mb_var_sum, &rce->mb_var_sum,
&rce->i_count, &rce->header_bits); &rce->i_count, &rce->header_bits);
if (e != 13) { if (e != 13) {
av_log(s->avctx, AV_LOG_ERROR, av_log(avctx, AV_LOG_ERROR,
"statistics are damaged at line %d, parser out=%d\n", "statistics are damaged at line %d, parser out=%d\n",
i, e); i, e);
return -1; return -1;
@ -644,21 +645,21 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
return res; return res;
} }
if (!(s->avctx->flags & AV_CODEC_FLAG_PASS2)) { if (!(avctx->flags & AV_CODEC_FLAG_PASS2)) {
rcc->short_term_qsum = 0.001; rcc->short_term_qsum = 0.001;
rcc->short_term_qcount = 0.001; rcc->short_term_qcount = 0.001;
rcc->pass1_rc_eq_output_sum = 0.001; rcc->pass1_rc_eq_output_sum = 0.001;
rcc->pass1_wanted_bits = 0.001; rcc->pass1_wanted_bits = 0.001;
if (s->avctx->qblur > 1.0) { if (avctx->qblur > 1.0) {
av_log(s->avctx, AV_LOG_ERROR, "qblur too large\n"); av_log(avctx, AV_LOG_ERROR, "qblur too large\n");
return -1; return -1;
} }
/* init stuff with the user specified complexity */ /* init stuff with the user specified complexity */
if (rcc->initial_cplx) { if (rcc->initial_cplx) {
for (i = 0; i < 60 * 30; i++) { for (i = 0; i < 60 * 30; i++) {
double bits = rcc->initial_cplx * (i / 10000.0 + 1.0) * s->mb_num; double bits = rcc->initial_cplx * (i / 10000.0 + 1.0) * s->c.mb_num;
RateControlEntry rce; RateControlEntry rce;
if (i % ((m->gop_size + 3) / 4) == 0) if (i % ((m->gop_size + 3) / 4) == 0)
@ -669,16 +670,16 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
rce.pict_type = AV_PICTURE_TYPE_P; rce.pict_type = AV_PICTURE_TYPE_P;
rce.new_pict_type = rce.pict_type; rce.new_pict_type = rce.pict_type;
rce.mc_mb_var_sum = bits * s->mb_num / 100000; rce.mc_mb_var_sum = bits * s->c.mb_num / 100000;
rce.mb_var_sum = s->mb_num; rce.mb_var_sum = s->c.mb_num;
rce.qscale = FF_QP2LAMBDA * 2; rce.qscale = FF_QP2LAMBDA * 2;
rce.f_code = 2; rce.f_code = 2;
rce.b_code = 1; rce.b_code = 1;
rce.misc_bits = 1; rce.misc_bits = 1;
if (s->pict_type == AV_PICTURE_TYPE_I) { if (s->c.pict_type == AV_PICTURE_TYPE_I) {
rce.i_count = s->mb_num; rce.i_count = s->c.mb_num;
rce.i_tex_bits = bits; rce.i_tex_bits = bits;
rce.p_tex_bits = 0; rce.p_tex_bits = 0;
rce.mv_bits = 0; rce.mv_bits = 0;
@ -696,13 +697,13 @@ av_cold int ff_rate_control_init(MPVMainEncContext *const m)
get_qscale(m, &rce, rcc->pass1_wanted_bits / rcc->pass1_rc_eq_output_sum, i); get_qscale(m, &rce, rcc->pass1_wanted_bits / rcc->pass1_rc_eq_output_sum, i);
// FIXME misbehaves a little for variable fps // FIXME misbehaves a little for variable fps
rcc->pass1_wanted_bits += m->bit_rate / get_fps(s->avctx); rcc->pass1_wanted_bits += m->bit_rate / get_fps(avctx);
} }
} }
} }
if (s->adaptive_quant) { if (s->adaptive_quant) {
unsigned mb_array_size = s->mb_stride * s->mb_height; unsigned mb_array_size = s->c.mb_stride * s->c.mb_height;
rcc->cplx_tab = av_malloc_array(mb_array_size, 2 * sizeof(rcc->cplx_tab)); rcc->cplx_tab = av_malloc_array(mb_array_size, 2 * sizeof(rcc->cplx_tab));
if (!rcc->cplx_tab) if (!rcc->cplx_tab)
@ -726,14 +727,15 @@ av_cold void ff_rate_control_uninit(RateControlContext *rcc)
int ff_vbv_update(MPVMainEncContext *m, int frame_size) int ff_vbv_update(MPVMainEncContext *m, int frame_size)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
RateControlContext *const rcc = &m->rc_context; RateControlContext *const rcc = &m->rc_context;
const double fps = get_fps(s->avctx); AVCodecContext *const avctx = s->c.avctx;
const int buffer_size = s->avctx->rc_buffer_size; const double fps = get_fps(avctx);
const double min_rate = s->avctx->rc_min_rate / fps; const int buffer_size = avctx->rc_buffer_size;
const double max_rate = s->avctx->rc_max_rate / fps; const double min_rate = avctx->rc_min_rate / fps;
const double max_rate = avctx->rc_max_rate / fps;
ff_dlog(s->avctx, "%d %f %d %f %f\n", ff_dlog(avctx, "%d %f %d %f %f\n",
buffer_size, rcc->buffer_index, frame_size, min_rate, max_rate); buffer_size, rcc->buffer_index, frame_size, min_rate, max_rate);
if (buffer_size) { if (buffer_size) {
@ -741,9 +743,9 @@ int ff_vbv_update(MPVMainEncContext *m, int frame_size)
rcc->buffer_index -= frame_size; rcc->buffer_index -= frame_size;
if (rcc->buffer_index < 0) { if (rcc->buffer_index < 0) {
av_log(s->avctx, AV_LOG_ERROR, "rc buffer underflow\n"); av_log(avctx, AV_LOG_ERROR, "rc buffer underflow\n");
if (frame_size > max_rate && s->qscale == s->avctx->qmax) { if (frame_size > max_rate && s->c.qscale == avctx->qmax) {
av_log(s->avctx, AV_LOG_ERROR, "max bitrate possibly too small or try trellis with large lmax or increase qmax\n"); av_log(avctx, AV_LOG_ERROR, "max bitrate possibly too small or try trellis with large lmax or increase qmax\n");
} }
rcc->buffer_index = 0; rcc->buffer_index = 0;
} }
@ -754,12 +756,12 @@ int ff_vbv_update(MPVMainEncContext *m, int frame_size)
if (rcc->buffer_index > buffer_size) { if (rcc->buffer_index > buffer_size) {
int stuffing = ceil((rcc->buffer_index - buffer_size) / 8); int stuffing = ceil((rcc->buffer_index - buffer_size) / 8);
if (stuffing < 4 && s->codec_id == AV_CODEC_ID_MPEG4) if (stuffing < 4 && s->c.codec_id == AV_CODEC_ID_MPEG4)
stuffing = 4; stuffing = 4;
rcc->buffer_index -= 8 * stuffing; rcc->buffer_index -= 8 * stuffing;
if (s->avctx->debug & FF_DEBUG_RC) if (avctx->debug & FF_DEBUG_RC)
av_log(s->avctx, AV_LOG_DEBUG, "stuffing %d bytes\n", stuffing); av_log(avctx, AV_LOG_DEBUG, "stuffing %d bytes\n", stuffing);
return stuffing; return stuffing;
} }
@ -787,31 +789,30 @@ static void update_predictor(Predictor *p, double q, double var, double size)
static void adaptive_quantization(RateControlContext *const rcc, static void adaptive_quantization(RateControlContext *const rcc,
MPVMainEncContext *const m, double q) MPVMainEncContext *const m, double q)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
int i; const float lumi_masking = s->c.avctx->lumi_masking / (128.0 * 128.0);
const float lumi_masking = s->avctx->lumi_masking / (128.0 * 128.0); const float dark_masking = s->c.avctx->dark_masking / (128.0 * 128.0);
const float dark_masking = s->avctx->dark_masking / (128.0 * 128.0); const float temp_cplx_masking = s->c.avctx->temporal_cplx_masking;
const float temp_cplx_masking = s->avctx->temporal_cplx_masking; const float spatial_cplx_masking = s->c.avctx->spatial_cplx_masking;
const float spatial_cplx_masking = s->avctx->spatial_cplx_masking; const float p_masking = s->c.avctx->p_masking;
const float p_masking = s->avctx->p_masking;
const float border_masking = m->border_masking; const float border_masking = m->border_masking;
float bits_sum = 0.0; float bits_sum = 0.0;
float cplx_sum = 0.0; float cplx_sum = 0.0;
float *cplx_tab = rcc->cplx_tab; float *cplx_tab = rcc->cplx_tab;
float *bits_tab = rcc->bits_tab; float *bits_tab = rcc->bits_tab;
const int qmin = s->avctx->mb_lmin; const int qmin = s->c.avctx->mb_lmin;
const int qmax = s->avctx->mb_lmax; const int qmax = s->c.avctx->mb_lmax;
const int mb_width = s->mb_width; const int mb_width = s->c.mb_width;
const int mb_height = s->mb_height; const int mb_height = s->c.mb_height;
for (i = 0; i < s->mb_num; i++) { for (int i = 0; i < s->c.mb_num; i++) {
const int mb_xy = s->mb_index2xy[i]; const int mb_xy = s->c.mb_index2xy[i];
float temp_cplx = sqrt(s->mc_mb_var[mb_xy]); // FIXME merge in pow() float temp_cplx = sqrt(s->mc_mb_var[mb_xy]); // FIXME merge in pow()
float spat_cplx = sqrt(s->mb_var[mb_xy]); float spat_cplx = sqrt(s->mb_var[mb_xy]);
const int lumi = s->mb_mean[mb_xy]; const int lumi = s->mb_mean[mb_xy];
float bits, cplx, factor; float bits, cplx, factor;
int mb_x = mb_xy % s->mb_stride; int mb_x = mb_xy % s->c.mb_stride;
int mb_y = mb_xy / s->mb_stride; int mb_y = mb_xy / s->c.mb_stride;
int mb_distance; int mb_distance;
float mb_factor = 0.0; float mb_factor = 0.0;
if (spat_cplx < 4) if (spat_cplx < 4)
@ -865,7 +866,7 @@ static void adaptive_quantization(RateControlContext *const rcc,
/* handle qmin/qmax clipping */ /* handle qmin/qmax clipping */
if (s->mpv_flags & FF_MPV_FLAG_NAQ) { if (s->mpv_flags & FF_MPV_FLAG_NAQ) {
float factor = bits_sum / cplx_sum; float factor = bits_sum / cplx_sum;
for (i = 0; i < s->mb_num; i++) { for (int i = 0; i < s->c.mb_num; i++) {
float newq = q * cplx_tab[i] / bits_tab[i]; float newq = q * cplx_tab[i] / bits_tab[i];
newq *= factor; newq *= factor;
@ -883,8 +884,8 @@ static void adaptive_quantization(RateControlContext *const rcc,
cplx_sum = 0.001; cplx_sum = 0.001;
} }
for (i = 0; i < s->mb_num; i++) { for (int i = 0; i < s->c.mb_num; i++) {
const int mb_xy = s->mb_index2xy[i]; const int mb_xy = s->c.mb_index2xy[i];
float newq = q * cplx_tab[i] / bits_tab[i]; float newq = q * cplx_tab[i] / bits_tab[i];
int intq; int intq;
@ -904,39 +905,39 @@ static void adaptive_quantization(RateControlContext *const rcc,
void ff_get_2pass_fcode(MPVMainEncContext *const m) void ff_get_2pass_fcode(MPVMainEncContext *const m)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
const RateControlContext *rcc = &m->rc_context; const RateControlContext *rcc = &m->rc_context;
const RateControlEntry *rce = &rcc->entry[s->picture_number]; const RateControlEntry *rce = &rcc->entry[s->c.picture_number];
s->f_code = rce->f_code; s->c.f_code = rce->f_code;
s->b_code = rce->b_code; s->c.b_code = rce->b_code;
} }
// FIXME rd or at least approx for dquant // FIXME rd or at least approx for dquant
float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run) float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
RateControlContext *rcc = &m->rc_context; RateControlContext *rcc = &m->rc_context;
AVCodecContext *const a = s->c.avctx;
float q; float q;
int qmin, qmax; int qmin, qmax;
float br_compensation; float br_compensation;
double diff; double diff;
double short_term_q; double short_term_q;
double fps; double fps;
int picture_number = s->picture_number; int picture_number = s->c.picture_number;
int64_t wanted_bits; int64_t wanted_bits;
AVCodecContext *a = s->avctx;
RateControlEntry local_rce, *rce; RateControlEntry local_rce, *rce;
double bits; double bits;
double rate_factor; double rate_factor;
int64_t var; int64_t var;
const int pict_type = s->pict_type; const int pict_type = s->c.pict_type;
emms_c(); emms_c();
get_qminmax(&qmin, &qmax, m, pict_type); get_qminmax(&qmin, &qmax, m, pict_type);
fps = get_fps(s->avctx); fps = get_fps(s->c.avctx);
/* update predictors */ /* update predictors */
if (picture_number > 2 && !dry_run) { if (picture_number > 2 && !dry_run) {
const int64_t last_var = const int64_t last_var =
@ -949,10 +950,10 @@ float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
m->frame_bits - m->stuffing_bits); m->frame_bits - m->stuffing_bits);
} }
if (s->avctx->flags & AV_CODEC_FLAG_PASS2) { if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
av_assert0(picture_number >= 0); av_assert0(picture_number >= 0);
if (picture_number >= rcc->num_entries) { if (picture_number >= rcc->num_entries) {
av_log(s->avctx, AV_LOG_ERROR, "Input is longer than 2-pass log file\n"); av_log(s->c.avctx, AV_LOG_ERROR, "Input is longer than 2-pass log file\n");
return -1; return -1;
} }
rce = &rcc->entry[picture_number]; rce = &rcc->entry[picture_number];
@ -965,17 +966,17 @@ float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
/* FIXME add a dts field to AVFrame and ensure it is set and use it /* FIXME add a dts field to AVFrame and ensure it is set and use it
* here instead of reordering but the reordering is simpler for now * here instead of reordering but the reordering is simpler for now
* until H.264 B-pyramid must be handled. */ * until H.264 B-pyramid must be handled. */
if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) if (s->c.pict_type == AV_PICTURE_TYPE_B || s->c.low_delay)
dts_pic = s->cur_pic.ptr; dts_pic = s->c.cur_pic.ptr;
else else
dts_pic = s->last_pic.ptr; dts_pic = s->c.last_pic.ptr;
if (!dts_pic || dts_pic->f->pts == AV_NOPTS_VALUE) if (!dts_pic || dts_pic->f->pts == AV_NOPTS_VALUE)
wanted_bits_double = m->bit_rate * (double)picture_number / fps; wanted_bits_double = m->bit_rate * (double)picture_number / fps;
else else
wanted_bits_double = m->bit_rate * (double)dts_pic->f->pts / fps; wanted_bits_double = m->bit_rate * (double)dts_pic->f->pts / fps;
if (wanted_bits_double > INT64_MAX) { if (wanted_bits_double > INT64_MAX) {
av_log(s->avctx, AV_LOG_WARNING, "Bits exceed 64bit range\n"); av_log(s->c.avctx, AV_LOG_WARNING, "Bits exceed 64bit range\n");
wanted_bits = INT64_MAX; wanted_bits = INT64_MAX;
} else } else
wanted_bits = (int64_t)wanted_bits_double; wanted_bits = (int64_t)wanted_bits_double;
@ -989,12 +990,12 @@ float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
var = pict_type == AV_PICTURE_TYPE_I ? m->mb_var_sum : m->mc_mb_var_sum; var = pict_type == AV_PICTURE_TYPE_I ? m->mb_var_sum : m->mc_mb_var_sum;
short_term_q = 0; /* avoid warning */ short_term_q = 0; /* avoid warning */
if (s->avctx->flags & AV_CODEC_FLAG_PASS2) { if (s->c.avctx->flags & AV_CODEC_FLAG_PASS2) {
if (pict_type != AV_PICTURE_TYPE_I) if (pict_type != AV_PICTURE_TYPE_I)
av_assert0(pict_type == rce->new_pict_type); av_assert0(pict_type == rce->new_pict_type);
q = rce->new_qscale / br_compensation; q = rce->new_qscale / br_compensation;
ff_dlog(s->avctx, "%f %f %f last:%d var:%"PRId64" type:%d//\n", q, rce->new_qscale, ff_dlog(s->c.avctx, "%f %f %f last:%d var:%"PRId64" type:%d//\n", q, rce->new_qscale,
br_compensation, m->frame_bits, var, pict_type); br_compensation, m->frame_bits, var, pict_type);
} else { } else {
rce->pict_type = rce->pict_type =
@ -1002,13 +1003,13 @@ float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
rce->mc_mb_var_sum = m->mc_mb_var_sum; rce->mc_mb_var_sum = m->mc_mb_var_sum;
rce->mb_var_sum = m->mb_var_sum; rce->mb_var_sum = m->mb_var_sum;
rce->qscale = FF_QP2LAMBDA * 2; rce->qscale = FF_QP2LAMBDA * 2;
rce->f_code = s->f_code; rce->f_code = s->c.f_code;
rce->b_code = s->b_code; rce->b_code = s->c.b_code;
rce->misc_bits = 1; rce->misc_bits = 1;
bits = predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var)); bits = predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var));
if (pict_type == AV_PICTURE_TYPE_I) { if (pict_type == AV_PICTURE_TYPE_I) {
rce->i_count = s->mb_num; rce->i_count = s->c.mb_num;
rce->i_tex_bits = bits; rce->i_tex_bits = bits;
rce->p_tex_bits = 0; rce->p_tex_bits = 0;
rce->mv_bits = 0; rce->mv_bits = 0;
@ -1052,8 +1053,8 @@ float ff_rate_estimate_qscale(MPVMainEncContext *const m, int dry_run)
av_assert0(q > 0.0); av_assert0(q > 0.0);
} }
if (s->avctx->debug & FF_DEBUG_RC) { if (s->c.avctx->debug & FF_DEBUG_RC) {
av_log(s->avctx, AV_LOG_DEBUG, av_log(s->c.avctx, AV_LOG_DEBUG,
"%c qp:%d<%2.1f<%d %d want:%"PRId64" total:%"PRId64" comp:%f st_q:%2.2f " "%c qp:%d<%2.1f<%d %d want:%"PRId64" total:%"PRId64" comp:%f st_q:%2.2f "
"size:%d var:%"PRId64"/%"PRId64" br:%"PRId64" fps:%d\n", "size:%d var:%"PRId64"/%"PRId64" br:%"PRId64" fps:%d\n",
av_get_picture_type_char(pict_type), av_get_picture_type_char(pict_type),

View File

@ -24,55 +24,55 @@
#include "libavutil/cpu.h" #include "libavutil/cpu.h"
#include "libavutil/riscv/cpu.h" #include "libavutil/riscv/cpu.h"
#include "libavcodec/me_cmp.h" #include "libavcodec/me_cmp.h"
#include "libavcodec/mpegvideo.h" #include "libavcodec/mpegvideoenc.h"
int ff_pix_abs16_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_rvv(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_rvv(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_x2_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_x2_rvv(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_x2_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_x2_rvv(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs16_y2_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs16_y2_rvv(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_pix_abs8_y2_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_pix_abs8_y2_rvv(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sse16_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sse16_rvv(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sse8_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sse8_rvv(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sse4_rvv(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sse4_rvv(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_vsse16_rvv(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h); int ff_vsse16_rvv(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h);
int ff_vsse8_rvv(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h); int ff_vsse8_rvv(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h);
int ff_vsse_intra16_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h); int ff_vsse_intra16_rvv(MPVEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
int ff_vsse_intra8_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h); int ff_vsse_intra8_rvv(MPVEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
int ff_vsad16_rvv(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h); int ff_vsad16_rvv(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h);
int ff_vsad8_rvv(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h); int ff_vsad8_rvv(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2, ptrdiff_t stride, int h);
int ff_vsad_intra16_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h); int ff_vsad_intra16_rvv(MPVEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
int ff_vsad_intra8_rvv(MpegEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h); int ff_vsad_intra8_rvv(MPVEncContext *c, const uint8_t *s, const uint8_t *dummy, ptrdiff_t stride, int h);
int ff_nsse16_rvv(int multiplier, const uint8_t *s1, const uint8_t *s2, int ff_nsse16_rvv(int multiplier, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_nsse8_rvv(int multiplier, const uint8_t *s1, const uint8_t *s2, int ff_nsse8_rvv(int multiplier, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
static int nsse16_rvv_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, static int nsse16_rvv_wrapper(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
if (c) if (c)
return ff_nsse16_rvv(c->avctx->nsse_weight, s1, s2, stride, h); return ff_nsse16_rvv(c->c.avctx->nsse_weight, s1, s2, stride, h);
else else
return ff_nsse16_rvv(8, s1, s2, stride, h); return ff_nsse16_rvv(8, s1, s2, stride, h);
} }
static int nsse8_rvv_wrapper(MpegEncContext *c, const uint8_t *s1, const uint8_t *s2, static int nsse8_rvv_wrapper(MPVEncContext *c, const uint8_t *s1, const uint8_t *s2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
if (c) if (c)
return ff_nsse8_rvv(c->avctx->nsse_weight, s1, s2, stride, h); return ff_nsse8_rvv(c->c.avctx->nsse_weight, s1, s2, stride, h);
else else
return ff_nsse8_rvv(8, s1, s2, stride, h); return ff_nsse8_rvv(8, s1, s2, stride, h);
} }

View File

@ -33,33 +33,33 @@
int ff_rv10_encode_picture_header(MPVMainEncContext *const m) int ff_rv10_encode_picture_header(MPVMainEncContext *const m)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
int full_frame= 0; int full_frame= 0;
align_put_bits(&s->pb); align_put_bits(&s->pb);
put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 1, 1); /* marker */
put_bits(&s->pb, 1, (s->pict_type == AV_PICTURE_TYPE_P)); put_bits(&s->pb, 1, (s->c.pict_type == AV_PICTURE_TYPE_P));
put_bits(&s->pb, 1, 0); /* not PB-mframe */ put_bits(&s->pb, 1, 0); /* not PB-mframe */
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->c.qscale);
if (s->pict_type == AV_PICTURE_TYPE_I) { if (s->c.pict_type == AV_PICTURE_TYPE_I) {
/* specific MPEG like DC coding not used */ /* specific MPEG like DC coding not used */
} }
/* if multiple packets per frame are sent, the position at which /* if multiple packets per frame are sent, the position at which
to display the macroblocks is coded here */ to display the macroblocks is coded here */
if(!full_frame){ if(!full_frame){
if (s->mb_width * s->mb_height >= (1U << 12)) { if (s->c.mb_width * s->c.mb_height >= (1U << 12)) {
avpriv_report_missing_feature(s->avctx, "Encoding frames with %d (>= 4096) macroblocks", avpriv_report_missing_feature(s->c.avctx, "Encoding frames with %d (>= 4096) macroblocks",
s->mb_width * s->mb_height); s->c.mb_width * s->c.mb_height);
return AVERROR(ENOSYS); return AVERROR(ENOSYS);
} }
put_bits(&s->pb, 6, 0); /* mb_x */ put_bits(&s->pb, 6, 0); /* mb_x */
put_bits(&s->pb, 6, 0); /* mb_y */ put_bits(&s->pb, 6, 0); /* mb_y */
put_bits(&s->pb, 12, s->mb_width * s->mb_height); put_bits(&s->pb, 12, s->c.mb_width * s->c.mb_height);
} }
put_bits(&s->pb, 3, 0); /* ignored */ put_bits(&s->pb, 3, 0); /* ignored */

View File

@ -36,32 +36,32 @@
int ff_rv20_encode_picture_header(MPVMainEncContext *const m) int ff_rv20_encode_picture_header(MPVMainEncContext *const m)
{ {
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
put_bits(&s->pb, 2, s->pict_type); //I 0 vs. 1 ? put_bits(&s->pb, 2, s->c.pict_type); //I 0 vs. 1 ?
put_bits(&s->pb, 1, 0); /* unknown bit */ put_bits(&s->pb, 1, 0); /* unknown bit */
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->c.qscale);
put_sbits(&s->pb, 8, s->picture_number); //FIXME wrong, but correct is not known put_sbits(&s->pb, 8, s->c.picture_number); //FIXME wrong, but correct is not known
s->mb_x= s->mb_y= 0; s->c.mb_x = s->c.mb_y = 0;
ff_h263_encode_mba(s); ff_h263_encode_mba(s);
put_bits(&s->pb, 1, s->no_rounding); put_bits(&s->pb, 1, s->c.no_rounding);
av_assert0(s->f_code == 1); av_assert0(s->c.f_code == 1);
av_assert0(s->unrestricted_mv == 0); av_assert0(!s->c.unrestricted_mv);
av_assert0(s->alt_inter_vlc == 0); av_assert0(!s->c.alt_inter_vlc);
av_assert0(s->umvplus == 0); av_assert0(!s->c.umvplus);
av_assert0(s->modified_quant==1); av_assert0(s->c.modified_quant==1);
av_assert0(s->loop_filter==1); av_assert0(s->c.loop_filter==1);
s->h263_aic= s->pict_type == AV_PICTURE_TYPE_I; s->c.h263_aic = s->c.pict_type == AV_PICTURE_TYPE_I;
if(s->h263_aic){ if (s->c.h263_aic) {
s->y_dc_scale_table= s->c.y_dc_scale_table =
s->c_dc_scale_table= ff_aic_dc_scale_table; s->c.c_dc_scale_table = ff_aic_dc_scale_table;
}else{ }else{
s->y_dc_scale_table= s->c.y_dc_scale_table =
s->c_dc_scale_table= ff_mpeg1_dc_scale_table; s->c.c_dc_scale_table = ff_mpeg1_dc_scale_table;
} }
return 0; return 0;
} }

View File

@ -741,7 +741,7 @@ void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
decomposition_count, y); decomposition_count, y);
} }
static inline int w_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, static inline int w_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size,
int w, int h, int type) int w, int h, int type)
{ {
int s, i, j; int s, i, j;
@ -810,32 +810,32 @@ static inline int w_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8
return s >> 9; return s >> 9;
} }
static int w53_8_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h) static int w53_8_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
{ {
return w_c(v, pix1, pix2, line_size, 8, h, 1); return w_c(v, pix1, pix2, line_size, 8, h, 1);
} }
static int w97_8_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h) static int w97_8_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
{ {
return w_c(v, pix1, pix2, line_size, 8, h, 0); return w_c(v, pix1, pix2, line_size, 8, h, 0);
} }
static int w53_16_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h) static int w53_16_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
{ {
return w_c(v, pix1, pix2, line_size, 16, h, 1); return w_c(v, pix1, pix2, line_size, 16, h, 1);
} }
static int w97_16_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h) static int w97_16_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
{ {
return w_c(v, pix1, pix2, line_size, 16, h, 0); return w_c(v, pix1, pix2, line_size, 16, h, 0);
} }
int ff_w53_32_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h) int ff_w53_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
{ {
return w_c(v, pix1, pix2, line_size, 32, h, 1); return w_c(v, pix1, pix2, line_size, 32, h, 1);
} }
int ff_w97_32_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h) int ff_w97_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h)
{ {
return w_c(v, pix1, pix2, line_size, 32, h, 0); return w_c(v, pix1, pix2, line_size, 32, h, 0);
} }

View File

@ -26,7 +26,7 @@
#include "libavutil/attributes.h" #include "libavutil/attributes.h"
struct MpegEncContext; typedef struct MPVEncContext MPVEncContext;
typedef int DWTELEM; typedef int DWTELEM;
typedef short IDWTELEM; typedef short IDWTELEM;
@ -144,8 +144,8 @@ void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride,
int src_y, int src_stride, slice_buffer *sb, int src_y, int src_stride, slice_buffer *sb,
int add, uint8_t *dst8); int add, uint8_t *dst8);
int ff_w53_32_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h); int ff_w53_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h);
int ff_w97_32_c(struct MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h); int ff_w97_32_c(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t line_size, int h);
void ff_spatial_dwt(int *buffer, int *temp, int width, int height, int stride, void ff_spatial_dwt(int *buffer, int *temp, int width, int height, int stride,
int type, int decomposition_count); int type, int decomposition_count);

View File

@ -61,7 +61,7 @@ typedef struct SnowEncContext {
int scenechange_threshold; int scenechange_threshold;
MECmpContext mecc; MECmpContext mecc;
MPVMainEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MpegEncContext, so this will be removed then (FIXME/XXX) MPVMainEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MPVEncContext, so this will be removed then (FIXME/XXX)
MPVPicture cur_pic, last_pic; MPVPicture cur_pic, last_pic;
#define ME_CACHE_SIZE 1024 #define ME_CACHE_SIZE 1024
unsigned me_cache[ME_CACHE_SIZE]; unsigned me_cache[ME_CACHE_SIZE];
@ -160,7 +160,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
{ {
SnowEncContext *const enc = avctx->priv_data; SnowEncContext *const enc = avctx->priv_data;
SnowContext *const s = &enc->com; SnowContext *const s = &enc->com;
MpegEncContext *const mpv = &enc->m.s; MPVEncContext *const mpv = &enc->m.s;
int plane_index, ret; int plane_index, ret;
int i; int i;
@ -217,7 +217,7 @@ static av_cold int encode_init(AVCodecContext *avctx)
mcf(12,12) mcf(12,12)
ff_me_cmp_init(&enc->mecc, avctx); ff_me_cmp_init(&enc->mecc, avctx);
ret = ff_me_init(&mpv->me, avctx, &enc->mecc, 0); ret = ff_me_init(&mpv->c.me, avctx, &enc->mecc, 0);
if (ret < 0) if (ret < 0)
return ret; return ret;
ff_mpegvideoencdsp_init(&enc->mpvencdsp, avctx); ff_mpegvideoencdsp_init(&enc->mpvencdsp, avctx);
@ -226,21 +226,21 @@ static av_cold int encode_init(AVCodecContext *avctx)
s->version=0; s->version=0;
mpv->avctx = avctx; mpv->c.avctx = avctx;
enc->m.bit_rate = avctx->bit_rate; enc->m.bit_rate = avctx->bit_rate;
enc->m.lmin = avctx->mb_lmin; enc->m.lmin = avctx->mb_lmin;
enc->m.lmax = avctx->mb_lmax; enc->m.lmax = avctx->mb_lmax;
mpv->mb_num = (avctx->width * avctx->height + 255) / 256; // For ratecontrol mpv->c.mb_num = (avctx->width * avctx->height + 255) / 256; // For ratecontrol
mpv->me.temp = mpv->c.me.temp =
mpv->me.scratchpad = av_calloc(avctx->width + 64, 2*16*2*sizeof(uint8_t)); mpv->c.me.scratchpad = av_calloc(avctx->width + 64, 2*16*2*sizeof(uint8_t));
mpv->sc.obmc_scratchpad= av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t)); mpv->c.sc.obmc_scratchpad = av_mallocz(MB_SIZE*MB_SIZE*12*sizeof(uint32_t));
mpv->me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*mpv->me.map)); mpv->c.me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*mpv->c.me.map));
if (!mpv->me.scratchpad || !mpv->me.map || !mpv->sc.obmc_scratchpad) if (!mpv->c.me.scratchpad || !mpv->c.me.map || !mpv->c.sc.obmc_scratchpad)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
mpv->me.score_map = mpv->me.map + ME_MAP_SIZE; mpv->c.me.score_map = mpv->c.me.map + ME_MAP_SIZE;
mpv->me.mv_penalty = ff_h263_get_mv_penalty(); mpv->c.me.mv_penalty = ff_h263_get_mv_penalty();
s->max_ref_frames = av_clip(avctx->refs, 1, MAX_REF_FRAMES); s->max_ref_frames = av_clip(avctx->refs, 1, MAX_REF_FRAMES);
@ -369,7 +369,7 @@ static inline int get_penalty_factor(int lambda, int lambda2, int type){
static int encode_q_branch(SnowEncContext *enc, int level, int x, int y) static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
{ {
SnowContext *const s = &enc->com; SnowContext *const s = &enc->com;
MotionEstContext *const c = &enc->m.s.me; MotionEstContext *const c = &enc->m.s.c.me;
uint8_t p_buffer[1024]; uint8_t p_buffer[1024];
uint8_t i_buffer[1024]; uint8_t i_buffer[1024];
uint8_t p_state[sizeof(s->block_state)]; uint8_t p_state[sizeof(s->block_state)];
@ -435,9 +435,9 @@ static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
last_mv[2][0]= bottom->mx; last_mv[2][0]= bottom->mx;
last_mv[2][1]= bottom->my; last_mv[2][1]= bottom->my;
enc->m.s.mb_stride = 2; enc->m.s.c.mb_stride = 2;
enc->m.s.mb_x = enc->m.s.c.mb_x =
enc->m.s.mb_y = 0; enc->m.s.c.mb_y = 0;
c->skip= 0; c->skip= 0;
av_assert1(c-> stride == stride); av_assert1(c-> stride == stride);
@ -446,7 +446,7 @@ static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
c->penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp); c->penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_cmp);
c->sub_penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp); c->sub_penalty_factor= get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->me_sub_cmp);
c->mb_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp); c->mb_penalty_factor = get_penalty_factor(enc->lambda, enc->lambda2, c->avctx->mb_cmp);
c->current_mv_penalty = c->mv_penalty[enc->m.s.f_code=1] + MAX_DMV; c->current_mv_penalty = c->mv_penalty[enc->m.s.c.f_code=1] + MAX_DMV;
c->xmin = - x*block_w - 16+3; c->xmin = - x*block_w - 16+3;
c->ymin = - y*block_w - 16+3; c->ymin = - y*block_w - 16+3;
@ -569,7 +569,7 @@ static int encode_q_branch(SnowEncContext *enc, int level, int x, int y)
if (vard <= 64 || vard < varc) if (vard <= 64 || vard < varc)
c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc); c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc);
else else
c->scene_change_score += enc->m.s.qscale; c->scene_change_score += enc->m.s.c.qscale;
} }
if(level!=s->block_max_depth){ if(level!=s->block_max_depth){
@ -672,7 +672,7 @@ static int get_dc(SnowEncContext *enc, int mb_x, int mb_y, int plane_index)
const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size; const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
const int ref_stride= s->current_picture->linesize[plane_index]; const int ref_stride= s->current_picture->linesize[plane_index];
const uint8_t *src = s->input_picture->data[plane_index]; const uint8_t *src = s->input_picture->data[plane_index];
IDWTELEM *dst= (IDWTELEM*)enc->m.s.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned IDWTELEM *dst = (IDWTELEM*)enc->m.s.c.sc.obmc_scratchpad + plane_index * block_size * block_size * 4; //FIXME change to unsigned
const int b_stride = s->b_width << s->block_max_depth; const int b_stride = s->b_width << s->block_max_depth;
const int w= p->width; const int w= p->width;
const int h= p->height; const int h= p->height;
@ -770,7 +770,7 @@ static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
const int ref_stride= s->current_picture->linesize[plane_index]; const int ref_stride= s->current_picture->linesize[plane_index];
uint8_t *dst= s->current_picture->data[plane_index]; uint8_t *dst= s->current_picture->data[plane_index];
const uint8_t *src = s->input_picture->data[plane_index]; const uint8_t *src = s->input_picture->data[plane_index];
IDWTELEM *pred= (IDWTELEM*)enc->m.s.sc.obmc_scratchpad + plane_index*block_size*block_size*4; IDWTELEM *pred = (IDWTELEM*)enc->m.s.c.sc.obmc_scratchpad + plane_index * block_size * block_size * 4;
uint8_t *cur = s->scratchbuf; uint8_t *cur = s->scratchbuf;
uint8_t *tmp = s->emu_edge_buffer; uint8_t *tmp = s->emu_edge_buffer;
const int b_stride = s->b_width << s->block_max_depth; const int b_stride = s->b_width << s->block_max_depth;
@ -840,12 +840,12 @@ static int get_block_rd(SnowEncContext *enc, int mb_x, int mb_y,
distortion = 0; distortion = 0;
for(i=0; i<4; i++){ for(i=0; i<4; i++){
int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride; int off = sx+16*(i&1) + (sy+16*(i>>1))*ref_stride;
distortion += enc->m.s.me.me_cmp[0](&enc->m.s, src + off, dst + off, ref_stride, 16); distortion += enc->m.s.c.me.me_cmp[0](&enc->m.s, src + off, dst + off, ref_stride, 16);
} }
} }
}else{ }else{
av_assert2(block_w==8); av_assert2(block_w==8);
distortion = enc->m.s.me.me_cmp[0](&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2); distortion = enc->m.s.c.me.me_cmp[0](&enc->m.s, src + sx + sy*ref_stride, dst + sx + sy*ref_stride, ref_stride, block_w*2);
} }
if(plane_index==0){ if(plane_index==0){
@ -911,7 +911,7 @@ static int get_4block_rd(SnowEncContext *enc, int mb_x, int mb_y, int plane_inde
} }
av_assert1(block_w== 8 || block_w==16); av_assert1(block_w== 8 || block_w==16);
distortion += enc->m.s.me.me_cmp[block_w==8](&enc->m.s, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h); distortion += enc->m.s.c.me.me_cmp[block_w==8](&enc->m.s, src + x + y*ref_stride, dst + x + y*ref_stride, ref_stride, block_h);
} }
if(plane_index==0){ if(plane_index==0){
@ -1759,7 +1759,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
{ {
SnowEncContext *const enc = avctx->priv_data; SnowEncContext *const enc = avctx->priv_data;
SnowContext *const s = &enc->com; SnowContext *const s = &enc->com;
MpegEncContext *const mpv = &enc->m.s; MPVEncContext *const mpv = &enc->m.s;
RangeCoder * const c= &s->c; RangeCoder * const c= &s->c;
AVCodecInternal *avci = avctx->internal; AVCodecInternal *avci = avctx->internal;
AVFrame *pic; AVFrame *pic;
@ -1793,9 +1793,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
pic->pict_type = pict->pict_type; pic->pict_type = pict->pict_type;
pic->quality = pict->quality; pic->quality = pict->quality;
mpv->picture_number = avctx->frame_num; mpv->c.picture_number = avctx->frame_num;
if(avctx->flags&AV_CODEC_FLAG_PASS2){ if(avctx->flags&AV_CODEC_FLAG_PASS2){
mpv->pict_type = pic->pict_type = enc->m.rc_context.entry[avctx->frame_num].new_pict_type; mpv->c.pict_type = pic->pict_type = enc->m.rc_context.entry[avctx->frame_num].new_pict_type;
s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I; s->keyframe = pic->pict_type == AV_PICTURE_TYPE_I;
if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) { if(!(avctx->flags&AV_CODEC_FLAG_QSCALE)) {
pic->quality = ff_rate_estimate_qscale(&enc->m, 0); pic->quality = ff_rate_estimate_qscale(&enc->m, 0);
@ -1804,7 +1804,7 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
} }
}else{ }else{
s->keyframe= avctx->gop_size==0 || avctx->frame_num % avctx->gop_size == 0; s->keyframe= avctx->gop_size==0 || avctx->frame_num % avctx->gop_size == 0;
mpv->pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P; mpv->c.pict_type = pic->pict_type = s->keyframe ? AV_PICTURE_TYPE_I : AV_PICTURE_TYPE_P;
} }
if (enc->pass1_rc && avctx->frame_num == 0) if (enc->pass1_rc && avctx->frame_num == 0)
@ -1841,9 +1841,9 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
if (ret < 0) if (ret < 0)
return ret; return ret;
mpv->cur_pic.ptr = &enc->cur_pic; mpv->c.cur_pic.ptr = &enc->cur_pic;
mpv->cur_pic.ptr->f = s->current_picture; mpv->c.cur_pic.ptr->f = s->current_picture;
mpv->cur_pic.ptr->f->pts = pict->pts; mpv->c.cur_pic.ptr->f->pts = pict->pts;
if(pic->pict_type == AV_PICTURE_TYPE_P){ if(pic->pict_type == AV_PICTURE_TYPE_P){
int block_width = (width +15)>>4; int block_width = (width +15)>>4;
int block_height= (height+15)>>4; int block_height= (height+15)>>4;
@ -1852,35 +1852,35 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
av_assert0(s->current_picture->data[0]); av_assert0(s->current_picture->data[0]);
av_assert0(s->last_picture[0]->data[0]); av_assert0(s->last_picture[0]->data[0]);
mpv->avctx = s->avctx; mpv->c.avctx = s->avctx;
mpv->last_pic.ptr = &enc->last_pic; mpv->c.last_pic.ptr = &enc->last_pic;
mpv->last_pic.ptr->f = s->last_picture[0]; mpv->c.last_pic.ptr->f = s->last_picture[0];
mpv-> new_pic = s->input_picture; mpv-> new_pic = s->input_picture;
mpv->linesize = stride; mpv->c.linesize = stride;
mpv->uvlinesize = s->current_picture->linesize[1]; mpv->c.uvlinesize = s->current_picture->linesize[1];
mpv->width = width; mpv->c.width = width;
mpv->height = height; mpv->c.height = height;
mpv->mb_width = block_width; mpv->c.mb_width = block_width;
mpv->mb_height = block_height; mpv->c.mb_height = block_height;
mpv->mb_stride = mpv->mb_width + 1; mpv->c.mb_stride = mpv->c.mb_width + 1;
mpv->b8_stride = 2 * mpv->mb_width + 1; mpv->c.b8_stride = 2 * mpv->c.mb_width + 1;
mpv->f_code = 1; mpv->c.f_code = 1;
mpv->pict_type = pic->pict_type; mpv->c.pict_type = pic->pict_type;
mpv->me.motion_est = enc->motion_est; mpv->c.me.motion_est = enc->motion_est;
mpv->me.scene_change_score = 0; mpv->c.me.scene_change_score = 0;
mpv->me.dia_size = avctx->dia_size; mpv->c.me.dia_size = avctx->dia_size;
mpv->quarter_sample = (s->avctx->flags & AV_CODEC_FLAG_QPEL)!=0; mpv->c.quarter_sample = (s->avctx->flags & AV_CODEC_FLAG_QPEL)!=0;
mpv->out_format = FMT_H263; mpv->c.out_format = FMT_H263;
mpv->unrestricted_mv = 1; mpv->c.unrestricted_mv = 1;
mpv->lambda = enc->lambda; mpv->c.lambda = enc->lambda;
mpv->qscale = (mpv->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7); mpv->c.qscale = (mpv->c.lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7);
enc->lambda2 = mpv->lambda2 = (mpv->lambda*mpv->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT; enc->lambda2 = mpv->c.lambda2 = (mpv->c.lambda*mpv->c.lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT;
mpv->qdsp = enc->qdsp; //move mpv->c.qdsp = enc->qdsp; //move
mpv->hdsp = s->hdsp; mpv->c.hdsp = s->hdsp;
ff_me_init_pic(mpv); ff_me_init_pic(mpv);
s->hdsp = mpv->hdsp; s->hdsp = mpv->c.hdsp;
} }
if (enc->pass1_rc) { if (enc->pass1_rc) {
@ -1901,7 +1901,7 @@ redo_frame:
return AVERROR(EINVAL); return AVERROR(EINVAL);
} }
mpv->pict_type = pic->pict_type; mpv->c.pict_type = pic->pict_type;
s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0; s->qbias = pic->pict_type == AV_PICTURE_TYPE_P ? 2 : 0;
ff_snow_common_init_after_header(avctx); ff_snow_common_init_after_header(avctx);
@ -1937,7 +1937,7 @@ redo_frame:
if( plane_index==0 if( plane_index==0
&& pic->pict_type == AV_PICTURE_TYPE_P && pic->pict_type == AV_PICTURE_TYPE_P
&& !(avctx->flags&AV_CODEC_FLAG_PASS2) && !(avctx->flags&AV_CODEC_FLAG_PASS2)
&& mpv->me.scene_change_score > enc->scenechange_threshold) { && mpv->c.me.scene_change_score > enc->scenechange_threshold) {
ff_init_range_encoder(c, pkt->data, pkt->size); ff_init_range_encoder(c, pkt->data, pkt->size);
ff_build_rac_states(c, (1LL<<32)/20, 256-8); ff_build_rac_states(c, (1LL<<32)/20, 256-8);
pic->pict_type= AV_PICTURE_TYPE_I; pic->pict_type= AV_PICTURE_TYPE_I;
@ -2058,7 +2058,7 @@ redo_frame:
} }
if(avctx->flags&AV_CODEC_FLAG_PASS1) if(avctx->flags&AV_CODEC_FLAG_PASS1)
ff_write_pass1_stats(&enc->m); ff_write_pass1_stats(&enc->m);
enc->m.last_pict_type = mpv->pict_type; enc->m.last_pict_type = mpv->c.pict_type;
emms_c(); emms_c();
@ -2092,10 +2092,10 @@ static av_cold int encode_end(AVCodecContext *avctx)
av_freep(&s->ref_scores[i]); av_freep(&s->ref_scores[i]);
} }
enc->m.s.me.temp = NULL; enc->m.s.c.me.temp = NULL;
av_freep(&enc->m.s.me.scratchpad); av_freep(&enc->m.s.c.me.scratchpad);
av_freep(&enc->m.s.me.map); av_freep(&enc->m.s.c.me.map);
av_freep(&enc->m.s.sc.obmc_scratchpad); av_freep(&enc->m.s.c.sc.obmc_scratchpad);
av_freep(&avctx->stats_out); av_freep(&avctx->stats_out);

View File

@ -98,9 +98,9 @@ static av_cold void speedhq_init_static_data(void)
static int speedhq_encode_picture_header(MPVMainEncContext *const m) static int speedhq_encode_picture_header(MPVMainEncContext *const m)
{ {
SpeedHQEncContext *const ctx = (SpeedHQEncContext*)m; SpeedHQEncContext *const ctx = (SpeedHQEncContext*)m;
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
put_bits_le(&s->pb, 8, 100 - s->qscale * 2); /* FIXME why doubled */ put_bits_le(&s->pb, 8, 100 - s->c.qscale * 2); /* FIXME why doubled */
put_bits_le(&s->pb, 24, 4); /* no second field */ put_bits_le(&s->pb, 24, 4); /* no second field */
ctx->slice_start = 4; ctx->slice_start = 4;
@ -110,7 +110,7 @@ static int speedhq_encode_picture_header(MPVMainEncContext *const m)
return 0; return 0;
} }
void ff_speedhq_end_slice(MpegEncContext *s) void ff_speedhq_end_slice(MPVEncContext *const s)
{ {
SpeedHQEncContext *ctx = (SpeedHQEncContext*)s; SpeedHQEncContext *ctx = (SpeedHQEncContext*)s;
int slice_len; int slice_len;
@ -158,7 +158,7 @@ static inline void encode_dc(PutBitContext *pb, int diff, int component)
} }
} }
static void encode_block(MpegEncContext *s, int16_t *block, int n) static void encode_block(MPVEncContext *const s, const int16_t block[], int n)
{ {
int alevel, level, last_non_zero, dc, i, j, run, last_index, sign; int alevel, level, last_non_zero, dc, i, j, run, last_index, sign;
int code; int code;
@ -167,16 +167,16 @@ static void encode_block(MpegEncContext *s, int16_t *block, int n)
/* DC coef */ /* DC coef */
component = (n <= 3 ? 0 : (n&1) + 1); component = (n <= 3 ? 0 : (n&1) + 1);
dc = block[0]; /* overflow is impossible */ dc = block[0]; /* overflow is impossible */
val = s->last_dc[component] - dc; /* opposite of most codecs */ val = s->c.last_dc[component] - dc; /* opposite of most codecs */
encode_dc(&s->pb, val, component); encode_dc(&s->pb, val, component);
s->last_dc[component] = dc; s->c.last_dc[component] = dc;
/* now quantify & encode AC coefs */ /* now quantify & encode AC coefs */
last_non_zero = 0; last_non_zero = 0;
last_index = s->block_last_index[n]; last_index = s->c.block_last_index[n];
for (i = 1; i <= last_index; i++) { for (i = 1; i <= last_index; i++) {
j = s->intra_scantable.permutated[i]; j = s->c.intra_scantable.permutated[i];
level = block[j]; level = block[j];
/* encode using VLC */ /* encode using VLC */
@ -207,14 +207,14 @@ static void encode_block(MpegEncContext *s, int16_t *block, int n)
put_bits_le(&s->pb, 4, 6); put_bits_le(&s->pb, 4, 6);
} }
static void speedhq_encode_mb(MpegEncContext *const s, int16_t block[12][64], static void speedhq_encode_mb(MPVEncContext *const s, int16_t block[12][64],
int unused_x, int unused_y) int unused_x, int unused_y)
{ {
int i; int i;
for(i=0;i<6;i++) { for(i=0;i<6;i++) {
encode_block(s, block[i], i); encode_block(s, block[i], i);
} }
if (s->chroma_format == CHROMA_444) { if (s->c.chroma_format == CHROMA_444) {
encode_block(s, block[8], 8); encode_block(s, block[8], 8);
encode_block(s, block[9], 9); encode_block(s, block[9], 9);
@ -223,7 +223,7 @@ static void speedhq_encode_mb(MpegEncContext *const s, int16_t block[12][64],
encode_block(s, block[10], 10); encode_block(s, block[10], 10);
encode_block(s, block[11], 11); encode_block(s, block[11], 11);
} else if (s->chroma_format == CHROMA_422) { } else if (s->c.chroma_format == CHROMA_422) {
encode_block(s, block[6], 6); encode_block(s, block[6], 6);
encode_block(s, block[7], 7); encode_block(s, block[7], 7);
} }
@ -235,7 +235,7 @@ static av_cold int speedhq_encode_init(AVCodecContext *avctx)
{ {
static AVOnce init_static_once = AV_ONCE_INIT; static AVOnce init_static_once = AV_ONCE_INIT;
MPVMainEncContext *const m = avctx->priv_data; MPVMainEncContext *const m = avctx->priv_data;
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
int ret; int ret;
if (avctx->width > 65500 || avctx->height > 65500) { if (avctx->width > 65500 || avctx->height > 65500) {
@ -274,8 +274,8 @@ static av_cold int speedhq_encode_init(AVCodecContext *avctx)
s->intra_chroma_ac_vlc_length = s->intra_chroma_ac_vlc_length =
s->intra_chroma_ac_vlc_last_length = uni_speedhq_ac_vlc_len; s->intra_chroma_ac_vlc_last_length = uni_speedhq_ac_vlc_len;
s->y_dc_scale_table = s->c.y_dc_scale_table =
s->c_dc_scale_table = ff_mpeg12_dc_scale_table[3]; s->c.c_dc_scale_table = ff_mpeg12_dc_scale_table[3];
ret = ff_mpv_encode_init(avctx); ret = ff_mpv_encode_init(avctx);
if (ret < 0) if (ret < 0)

View File

@ -29,11 +29,9 @@
#ifndef AVCODEC_SPEEDHQENC_H #ifndef AVCODEC_SPEEDHQENC_H
#define AVCODEC_SPEEDHQENC_H #define AVCODEC_SPEEDHQENC_H
#include <stdint.h> typedef struct MPVEncContext MPVEncContext;
#include "mpegvideo.h" void ff_speedhq_end_slice(MPVEncContext *s);
void ff_speedhq_end_slice(MpegEncContext *s);
static inline int ff_speedhq_mb_rows_in_slice(int slice_num, int mb_height) static inline int ff_speedhq_mb_rows_in_slice(int slice_num, int mb_height)
{ {

View File

@ -58,8 +58,8 @@
typedef struct SVQ1EncContext { typedef struct SVQ1EncContext {
/* FIXME: Needed for motion estimation, should not be used for anything /* FIXME: Needed for motion estimation, should not be used for anything
* else, the idea is to make the motion estimation eventually independent * else, the idea is to make the motion estimation eventually independent
* of MpegEncContext, so this will be removed then. */ * of MPVEncContext, so this will be removed then. */
MpegEncContext m; MPVEncContext m;
AVCodecContext *avctx; AVCodecContext *avctx;
MECmpContext mecc; MECmpContext mecc;
HpelDSPContext hdsp; HpelDSPContext hdsp;
@ -289,7 +289,8 @@ static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
return best_score; return best_score;
} }
static void init_block_index(MpegEncContext *s){ static void init_block_index(MpegEncContext *const s)
{
s->block_index[0]= s->b8_stride*(s->mb_y*2 ) + s->mb_x*2; s->block_index[0]= s->b8_stride*(s->mb_y*2 ) + s->mb_x*2;
s->block_index[1]= s->b8_stride*(s->mb_y*2 ) + 1 + s->mb_x*2; s->block_index[1]= s->b8_stride*(s->mb_y*2 ) + 1 + s->mb_x*2;
s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) + s->mb_x*2; s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) + s->mb_x*2;
@ -305,6 +306,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
unsigned char *decoded_plane, unsigned char *decoded_plane,
int width, int height, int src_stride, int stride) int width, int height, int src_stride, int stride)
{ {
MpegEncContext *const s2 = &s->m.c;
int x, y; int x, y;
int i; int i;
int block_width, block_height; int block_width, block_height;
@ -323,36 +325,36 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
block_height = (height + 15) / 16; block_height = (height + 15) / 16;
if (s->pict_type == AV_PICTURE_TYPE_P) { if (s->pict_type == AV_PICTURE_TYPE_P) {
s->m.avctx = s->avctx; s2->avctx = s->avctx;
s->m.last_pic.data[0] = ref_plane; s2->last_pic.data[0] = ref_plane;
s->m.linesize = s2->linesize =
s->m.last_pic.linesize[0] = s2->last_pic.linesize[0] =
s->m.new_pic->linesize[0] = s->m.new_pic->linesize[0] =
s->m.cur_pic.linesize[0] = stride; s2->cur_pic.linesize[0] = stride;
s->m.width = width; s2->width = width;
s->m.height = height; s2->height = height;
s->m.mb_width = block_width; s2->mb_width = block_width;
s->m.mb_height = block_height; s2->mb_height = block_height;
s->m.mb_stride = s->m.mb_width + 1; s2->mb_stride = s2->mb_width + 1;
s->m.b8_stride = 2 * s->m.mb_width + 1; s2->b8_stride = 2 * s2->mb_width + 1;
s->m.f_code = 1; s2->f_code = 1;
s->m.pict_type = s->pict_type; s2->pict_type = s->pict_type;
s->m.me.scene_change_score = 0; s2->me.scene_change_score = 0;
// s->m.out_format = FMT_H263; // s2->out_format = FMT_H263;
// s->m.unrestricted_mv = 1; // s2->unrestricted_mv = 1;
s->m.lambda = s->quality; s2->lambda = s->quality;
s->m.qscale = s->m.lambda * 139 + s2->qscale = s2->lambda * 139 +
FF_LAMBDA_SCALE * 64 >> FF_LAMBDA_SCALE * 64 >>
FF_LAMBDA_SHIFT + 7; FF_LAMBDA_SHIFT + 7;
s->m.lambda2 = s->m.lambda * s->m.lambda + s2->lambda2 = s2->lambda * s2->lambda +
FF_LAMBDA_SCALE / 2 >> FF_LAMBDA_SCALE / 2 >>
FF_LAMBDA_SHIFT; FF_LAMBDA_SHIFT;
if (!s->motion_val8[plane]) { if (!s->motion_val8[plane]) {
s->motion_val8[plane] = av_mallocz((s->m.b8_stride * s->motion_val8[plane] = av_mallocz((s2->b8_stride *
block_height * 2 + 2) * block_height * 2 + 2) *
2 * sizeof(int16_t)); 2 * sizeof(int16_t));
s->motion_val16[plane] = av_mallocz((s->m.mb_stride * s->motion_val16[plane] = av_mallocz((s2->mb_stride *
(block_height + 2) + 1) * (block_height + 2) + 1) *
2 * sizeof(int16_t)); 2 * sizeof(int16_t));
if (!s->motion_val8[plane] || !s->motion_val16[plane]) if (!s->motion_val8[plane] || !s->motion_val16[plane])
@ -365,18 +367,18 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
s->m.mb_mean = (uint8_t *)s->dummy; s->m.mb_mean = (uint8_t *)s->dummy;
s->m.mb_var = (uint16_t *)s->dummy; s->m.mb_var = (uint16_t *)s->dummy;
s->m.mc_mb_var = (uint16_t *)s->dummy; s->m.mc_mb_var = (uint16_t *)s->dummy;
s->m.cur_pic.mb_type = s->dummy; s2->cur_pic.mb_type = s->dummy;
s->m.cur_pic.motion_val[0] = s->motion_val8[plane] + 2; s2->cur_pic.motion_val[0] = s->motion_val8[plane] + 2;
s->m.p_mv_table = s->motion_val16[plane] + s->m.p_mv_table = s->motion_val16[plane] +
s->m.mb_stride + 1; s2->mb_stride + 1;
ff_me_init_pic(&s->m); ff_me_init_pic(&s->m);
s->m.me.dia_size = s->avctx->dia_size; s2->me.dia_size = s->avctx->dia_size;
s->m.first_slice_line = 1; s2->first_slice_line = 1;
for (y = 0; y < block_height; y++) { for (y = 0; y < block_height; y++) {
s->m.new_pic->data[0] = src - y * 16 * stride; // ugly s->m.new_pic->data[0] = src - y * 16 * stride; // ugly
s->m.mb_y = y; s2->mb_y = y;
for (i = 0; i < 16 && i + 16 * y < height; i++) { for (i = 0; i < 16 && i + 16 * y < height; i++) {
memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride], memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
@ -389,20 +391,20 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
16 * block_width); 16 * block_width);
for (x = 0; x < block_width; x++) { for (x = 0; x < block_width; x++) {
s->m.mb_x = x; s2->mb_x = x;
init_block_index(&s->m); init_block_index(s2);
ff_estimate_p_frame_motion(&s->m, x, y); ff_estimate_p_frame_motion(&s->m, x, y);
} }
s->m.first_slice_line = 0; s2->first_slice_line = 0;
} }
ff_fix_long_p_mvs(&s->m, CANDIDATE_MB_TYPE_INTRA); ff_fix_long_p_mvs(&s->m, CANDIDATE_MB_TYPE_INTRA);
ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s->m.f_code, ff_fix_long_mvs(&s->m, NULL, 0, s->m.p_mv_table, s2->f_code,
CANDIDATE_MB_TYPE_INTER, 0); CANDIDATE_MB_TYPE_INTER, 0);
} }
s->m.first_slice_line = 1; s2->first_slice_line = 1;
for (y = 0; y < block_height; y++) { for (y = 0; y < block_height; y++) {
for (i = 0; i < 16 && i + 16 * y < height; i++) { for (i = 0; i < 16 && i + 16 * y < height; i++) {
memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride], memcpy(&src[i * stride], &src_plane[(i + 16 * y) * src_stride],
@ -413,7 +415,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
for (; i < 16 && i + 16 * y < 16 * block_height; i++) for (; i < 16 && i + 16 * y < 16 * block_height; i++)
memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width); memcpy(&src[i * stride], &src[(i - 1) * stride], 16 * block_width);
s->m.mb_y = y; s2->mb_y = y;
for (x = 0; x < block_width; x++) { for (x = 0; x < block_width; x++) {
uint8_t reorder_buffer[2][6][7 * 32]; uint8_t reorder_buffer[2][6][7 * 32];
int count[2][6]; int count[2][6];
@ -428,11 +430,11 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
return -1; return -1;
} }
s->m.mb_x = x; s2->mb_x = x;
init_block_index(&s->m); init_block_index(s2);
if (s->pict_type == AV_PICTURE_TYPE_I || if (s->pict_type == AV_PICTURE_TYPE_I ||
(s->m.mb_type[x + y * s->m.mb_stride] & (s->m.mb_type[x + y * s2->mb_stride] &
CANDIDATE_MB_TYPE_INTRA)) { CANDIDATE_MB_TYPE_INTRA)) {
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i], init_put_bits(&s->reorder_pb[i], reorder_buffer[0][i],
@ -456,8 +458,8 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
int mx, my, pred_x, pred_y, dxy; int mx, my, pred_x, pred_y, dxy;
int16_t *motion_ptr; int16_t *motion_ptr;
motion_ptr = ff_h263_pred_motion(&s->m, 0, 0, &pred_x, &pred_y); motion_ptr = ff_h263_pred_motion(s2, 0, 0, &pred_x, &pred_y);
if (s->m.mb_type[x + y * s->m.mb_stride] & if (s->m.mb_type[x + y * s2->mb_stride] &
CANDIDATE_MB_TYPE_INTER) { CANDIDATE_MB_TYPE_INTER) {
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i], init_put_bits(&s->reorder_pb[i], reorder_buffer[1][i],
@ -506,10 +508,10 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
motion_ptr[1] = motion_ptr[1] =
motion_ptr[2] = motion_ptr[2] =
motion_ptr[3] = motion_ptr[3] =
motion_ptr[0 + 2 * s->m.b8_stride] = motion_ptr[0 + 2 * s2->b8_stride] =
motion_ptr[1 + 2 * s->m.b8_stride] = motion_ptr[1 + 2 * s2->b8_stride] =
motion_ptr[2 + 2 * s->m.b8_stride] = motion_ptr[2 + 2 * s2->b8_stride] =
motion_ptr[3 + 2 * s->m.b8_stride] = 0; motion_ptr[3 + 2 * s2->b8_stride] = 0;
} }
} }
@ -522,7 +524,7 @@ static int svq1_encode_plane(SVQ1EncContext *s, int plane,
if (best == 0) if (best == 0)
s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16); s->hdsp.put_pixels_tab[0][0](decoded, temp, stride, 16);
} }
s->m.first_slice_line = 0; s2->first_slice_line = 0;
} }
return 0; return 0;
} }
@ -537,14 +539,14 @@ static av_cold int svq1_encode_end(AVCodecContext *avctx)
s->rd_total / (double)(avctx->width * avctx->height * s->rd_total / (double)(avctx->width * avctx->height *
avctx->frame_num)); avctx->frame_num));
av_freep(&s->m.me.scratchpad); av_freep(&s->m.c.me.scratchpad);
av_freep(&s->m.me.map); av_freep(&s->m.c.me.map);
av_freep(&s->mb_type); av_freep(&s->mb_type);
av_freep(&s->dummy); av_freep(&s->dummy);
av_freep(&s->scratchbuf); av_freep(&s->scratchbuf);
s->m.mb_type = NULL; s->m.mb_type = NULL;
ff_mpv_common_end(&s->m); ff_mpv_common_end(&s->m.c);
for (i = 0; i < 3; i++) { for (i = 0; i < 3; i++) {
av_freep(&s->motion_val8[i]); av_freep(&s->motion_val8[i]);
@ -583,7 +585,7 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
ff_hpeldsp_init(&s->hdsp, avctx->flags); ff_hpeldsp_init(&s->hdsp, avctx->flags);
ff_me_cmp_init(&s->mecc, avctx); ff_me_cmp_init(&s->mecc, avctx);
ret = ff_me_init(&s->m.me, avctx, &s->mecc, 0); ret = ff_me_init(&s->m.c.me, avctx, &s->mecc, 0);
if (ret < 0) if (ret < 0)
return ret; return ret;
ff_mpegvideoencdsp_init(&s->m.mpvencdsp, avctx); ff_mpegvideoencdsp_init(&s->m.mpvencdsp, avctx);
@ -604,31 +606,31 @@ static av_cold int svq1_encode_init(AVCodecContext *avctx)
s->c_block_height = (s->frame_height / 4 + 15) / 16; s->c_block_height = (s->frame_height / 4 + 15) / 16;
s->avctx = avctx; s->avctx = avctx;
s->m.avctx = avctx; s->m.c.avctx = avctx;
if ((ret = ff_mpv_common_init(&s->m)) < 0) { ret = ff_mpv_common_init(&s->m.c);
if (ret < 0)
return ret; return ret;
}
s->m.picture_structure = PICT_FRAME; s->m.c.picture_structure = PICT_FRAME;
s->m.me.temp = s->m.c.me.temp =
s->m.me.scratchpad = av_mallocz((avctx->width + 64) * s->m.c.me.scratchpad = av_mallocz((avctx->width + 64) *
2 * 16 * 2 * sizeof(uint8_t)); 2 * 16 * 2 * sizeof(uint8_t));
s->mb_type = av_mallocz((s->y_block_width + 1) * s->mb_type = av_mallocz((s->y_block_width + 1) *
s->y_block_height * sizeof(int16_t)); s->y_block_height * sizeof(int16_t));
s->dummy = av_mallocz((s->y_block_width + 1) * s->dummy = av_mallocz((s->y_block_width + 1) *
s->y_block_height * sizeof(int32_t)); s->y_block_height * sizeof(int32_t));
s->m.me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*s->m.me.map)); s->m.c.me.map = av_mallocz(2 * ME_MAP_SIZE * sizeof(*s->m.c.me.map));
s->m.new_pic = av_frame_alloc(); s->m.new_pic = av_frame_alloc();
if (!s->m.me.scratchpad || !s->m.me.map || if (!s->m.c.me.scratchpad || !s->m.c.me.map ||
!s->mb_type || !s->dummy || !s->m.new_pic) !s->mb_type || !s->dummy || !s->m.new_pic)
return AVERROR(ENOMEM); return AVERROR(ENOMEM);
s->m.me.score_map = s->m.me.map + ME_MAP_SIZE; s->m.c.me.score_map = s->m.c.me.map + ME_MAP_SIZE;
ff_svq1enc_init(&s->svq1encdsp); ff_svq1enc_init(&s->svq1encdsp);
s->m.me.mv_penalty = ff_h263_get_mv_penalty(); s->m.c.me.mv_penalty = ff_h263_get_mv_penalty();
return write_ident(avctx, s->avctx->flags & AV_CODEC_FLAG_BITEXACT ? "Lavc" : LIBAVCODEC_IDENT); return write_ident(avctx, s->avctx->flags & AV_CODEC_FLAG_BITEXACT ? "Lavc" : LIBAVCODEC_IDENT);
} }
@ -716,7 +718,7 @@ static int svq1_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
#define OFFSET(x) offsetof(struct SVQ1EncContext, x) #define OFFSET(x) offsetof(struct SVQ1EncContext, x)
#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = { static const AVOption options[] = {
{ "motion-est", "Motion estimation algorithm", OFFSET(m.me.motion_est), AV_OPT_TYPE_INT, { .i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_XONE, VE, .unit = "motion-est"}, { "motion-est", "Motion estimation algorithm", OFFSET(m.c.me.motion_est), AV_OPT_TYPE_INT, { .i64 = FF_ME_EPZS }, FF_ME_ZERO, FF_ME_XONE, VE, .unit = "motion-est"},
{ "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion-est" }, { "zero", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_ZERO }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion-est" },
{ "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion-est" }, { "epzs", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_EPZS }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion-est" },
{ "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion-est" }, { "xone", NULL, 0, AV_OPT_TYPE_CONST, { .i64 = FF_ME_XONE }, 0, 0, FF_MPV_OPT_FLAGS, .unit = "motion-est" },

View File

@ -48,17 +48,17 @@ typedef struct WMV2EncContext {
static int encode_ext_header(WMV2EncContext *w) static int encode_ext_header(WMV2EncContext *w)
{ {
MpegEncContext *const s = &w->msmpeg4.m.s; MPVEncContext *const s = &w->msmpeg4.m.s;
PutBitContext pb; PutBitContext pb;
int code; int code;
init_put_bits(&pb, s->avctx->extradata, WMV2_EXTRADATA_SIZE); init_put_bits(&pb, s->c.avctx->extradata, WMV2_EXTRADATA_SIZE);
put_bits(&pb, 5, s->avctx->time_base.den / s->avctx->time_base.num); // yes 29.97 -> 29 put_bits(&pb, 5, s->c.avctx->time_base.den / s->c.avctx->time_base.num); // yes 29.97 -> 29
put_bits(&pb, 11, FFMIN(w->msmpeg4.m.bit_rate / 1024, 2047)); put_bits(&pb, 11, FFMIN(w->msmpeg4.m.bit_rate / 1024, 2047));
put_bits(&pb, 1, w->mspel_bit = 1); put_bits(&pb, 1, w->mspel_bit = 1);
put_bits(&pb, 1, s->loop_filter); put_bits(&pb, 1, s->c.loop_filter);
put_bits(&pb, 1, w->abt_flag = 1); put_bits(&pb, 1, w->abt_flag = 1);
put_bits(&pb, 1, w->j_type_bit = 1); put_bits(&pb, 1, w->j_type_bit = 1);
put_bits(&pb, 1, w->top_left_mv_flag = 0); put_bits(&pb, 1, w->top_left_mv_flag = 0);
@ -67,7 +67,7 @@ static int encode_ext_header(WMV2EncContext *w)
flush_put_bits(&pb); flush_put_bits(&pb);
s->slice_height = s->mb_height / code; s->c.slice_height = s->c.mb_height / code;
return 0; return 0;
} }
@ -76,25 +76,25 @@ static int wmv2_encode_picture_header(MPVMainEncContext *const m)
{ {
WMV2EncContext *const w = (WMV2EncContext *) m; WMV2EncContext *const w = (WMV2EncContext *) m;
MSMPEG4EncContext *const ms = &w->msmpeg4; MSMPEG4EncContext *const ms = &w->msmpeg4;
MpegEncContext *const s = &m->s; MPVEncContext *const s = &m->s;
put_bits(&s->pb, 1, s->pict_type - 1); put_bits(&s->pb, 1, s->c.pict_type - 1);
if (s->pict_type == AV_PICTURE_TYPE_I) if (s->c.pict_type == AV_PICTURE_TYPE_I)
put_bits(&s->pb, 7, 0); put_bits(&s->pb, 7, 0);
put_bits(&s->pb, 5, s->qscale); put_bits(&s->pb, 5, s->c.qscale);
ms->dc_table_index = 1; ms->dc_table_index = 1;
ms->mv_table_index = 1; /* only if P-frame */ ms->mv_table_index = 1; /* only if P-frame */
ms->per_mb_rl_table = 0; ms->per_mb_rl_table = 0;
s->mspel = 0; s->c.mspel = 0;
w->per_mb_abt = 0; w->per_mb_abt = 0;
w->abt_type = 0; w->abt_type = 0;
w->j_type = 0; w->j_type = 0;
av_assert0(s->flipflop_rounding); av_assert0(s->c.flipflop_rounding);
if (s->pict_type == AV_PICTURE_TYPE_I) { if (s->c.pict_type == AV_PICTURE_TYPE_I) {
av_assert0(s->no_rounding == 1); av_assert0(s->c.no_rounding == 1);
if (w->j_type_bit) if (w->j_type_bit)
put_bits(&s->pb, 1, w->j_type); put_bits(&s->pb, 1, w->j_type);
@ -108,17 +108,17 @@ static int wmv2_encode_picture_header(MPVMainEncContext *const m)
put_bits(&s->pb, 1, ms->dc_table_index); put_bits(&s->pb, 1, ms->dc_table_index);
s->inter_intra_pred = 0; s->c.inter_intra_pred = 0;
} else { } else {
int cbp_index; int cbp_index;
put_bits(&s->pb, 2, SKIP_TYPE_NONE); put_bits(&s->pb, 2, SKIP_TYPE_NONE);
ff_msmpeg4_code012(&s->pb, cbp_index = 0); ff_msmpeg4_code012(&s->pb, cbp_index = 0);
w->cbp_table_index = wmv2_get_cbp_table_index(s, cbp_index); w->cbp_table_index = wmv2_get_cbp_table_index(&s->c, cbp_index);
if (w->mspel_bit) if (w->mspel_bit)
put_bits(&s->pb, 1, s->mspel); put_bits(&s->pb, 1, s->c.mspel);
if (w->abt_flag) { if (w->abt_flag) {
put_bits(&s->pb, 1, w->per_mb_abt ^ 1); put_bits(&s->pb, 1, w->per_mb_abt ^ 1);
@ -136,7 +136,7 @@ static int wmv2_encode_picture_header(MPVMainEncContext *const m)
put_bits(&s->pb, 1, ms->dc_table_index); put_bits(&s->pb, 1, ms->dc_table_index);
put_bits(&s->pb, 1, ms->mv_table_index); put_bits(&s->pb, 1, ms->mv_table_index);
s->inter_intra_pred = 0; // (s->width * s->height < 320 * 240 && m->bit_rate <= II_BITRATE); s->c.inter_intra_pred = 0; // (s->c.width * s->c.height < 320 * 240 && m->bit_rate <= II_BITRATE);
} }
s->esc3_level_length = 0; s->esc3_level_length = 0;
ms->esc3_run_length = 0; ms->esc3_run_length = 0;
@ -147,7 +147,7 @@ static int wmv2_encode_picture_header(MPVMainEncContext *const m)
/* Nearly identical to wmv1 but that is just because we do not use the /* Nearly identical to wmv1 but that is just because we do not use the
* useless M$ crap features. It is duplicated here in case someone wants * useless M$ crap features. It is duplicated here in case someone wants
* to add support for these crap features. */ * to add support for these crap features. */
static void wmv2_encode_mb(MpegEncContext *const s, int16_t block[][64], static void wmv2_encode_mb(MPVEncContext *const s, int16_t block[][64],
int motion_x, int motion_y) int motion_x, int motion_y)
{ {
WMV2EncContext *const w = (WMV2EncContext *) s; WMV2EncContext *const w = (WMV2EncContext *) s;
@ -157,11 +157,11 @@ static void wmv2_encode_mb(MpegEncContext *const s, int16_t block[][64],
ff_msmpeg4_handle_slices(s); ff_msmpeg4_handle_slices(s);
if (!s->mb_intra) { if (!s->c.mb_intra) {
/* compute cbp */ /* compute cbp */
cbp = 0; cbp = 0;
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
if (s->block_last_index[i] >= 0) if (s->c.block_last_index[i] >= 0)
cbp |= 1 << (5 - i); cbp |= 1 << (5 - i);
put_bits(&s->pb, put_bits(&s->pb,
@ -170,7 +170,7 @@ static void wmv2_encode_mb(MpegEncContext *const s, int16_t block[][64],
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
/* motion vector */ /* motion vector */
ff_h263_pred_motion(s, 0, 0, &pred_x, &pred_y); ff_h263_pred_motion(&s->c, 0, 0, &pred_x, &pred_y);
ff_msmpeg4_encode_motion(&w->msmpeg4, motion_x - pred_x, ff_msmpeg4_encode_motion(&w->msmpeg4, motion_x - pred_x,
motion_y - pred_y); motion_y - pred_y);
s->mv_bits += get_bits_diff(s); s->mv_bits += get_bits_diff(s);
@ -179,19 +179,19 @@ static void wmv2_encode_mb(MpegEncContext *const s, int16_t block[][64],
cbp = 0; cbp = 0;
coded_cbp = 0; coded_cbp = 0;
for (i = 0; i < 6; i++) { for (i = 0; i < 6; i++) {
int val, pred; int val = (s->c.block_last_index[i] >= 1);
val = (s->block_last_index[i] >= 1);
cbp |= val << (5 - i); cbp |= val << (5 - i);
if (i < 4) { if (i < 4) {
/* predict value for close blocks only for luma */ /* predict value for close blocks only for luma */
pred = ff_msmpeg4_coded_block_pred(s, i, &coded_block); int pred = ff_msmpeg4_coded_block_pred(&s->c, i, &coded_block);
*coded_block = val; *coded_block = val;
val = val ^ pred; val = val ^ pred;
} }
coded_cbp |= val << (5 - i); coded_cbp |= val << (5 - i);
} }
if (s->pict_type == AV_PICTURE_TYPE_I) if (s->c.pict_type == AV_PICTURE_TYPE_I)
put_bits(&s->pb, put_bits(&s->pb,
ff_msmp4_mb_i_table[coded_cbp][1], ff_msmp4_mb_i_table[coded_cbp][1],
ff_msmp4_mb_i_table[coded_cbp][0]); ff_msmp4_mb_i_table[coded_cbp][0]);
@ -200,18 +200,18 @@ static void wmv2_encode_mb(MpegEncContext *const s, int16_t block[][64],
ff_wmv2_inter_table[w->cbp_table_index][cbp][1], ff_wmv2_inter_table[w->cbp_table_index][cbp][1],
ff_wmv2_inter_table[w->cbp_table_index][cbp][0]); ff_wmv2_inter_table[w->cbp_table_index][cbp][0]);
put_bits(&s->pb, 1, 0); /* no AC prediction yet */ put_bits(&s->pb, 1, 0); /* no AC prediction yet */
if (s->inter_intra_pred) { if (s->c.inter_intra_pred) {
s->h263_aic_dir = 0; s->c.h263_aic_dir = 0;
put_bits(&s->pb, put_bits(&s->pb,
ff_table_inter_intra[s->h263_aic_dir][1], ff_table_inter_intra[s->c.h263_aic_dir][1],
ff_table_inter_intra[s->h263_aic_dir][0]); ff_table_inter_intra[s->c.h263_aic_dir][0]);
} }
s->misc_bits += get_bits_diff(s); s->misc_bits += get_bits_diff(s);
} }
for (i = 0; i < 6; i++) for (i = 0; i < 6; i++)
ff_msmpeg4_encode_block(s, block[i], i); ff_msmpeg4_encode_block(s, block[i], i);
if (s->mb_intra) if (s->c.mb_intra)
s->i_tex_bits += get_bits_diff(s); s->i_tex_bits += get_bits_diff(s);
else else
s->p_tex_bits += get_bits_diff(s); s->p_tex_bits += get_bits_diff(s);
@ -220,17 +220,17 @@ static void wmv2_encode_mb(MpegEncContext *const s, int16_t block[][64],
static av_cold int wmv2_encode_init(AVCodecContext *avctx) static av_cold int wmv2_encode_init(AVCodecContext *avctx)
{ {
WMV2EncContext *const w = avctx->priv_data; WMV2EncContext *const w = avctx->priv_data;
MpegEncContext *const s = &w->msmpeg4.m.s; MPVEncContext *const s = &w->msmpeg4.m.s;
int ret; int ret;
w->msmpeg4.m.encode_picture_header = wmv2_encode_picture_header; w->msmpeg4.m.encode_picture_header = wmv2_encode_picture_header;
s->encode_mb = wmv2_encode_mb; s->encode_mb = wmv2_encode_mb;
s->private_ctx = &w->common; s->c.private_ctx = &w->common;
ret = ff_mpv_encode_init(avctx); ret = ff_mpv_encode_init(avctx);
if (ret < 0) if (ret < 0)
return ret; return ret;
ff_wmv2_common_init(s); ff_wmv2_common_init(&s->c);
avctx->extradata_size = WMV2_EXTRADATA_SIZE; avctx->extradata_size = WMV2_EXTRADATA_SIZE;
avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE); avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);

View File

@ -214,7 +214,7 @@ hadamard8x8_diff %+ SUFFIX:
hadamard8_16_wrapper %1, 3 hadamard8_16_wrapper %1, 3
%elif cpuflag(mmx) %elif cpuflag(mmx)
ALIGN 16 ALIGN 16
; int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, const uint8_t *src1, ; int ff_hadamard8_diff_ ## cpu(MPVEncContext *s, const uint8_t *src1,
; const uint8_t *src2, ptrdiff_t stride, int h) ; const uint8_t *src2, ptrdiff_t stride, int h)
; r0 = void *s = unused, int h = unused (always 8) ; r0 = void *s = unused, int h = unused (always 8)
; note how r1, r2 and r3 are not clobbered in this function, so 16x16 ; note how r1, r2 and r3 are not clobbered in this function, so 16x16
@ -278,7 +278,7 @@ INIT_XMM ssse3
%define ABS_SUM_8x8 ABS_SUM_8x8_64 %define ABS_SUM_8x8 ABS_SUM_8x8_64
HADAMARD8_DIFF 9 HADAMARD8_DIFF 9
; int ff_sse*_*(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ; int ff_sse*_*(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
; ptrdiff_t line_size, int h) ; ptrdiff_t line_size, int h)
%macro SUM_SQUARED_ERRORS 1 %macro SUM_SQUARED_ERRORS 1
@ -466,7 +466,7 @@ HF_NOISE 8
HF_NOISE 16 HF_NOISE 16
;--------------------------------------------------------------------------------------- ;---------------------------------------------------------------------------------------
;int ff_sad_<opt>(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h); ;int ff_sad_<opt>(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h);
;--------------------------------------------------------------------------------------- ;---------------------------------------------------------------------------------------
;%1 = 8/16 ;%1 = 8/16
%macro SAD 1 %macro SAD 1
@ -521,7 +521,7 @@ INIT_XMM sse2
SAD 16 SAD 16
;------------------------------------------------------------------------------------------ ;------------------------------------------------------------------------------------------
;int ff_sad_x2_<opt>(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h); ;int ff_sad_x2_<opt>(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h);
;------------------------------------------------------------------------------------------ ;------------------------------------------------------------------------------------------
;%1 = 8/16 ;%1 = 8/16
%macro SAD_X2 1 %macro SAD_X2 1
@ -598,7 +598,7 @@ INIT_XMM sse2
SAD_X2 16 SAD_X2 16
;------------------------------------------------------------------------------------------ ;------------------------------------------------------------------------------------------
;int ff_sad_y2_<opt>(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h); ;int ff_sad_y2_<opt>(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h);
;------------------------------------------------------------------------------------------ ;------------------------------------------------------------------------------------------
;%1 = 8/16 ;%1 = 8/16
%macro SAD_Y2 1 %macro SAD_Y2 1
@ -668,7 +668,7 @@ INIT_XMM sse2
SAD_Y2 16 SAD_Y2 16
;------------------------------------------------------------------------------------------- ;-------------------------------------------------------------------------------------------
;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h); ;int ff_sad_approx_xy2_<opt>(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ptrdiff_t stride, int h);
;------------------------------------------------------------------------------------------- ;-------------------------------------------------------------------------------------------
;%1 = 8/16 ;%1 = 8/16
%macro SAD_APPROX_XY2 1 %macro SAD_APPROX_XY2 1
@ -769,7 +769,7 @@ INIT_XMM sse2
SAD_APPROX_XY2 16 SAD_APPROX_XY2 16
;-------------------------------------------------------------------- ;--------------------------------------------------------------------
;int ff_vsad_intra(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ;int ff_vsad_intra(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
; ptrdiff_t line_size, int h); ; ptrdiff_t line_size, int h);
;-------------------------------------------------------------------- ;--------------------------------------------------------------------
; %1 = 8/16 ; %1 = 8/16
@ -830,7 +830,7 @@ INIT_XMM sse2
VSAD_INTRA 16 VSAD_INTRA 16
;--------------------------------------------------------------------- ;---------------------------------------------------------------------
;int ff_vsad_approx(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, ;int ff_vsad_approx(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
; ptrdiff_t line_size, int h); ; ptrdiff_t line_size, int h);
;--------------------------------------------------------------------- ;---------------------------------------------------------------------
; %1 = 8/16 ; %1 = 8/16

View File

@ -28,59 +28,59 @@
#include "libavutil/x86/asm.h" #include "libavutil/x86/asm.h"
#include "libavutil/x86/cpu.h" #include "libavutil/x86/cpu.h"
#include "libavcodec/me_cmp.h" #include "libavcodec/me_cmp.h"
#include "libavcodec/mpegvideo.h" #include "libavcodec/mpegvideoenc.h"
int ff_sum_abs_dctelem_sse2(const int16_t *block); int ff_sum_abs_dctelem_sse2(const int16_t *block);
int ff_sum_abs_dctelem_ssse3(const int16_t *block); int ff_sum_abs_dctelem_ssse3(const int16_t *block);
int ff_sse8_mmx(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sse8_mmx(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sse16_mmx(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sse16_mmx(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sse16_sse2(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sse16_sse2(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_hf_noise8_mmx(const uint8_t *pix1, ptrdiff_t stride, int h); int ff_hf_noise8_mmx(const uint8_t *pix1, ptrdiff_t stride, int h);
int ff_hf_noise16_mmx(const uint8_t *pix1, ptrdiff_t stride, int h); int ff_hf_noise16_mmx(const uint8_t *pix1, ptrdiff_t stride, int h);
int ff_sad8_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad8_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad16_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad16_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad16_sse2(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad16_sse2(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad8_x2_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad8_x2_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad16_x2_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad16_x2_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad16_x2_sse2(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad16_x2_sse2(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad8_y2_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad8_y2_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad16_y2_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad16_y2_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad16_y2_sse2(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad16_y2_sse2(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad8_approx_xy2_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad8_approx_xy2_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad16_approx_xy2_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad16_approx_xy2_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_sad16_approx_xy2_sse2(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_sad16_approx_xy2_sse2(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_vsad_intra8_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_vsad_intra8_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_vsad_intra16_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_vsad_intra16_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_vsad_intra16_sse2(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_vsad_intra16_sse2(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_vsad8_approx_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_vsad8_approx_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_vsad16_approx_mmxext(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_vsad16_approx_mmxext(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
int ff_vsad16_approx_sse2(MpegEncContext *v, const uint8_t *pix1, const uint8_t *pix2, int ff_vsad16_approx_sse2(MPVEncContext *v, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h); ptrdiff_t stride, int h);
#define hadamard_func(cpu) \ #define hadamard_func(cpu) \
int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, const uint8_t *src1, \ int ff_hadamard8_diff_ ## cpu(MPVEncContext *s, const uint8_t *src1, \
const uint8_t *src2, ptrdiff_t stride, int h); \ const uint8_t *src2, ptrdiff_t stride, int h); \
int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, const uint8_t *src1, \ int ff_hadamard8_diff16_ ## cpu(MPVEncContext *s, const uint8_t *src1, \
const uint8_t *src2, ptrdiff_t stride, int h); const uint8_t *src2, ptrdiff_t stride, int h);
hadamard_func(mmxext) hadamard_func(mmxext)
@ -88,7 +88,7 @@ hadamard_func(sse2)
hadamard_func(ssse3) hadamard_func(ssse3)
#if HAVE_X86ASM #if HAVE_X86ASM
static int nsse16_mmx(MpegEncContext *c, const uint8_t *pix1, const uint8_t *pix2, static int nsse16_mmx(MPVEncContext *c, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int score1, score2; int score1, score2;
@ -101,12 +101,12 @@ static int nsse16_mmx(MpegEncContext *c, const uint8_t *pix1, const uint8_t *pix
- ff_hf_noise16_mmx(pix2, stride, h) - ff_hf_noise8_mmx(pix2+8, stride, h); - ff_hf_noise16_mmx(pix2, stride, h) - ff_hf_noise8_mmx(pix2+8, stride, h);
if (c) if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight; return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
else else
return score1 + FFABS(score2) * 8; return score1 + FFABS(score2) * 8;
} }
static int nsse8_mmx(MpegEncContext *c, const uint8_t *pix1, const uint8_t *pix2, static int nsse8_mmx(MPVEncContext *c, const uint8_t *pix1, const uint8_t *pix2,
ptrdiff_t stride, int h) ptrdiff_t stride, int h)
{ {
int score1 = ff_sse8_mmx(c, pix1, pix2, stride, h); int score1 = ff_sse8_mmx(c, pix1, pix2, stride, h);
@ -114,7 +114,7 @@ static int nsse8_mmx(MpegEncContext *c, const uint8_t *pix1, const uint8_t *pix2
ff_hf_noise8_mmx(pix2, stride, h); ff_hf_noise8_mmx(pix2, stride, h);
if (c) if (c)
return score1 + FFABS(score2) * c->avctx->nsse_weight; return score1 + FFABS(score2) * c->c.avctx->nsse_weight;
else else
return score1 + FFABS(score2) * 8; return score1 + FFABS(score2) * 8;
} }
@ -199,7 +199,7 @@ static inline int sum_mmx(void)
} }
#define PIX_SADXY(suf) \ #define PIX_SADXY(suf) \
static int sad8_xy2_ ## suf(MpegEncContext *v, const uint8_t *blk2, \ static int sad8_xy2_ ## suf(MPVEncContext *v, const uint8_t *blk2, \
const uint8_t *blk1, ptrdiff_t stride, int h) \ const uint8_t *blk1, ptrdiff_t stride, int h) \
{ \ { \
__asm__ volatile ( \ __asm__ volatile ( \
@ -212,7 +212,7 @@ static int sad8_xy2_ ## suf(MpegEncContext *v, const uint8_t *blk2, \
return sum_ ## suf(); \ return sum_ ## suf(); \
} \ } \
\ \
static int sad16_xy2_ ## suf(MpegEncContext *v, const uint8_t *blk2, \ static int sad16_xy2_ ## suf(MPVEncContext *v, const uint8_t *blk2, \
const uint8_t *blk1, ptrdiff_t stride, int h) \ const uint8_t *blk1, ptrdiff_t stride, int h) \
{ \ { \
__asm__ volatile ( \ __asm__ volatile ( \

View File

@ -70,8 +70,9 @@ DECLARE_ALIGNED(16, static const uint16_t, inv_zigzag_direct16)[64] = {
#if HAVE_INLINE_ASM #if HAVE_INLINE_ASM
#if HAVE_SSE2_INLINE #if HAVE_SSE2_INLINE
static void denoise_dct_sse2(MpegEncContext *s, int16_t *block){ static void denoise_dct_sse2(MPVEncContext *const s, int16_t block[])
const int intra= s->mb_intra; {
const int intra = s->c.mb_intra;
int *sum= s->dct_error_sum[intra]; int *sum= s->dct_error_sum[intra];
uint16_t *offset= s->dct_offset[intra]; uint16_t *offset= s->dct_offset[intra];
@ -128,9 +129,9 @@ static void denoise_dct_sse2(MpegEncContext *s, int16_t *block){
#endif /* HAVE_SSE2_INLINE */ #endif /* HAVE_SSE2_INLINE */
#endif /* HAVE_INLINE_ASM */ #endif /* HAVE_INLINE_ASM */
av_cold void ff_dct_encode_init_x86(MpegEncContext *s) av_cold void ff_dct_encode_init_x86(MPVEncContext *const s)
{ {
const int dct_algo = s->avctx->dct_algo; const int dct_algo = s->c.avctx->dct_algo;
if (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX) { if (dct_algo == FF_DCT_AUTO || dct_algo == FF_DCT_MMX) {
#if HAVE_MMX_INLINE #if HAVE_MMX_INLINE

View File

@ -26,7 +26,7 @@
#include "libavutil/mem_internal.h" #include "libavutil/mem_internal.h"
#include "libavutil/x86/asm.h" #include "libavutil/x86/asm.h"
#include "libavcodec/mpegutils.h" #include "libavcodec/mpegutils.h"
#include "libavcodec/mpegvideo.h" #include "libavcodec/mpegvideoenc.h"
#include "fdct.h" #include "fdct.h"
#undef MMREG_WIDTH #undef MMREG_WIDTH
@ -90,7 +90,7 @@
"psubw "a", "b" \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i]) "psubw "a", "b" \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
#endif #endif
static int RENAME(dct_quantize)(MpegEncContext *s, static int RENAME(dct_quantize)(MPVEncContext *const s,
int16_t *block, int n, int16_t *block, int n,
int qscale, int *overflow) int qscale, int *overflow)
{ {
@ -105,19 +105,19 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
if(s->dct_error_sum) if(s->dct_error_sum)
s->denoise_dct(s, block); s->denoise_dct(s, block);
if (s->mb_intra) { if (s->c.mb_intra) {
int dummy; int dummy;
if (n < 4){ if (n < 4){
q = s->y_dc_scale; q = s->c.y_dc_scale;
bias = s->q_intra_matrix16[qscale][1]; bias = s->q_intra_matrix16[qscale][1];
qmat = s->q_intra_matrix16[qscale][0]; qmat = s->q_intra_matrix16[qscale][0];
}else{ }else{
q = s->c_dc_scale; q = s->c.c_dc_scale;
bias = s->q_chroma_intra_matrix16[qscale][1]; bias = s->q_chroma_intra_matrix16[qscale][1];
qmat = s->q_chroma_intra_matrix16[qscale][0]; qmat = s->q_chroma_intra_matrix16[qscale][0];
} }
/* note: block[0] is assumed to be positive */ /* note: block[0] is assumed to be positive */
if (!s->h263_aic) { if (!s->c.h263_aic) {
__asm__ volatile ( __asm__ volatile (
"mul %%ecx \n\t" "mul %%ecx \n\t"
: "=d" (level), "=a"(dummy) : "=d" (level), "=a"(dummy)
@ -136,8 +136,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
qmat = s->q_inter_matrix16[qscale][0]; qmat = s->q_inter_matrix16[qscale][0];
} }
if((s->out_format == FMT_H263 || s->out_format == FMT_H261) && s->mpeg_quant==0){ if ((s->c.out_format == FMT_H263 || s->c.out_format == FMT_H261) && !s->c.mpeg_quant) {
__asm__ volatile( __asm__ volatile(
"movd %%"FF_REG_a", "MM"3 \n\t" // last_non_zero_p1 "movd %%"FF_REG_a", "MM"3 \n\t" // last_non_zero_p1
SPREADW(MM"3") SPREADW(MM"3")
@ -220,11 +219,10 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
: "g" (s->max_qcoeff) : "g" (s->max_qcoeff)
); );
if(s->mb_intra) block[0]= level; block[0] = s->c.mb_intra ? level : temp_block[0];
else block[0]= temp_block[0];
av_assert2(ARCH_X86_32 || s->idsp.perm_type != FF_IDCT_PERM_SIMPLE); av_assert2(ARCH_X86_32 || s->c.idsp.perm_type != FF_IDCT_PERM_SIMPLE);
if (ARCH_X86_32 && s->idsp.perm_type == FF_IDCT_PERM_SIMPLE) { if (ARCH_X86_32 && s->c.idsp.perm_type == FF_IDCT_PERM_SIMPLE) {
if(last_non_zero_p1 <= 1) goto end; if(last_non_zero_p1 <= 1) goto end;
block[0x08] = temp_block[0x01]; block[0x10] = temp_block[0x08]; block[0x08] = temp_block[0x01]; block[0x10] = temp_block[0x08];
block[0x20] = temp_block[0x10]; block[0x20] = temp_block[0x10];
@ -268,7 +266,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
block[0x3E] = temp_block[0x3D]; block[0x27] = temp_block[0x36]; block[0x3E] = temp_block[0x3D]; block[0x27] = temp_block[0x36];
block[0x3D] = temp_block[0x2F]; block[0x2F] = temp_block[0x37]; block[0x3D] = temp_block[0x2F]; block[0x2F] = temp_block[0x37];
block[0x37] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; block[0x37] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
}else if(s->idsp.perm_type == FF_IDCT_PERM_LIBMPEG2){ } else if (s->c.idsp.perm_type == FF_IDCT_PERM_LIBMPEG2) {
if(last_non_zero_p1 <= 1) goto end; if(last_non_zero_p1 <= 1) goto end;
block[0x04] = temp_block[0x01]; block[0x04] = temp_block[0x01];
block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10]; block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10];
@ -312,7 +310,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
block[0x3E] = temp_block[0x3D]; block[0x33] = temp_block[0x36]; block[0x3E] = temp_block[0x3D]; block[0x33] = temp_block[0x36];
block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37]; block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37];
block[0x3B] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; block[0x3B] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
} else if (s->idsp.perm_type == FF_IDCT_PERM_NONE) { } else if (s->c.idsp.perm_type == FF_IDCT_PERM_NONE) {
if(last_non_zero_p1 <= 1) goto end; if(last_non_zero_p1 <= 1) goto end;
block[0x01] = temp_block[0x01]; block[0x01] = temp_block[0x01];
block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10]; block[0x08] = temp_block[0x08]; block[0x10] = temp_block[0x10];
@ -356,7 +354,7 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
block[0x3D] = temp_block[0x3D]; block[0x36] = temp_block[0x36]; block[0x3D] = temp_block[0x3D]; block[0x36] = temp_block[0x36];
block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37]; block[0x2F] = temp_block[0x2F]; block[0x37] = temp_block[0x37];
block[0x3E] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; block[0x3E] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
} else if (s->idsp.perm_type == FF_IDCT_PERM_TRANSPOSE) { } else if (s->c.idsp.perm_type == FF_IDCT_PERM_TRANSPOSE) {
if(last_non_zero_p1 <= 1) goto end; if(last_non_zero_p1 <= 1) goto end;
block[0x08] = temp_block[0x01]; block[0x08] = temp_block[0x01];
block[0x01] = temp_block[0x08]; block[0x02] = temp_block[0x10]; block[0x01] = temp_block[0x08]; block[0x02] = temp_block[0x10];
@ -401,12 +399,12 @@ static int RENAME(dct_quantize)(MpegEncContext *s,
block[0x3D] = temp_block[0x2F]; block[0x3E] = temp_block[0x37]; block[0x3D] = temp_block[0x2F]; block[0x3E] = temp_block[0x37];
block[0x37] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F]; block[0x37] = temp_block[0x3E]; block[0x3F] = temp_block[0x3F];
} else { } else {
av_log(s->avctx, AV_LOG_DEBUG, "s->idsp.perm_type: %d\n", av_log(s->c.avctx, AV_LOG_DEBUG, "s->c.idsp.perm_type: %d\n",
(int)s->idsp.perm_type); (int)s->c.idsp.perm_type);
av_assert0(s->idsp.perm_type == FF_IDCT_PERM_NONE || av_assert0(s->c.idsp.perm_type == FF_IDCT_PERM_NONE ||
s->idsp.perm_type == FF_IDCT_PERM_LIBMPEG2 || s->c.idsp.perm_type == FF_IDCT_PERM_LIBMPEG2 ||
s->idsp.perm_type == FF_IDCT_PERM_SIMPLE || s->c.idsp.perm_type == FF_IDCT_PERM_SIMPLE ||
s->idsp.perm_type == FF_IDCT_PERM_TRANSPOSE); s->c.idsp.perm_type == FF_IDCT_PERM_TRANSPOSE);
} }
end: end:
return last_non_zero_p1 - 1; return last_non_zero_p1 - 1;

View File

@ -51,7 +51,7 @@ static void test_motion(const char *name, me_cmp_func test_func)
LOCAL_ALIGNED_16(uint8_t, img1, [WIDTH * HEIGHT]); LOCAL_ALIGNED_16(uint8_t, img1, [WIDTH * HEIGHT]);
LOCAL_ALIGNED_16(uint8_t, img2, [WIDTH * HEIGHT]); LOCAL_ALIGNED_16(uint8_t, img2, [WIDTH * HEIGHT]);
declare_func_emms(AV_CPU_FLAG_MMX, int, struct MpegEncContext *c, declare_func_emms(AV_CPU_FLAG_MMX, int, MPVEncContext *c,
const uint8_t *blk1 /* align width (8 or 16) */, const uint8_t *blk1 /* align width (8 or 16) */,
const uint8_t *blk2 /* align 1 */, ptrdiff_t stride, const uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
int h); int h);