cleanup
Originally committed as revision 2605 to svn://svn.ffmpeg.org/ffmpeg/trunk
commit d55f7b6521
parent 0c11692a1d
@@ -85,7 +85,6 @@ int ff_h263_decode_init(AVCodecContext *avctx)
         s->msmpeg4_version=5;
         break;
     case CODEC_ID_H263I:
-        s->h263_intel = 1;
         break;
     case CODEC_ID_FLV1:
         s->h263_flv = 1;
@@ -460,7 +459,7 @@ retry:
 
         if(s->flags& CODEC_FLAG_LOW_DELAY)
             s->low_delay=1;
-    } else if (s->h263_intel) {
+    } else if (s->codec_id == CODEC_ID_H263I) {
         ret = intel_h263_decode_picture_header(s);
     } else if (s->h263_flv) {
         ret = flv_h263_decode_picture_header(s);
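Taken together with the first hunk, this removes a redundant piece of decoder state: CODEC_ID_H263I no longer has to be mirrored into a private h263_intel flag at init time, because the picture-header dispatch can test s->codec_id directly. A condensed, illustrative excerpt of the resulting dispatch (the real code, presumably in h263dec.c, is a longer if/else chain):

    /* Illustrative excerpt only -- not the complete dispatch in the decoder. */
    if (s->codec_id == CODEC_ID_H263I) {         /* Intel H.263: identified by codec id alone */
        ret = intel_h263_decode_picture_header(s);
    } else if (s->h263_flv) {                    /* FLV variant still uses its own flag */
        ret = flv_h263_decode_picture_header(s);
    } else {
        ret = h263_decode_picture_header(s);     /* plain H.263 */
    }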
@@ -634,7 +634,6 @@ int MPV_encode_init(AVCodecContext *avctx)
     avctx->pix_fmt = PIX_FMT_YUV420P; // FIXME
 
     s->bit_rate = avctx->bit_rate;
-    s->bit_rate_tolerance = avctx->bit_rate_tolerance;
     s->width = avctx->width;
     s->height = avctx->height;
     if(avctx->gop_size > 600){
@@ -642,13 +641,9 @@ int MPV_encode_init(AVCodecContext *avctx)
         avctx->gop_size=600;
     }
     s->gop_size = avctx->gop_size;
-    s->max_qdiff= avctx->max_qdiff;
-    s->qcompress= avctx->qcompress;
-    s->qblur= avctx->qblur;
     s->avctx = avctx;
     s->flags= avctx->flags;
     s->max_b_frames= avctx->max_b_frames;
-    s->b_frame_strategy= avctx->b_frame_strategy;
     s->codec_id= avctx->codec->id;
     s->luma_elim_threshold = avctx->luma_elim_threshold;
     s->chroma_elim_threshold= avctx->chroma_elim_threshold;
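The removals above are the encoder-side half of a pattern that recurs through the rest of the commit: options that used to be copied from AVCodecContext into MpegEncContext at init time are dropped from the private struct, and their consumers read them through the stored s->avctx pointer instead (the "AVCodecContext *a= s->avctx" shorthand added in the ratecontrol hunks below). A self-contained toy of the idiom, with hypothetical struct and field names rather than FFmpeg's:

    #include <stdio.h>

    /* Hypothetical types illustrating the refactor; not FFmpeg's definitions. */
    typedef struct PublicContext {
        float qcompress;            /* option set by the user */
    } PublicContext;

    typedef struct EncoderContext {
        PublicContext *avctx;       /* back-pointer kept at init, like s->avctx */
        /* float qcompress;            <- the duplicated copy this commit removes */
    } EncoderContext;

    static float read_qcompress(const EncoderContext *s) {
        const PublicContext *a = s->avctx;   /* same idiom as "AVCodecContext *a= s->avctx" */
        return a->qcompress;                 /* only one copy of the option exists */
    }

    int main(void) {
        PublicContext pub = { 0.5f };
        EncoderContext enc = { &pub };
        printf("qcompress = %.2f\n", read_qcompress(&enc));
        return 0;
    }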
@@ -1683,10 +1678,10 @@ static void select_input_picture(MpegEncContext *s){
                 av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n");
                 b_frames = s->max_b_frames;
             }
-        }else if(s->b_frame_strategy==0){
+        }else if(s->avctx->b_frame_strategy==0){
             b_frames= s->max_b_frames;
             while(b_frames && !s->input_picture[b_frames]) b_frames--;
-        }else if(s->b_frame_strategy==1){
+        }else if(s->avctx->b_frame_strategy==1){
             for(i=1; i<s->max_b_frames+1; i++){
                 if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){
                     s->input_picture[i]->b_frame_score=
@@ -236,25 +236,19 @@ typedef struct MpegEncContext {
     int gop_size;
     int intra_only;              ///< if true, only intra pictures are generated
     int bit_rate;                ///< wanted bit rate
-    int bit_rate_tolerance;      ///< amount of +- bits (>0)
     enum OutputFormat out_format; ///< output format
     int h263_pred;               ///< use mpeg4/h263 ac/dc predictions
 
     /* the following codec id fields are deprecated in favor of codec_id */
     int h263_plus;               ///< h263 plus headers
     int h263_msmpeg4;            ///< generate MSMPEG4 compatible stream (deprecated, use msmpeg4_version instead)
-    int h263_intel;              ///< use I263 intel h263 header
     int h263_flv;                ///< use flv h263 header
 
     int codec_id;                /* see CODEC_ID_xxx */
     int fixed_qscale;            ///< fixed qscale if non zero
-    float qcompress;             ///< amount of qscale change between easy & hard scenes (0.0-1.0)
-    float qblur;                 ///< amount of qscale smoothing over time (0.0-1.0)
-    int max_qdiff;               ///< max qscale difference between frames
     int encoding;                ///< true if we are encoding (vs decoding)
     int flags;                   ///< AVCodecContext.flags (HQ, MV4, ...)
     int max_b_frames;            ///< max number of b-frames for encoding
-    int b_frame_strategy;
     int luma_elim_threshold;
     int chroma_elim_threshold;
     int strict_std_compliance;   ///< strictly follow the std (MPEG4, ...)
@@ -236,6 +236,7 @@ int ff_vbv_update(MpegEncContext *s, int frame_size){
 */
 static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num){
     RateControlContext *rcc= &s->rc_context;
+    AVCodecContext *a= s->avctx;
     double q, bits;
     const int pict_type= rce->new_pict_type;
     const double mb_num= s->mb_num;
@@ -256,7 +257,7 @@ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_f
         rce->pict_type == P_TYPE,
         rce->pict_type == B_TYPE,
         rcc->qscale_sum[pict_type] / (double)rcc->frame_count[pict_type],
-        s->qcompress,
+        a->qcompress,
 /*        rcc->last_qscale_for[I_TYPE],
         rcc->last_qscale_for[P_TYPE],
         rcc->last_qscale_for[B_TYPE],
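The only change here is that qcompress is read from the codec context, but since this is where the option feeds the rate-control equation, a toy illustration of what it controls may help. Assuming the default rc_eq of the era, "tex^qComp", a frame's pre-adjustment bit budget grows with its texture complexity raised to qcompress; the values and structure below are illustrative, not the real get_qscale():

    #include <math.h>
    #include <stdio.h>

    /* Toy model of qcompress (qComp), assuming the default rc_eq "tex^qComp":
     *   qcompress = 0.0 -> every frame gets the same budget (bitrate-flat)
     *   qcompress = 1.0 -> budget tracks complexity (quality-flat)        */
    int main(void) {
        const double tex[]   = { 1e4, 1e5, 1e6 };   /* made-up per-frame complexities */
        const double qcomp[] = { 0.0, 0.5, 1.0 };
        for (int c = 0; c < 3; c++) {
            printf("qcompress=%.1f:", qcomp[c]);
            for (int i = 0; i < 3; i++)
                printf(" %10.0f", pow(tex[i], qcomp[c]));
            printf("   (relative bit budgets)\n");
        }
        return 0;
    }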
@@ -577,6 +578,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
     int picture_number= s->picture_number;
     int64_t wanted_bits;
     RateControlContext *rcc= &s->rc_context;
+    AVCodecContext *a= s->avctx;
     RateControlEntry local_rce, *rce;
     double bits;
     double rate_factor;
@@ -606,7 +608,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
     }
 
     diff= s->total_bits - wanted_bits;
-    br_compensation= (s->bit_rate_tolerance - diff)/s->bit_rate_tolerance;
+    br_compensation= (a->bit_rate_tolerance - diff)/a->bit_rate_tolerance;
     if(br_compensation<=0.0) br_compensation=0.001;
 
     var= pict_type == I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum;
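A quick worked example of the adjusted line: with a->bit_rate_tolerance of 8,000,000 bits and the stream currently 2,000,000 bits over its target (diff = 2,000,000), br_compensation = (8000000 - 2000000) / 8000000 = 0.75; undershooting the target pushes the factor above 1.0, and an overshoot larger than the tolerance is clamped to 0.001 by the following line. The behavioural content of the hunk itself is only that the tolerance is now read from the AVCodecContext instead of the removed MpegEncContext copy.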
@@ -658,8 +660,8 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
     assert(q>0.0);
 
     if(pict_type==P_TYPE || s->intra_only){ //FIXME type dependant blur like in 2-pass
-        rcc->short_term_qsum*=s->qblur;
-        rcc->short_term_qcount*=s->qblur;
+        rcc->short_term_qsum*=a->qblur;
+        rcc->short_term_qcount*=a->qblur;
 
         rcc->short_term_qsum+= q;
         rcc->short_term_qcount++;
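Read in isolation, the short_term_qsum / short_term_qcount update above is an exponentially weighted running average of recent quantizers, with qblur as the decay factor. A standalone toy (the input values and the main() harness are made up) that reproduces just that arithmetic:

    #include <stdio.h>

    /* Mirrors the short_term_qsum / short_term_qcount update from the hunk
     * above: an exponentially weighted running average of recent quantizers. */
    int main(void) {
        const double qblur = 0.5;
        const double frame_q[] = { 4.0, 6.0, 8.0, 5.0 };
        double qsum = 0.0, qcount = 0.0;
        for (int i = 0; i < 4; i++) {
            qsum   *= qblur;          /* decay the weight of older frames */
            qcount *= qblur;
            qsum   += frame_q[i];
            qcount += 1.0;
            printf("frame %d: short-term average q = %.3f\n", i, qsum / qcount);
        }
        return 0;
    }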
@@ -711,6 +713,7 @@ float ff_rate_estimate_qscale(MpegEncContext *s)
 static int init_pass2(MpegEncContext *s)
 {
     RateControlContext *rcc= &s->rc_context;
+    AVCodecContext *a= s->avctx;
     int i;
     double fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base;
     double complexity[5]={0,0,0,0,0}; // aproximate bits at quant=1
@@ -722,7 +725,7 @@ static int init_pass2(MpegEncContext *s)
     double rate_factor=0;
     double step;
     //int last_i_frame=-10000000;
-    const int filter_size= (int)(s->qblur*4) | 1;
+    const int filter_size= (int)(a->qblur*4) | 1;
     double expected_bits;
     double *qscale, *blured_qscale;
 
@@ -803,7 +806,7 @@ static int init_pass2(MpegEncContext *s)
             for(j=0; j<filter_size; j++){
                 int index= i+j-filter_size/2;
                 double d= index-i;
-                double coeff= s->qblur==0 ? 1.0 : exp(-d*d/(s->qblur * s->qblur));
+                double coeff= a->qblur==0 ? 1.0 : exp(-d*d/(a->qblur * a->qblur));
 
                 if(index < 0 || index >= rcc->num_entries) continue;
                 if(pict_type != rcc->entry[index].new_pict_type) continue;
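For context on the last two hunks: in two-pass mode the per-frame qscales are smoothed with a Gaussian-shaped window whose width comes from qblur (filter_size = (int)(qblur*4) | 1). The sketch below reproduces that smoothing loop in a self-contained form; the helper name is mine, and the per-picture-type skip visible in the real loop is omitted:

    #include <math.h>

    /* Sketch of the two-pass qscale smoothing: each frame's qscale becomes a
     * Gaussian-weighted average of nearby frames' qscales, width set by qblur. */
    static void blur_qscales(const double *qscale, double *blured_qscale,
                             int num_entries, double qblur)
    {
        const int filter_size = (int)(qblur * 4) | 1;    /* always odd */
        for (int i = 0; i < num_entries; i++) {
            double q = 0.0, sum = 0.0;
            for (int j = 0; j < filter_size; j++) {
                int index = i + j - filter_size / 2;
                double d = index - i;
                double coeff = qblur == 0 ? 1.0 : exp(-d * d / (qblur * qblur));

                if (index < 0 || index >= num_entries) continue;
                q   += qscale[index] * coeff;
                sum += coeff;
            }
            blured_qscale[i] = q / sum;
        }
    }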