Mirror of https://github.com/FFmpeg/FFmpeg.git (synced 2024-12-23 12:43:46 +02:00)
commit b86216de4c
parent bca99b47bf

    dc_val should be signed

    Originally committed as revision 6364 to svn://svn.ffmpeg.org/ffmpeg/trunk
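The dc_val / dc_ptr buffers changed below hold DC prediction values, and after steps such as the error-concealment filtering further down these values can legitimately become negative. Stored or loaded through a uint16_t pointer, a negative value wraps around to a large positive one and corrupts every later prediction. A minimal stand-alone sketch of that failure mode (not FFmpeg code; names are illustrative):

/* Writing a negative DC prediction into an unsigned 16-bit slot and reading
 * it back yields a huge positive number instead of the original value. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int16_t  signed_store;
    uint16_t unsigned_store;
    int dc = -3;                 /* a perfectly valid filtered DC value */

    signed_store   = dc;
    unsigned_store = dc;         /* wraps to 65533 */

    printf("signed:   %d\n", (int)signed_store);    /* -3    */
    printf("unsigned: %d\n", (int)unsigned_store);  /* 65533 */
    return 0;
}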
@@ -70,7 +70,7 @@ static void put_dc(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb, uint8_t
     }
 }
 
-static void filter181(uint16_t *data, int width, int height, int stride){
+static void filter181(int16_t *data, int width, int height, int stride){
     int x,y;
 
     /* horizontal filter */
@@ -111,7 +111,7 @@ static void filter181(uint16_t *data, int width, int height, int stride){
  * @param w width in 8 pixel blocks
  * @param h height in 8 pixel blocks
  */
-static void guess_dc(MpegEncContext *s, uint16_t *dc, int w, int h, int stride, int is_luma){
+static void guess_dc(MpegEncContext *s, int16_t *dc, int w, int h, int stride, int is_luma){
     int b_x, b_y;
 
     for(b_y=0; b_y<h; b_y++){
@@ -931,7 +931,7 @@ void ff_er_frame_end(MpegEncContext *s){
     for(mb_y=0; mb_y<s->mb_height; mb_y++){
         for(mb_x=0; mb_x<s->mb_width; mb_x++){
             int dc, dcu, dcv, y, n;
-            uint16_t *dc_ptr;
+            int16_t *dc_ptr;
             uint8_t *dest_y, *dest_cb, *dest_cr;
             const int mb_xy= mb_x + mb_y * s->mb_stride;
             const int mb_type= s->current_picture.mb_type[mb_xy];
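The filter181() hunks above are why the concealment buffers must be signed: the name suggests a 1-8-1 smoothing kernel over the guessed DC plane, and a (-1, +8, -1) combination drops below zero whenever a neighbour exceeds the centre sample. A hedged sketch of such a horizontal pass follows; the exact weights, normalisation and border handling in FFmpeg's filter181() may differ.

/* Hedged sketch: one horizontal 1-8-1 smoothing pass over a row of DC
 * values. (-prev + 8*cur - next) can be negative, so the buffer being
 * filtered in place has to be int16_t rather than uint16_t. */
#include <stdint.h>

static void smooth181_row(int16_t *row, int width)
{
    int prev = row[0];
    for (int x = 1; x < width - 1; x++) {
        int dc = -prev + 8 * row[x] - row[x + 1];  /* weights sum to 6 */
        dc = (dc + 3) / 6;                         /* renormalise (assumed rounding) */
        prev = row[x];                             /* keep the unfiltered neighbour */
        row[x] = (int16_t)dc;
    }
}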
@@ -72,7 +72,7 @@ static inline int mpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr);
 static inline int mpeg4_decode_block(MpegEncContext * s, DCTELEM * block,
                               int n, int coded, int intra, int rvlc);
 #ifdef CONFIG_ENCODERS
-static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr);
+static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr);
 static void mpeg4_encode_visual_object_header(MpegEncContext * s);
 static void mpeg4_encode_vol_header(MpegEncContext * s, int vo_number, int vol_number);
 #endif //CONFIG_ENCODERS
@@ -1231,7 +1231,7 @@ void h263_encode_mb(MpegEncContext * s,
     int cbpc, cbpy, i, cbp, pred_x, pred_y;
     int16_t pred_dc;
     int16_t rec_intradc[6];
-    uint16_t *dc_ptr[6];
+    int16_t *dc_ptr[6];
     const int interleaved_stats= (s->flags&CODEC_FLAG_PASS1);
     const int dquant_code[5]= {1,0,9,2,3};
 
@@ -1516,10 +1516,10 @@ void ff_h263_loop_filter(MpegEncContext * s){
 }
 
 #ifdef CONFIG_ENCODERS
-static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr)
+static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
 {
     int x, y, wrap, a, c, pred_dc, scale;
-    uint16_t *dc_val;
+    int16_t *dc_val;
 
     /* find prediction */
     if (n < 4) {
@@ -1565,8 +1565,7 @@ static int h263_pred_dc(MpegEncContext * s, int n, uint16_t **dc_val_ptr)
 static void h263_pred_acdc(MpegEncContext * s, DCTELEM *block, int n)
 {
     int x, y, wrap, a, c, pred_dc, scale, i;
-    uint16_t *dc_val;
-    int16_t *ac_val, *ac_val1;
+    int16_t *dc_val, *ac_val, *ac_val1;
 
     /* find prediction */
     if (n < 4) {
@@ -2530,7 +2529,7 @@ void ff_set_qscale(MpegEncContext * s, int qscale)
 static inline int ff_mpeg4_pred_dc(MpegEncContext * s, int n, int level, int *dir_ptr, int encoding)
 {
     int a, b, c, wrap, pred, scale, ret;
-    uint16_t *dc_val;
+    int16_t *dc_val;
 
     /* find prediction */
     if (n < 4) {
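h263_pred_dc() and ff_mpeg4_pred_dc() above return a pointer into dc_val and combine the left and top neighbours into a predictor, so reading those now-possibly-negative entries through a uint16_t pointer would turn a small negative predictor into a huge one. A hedged sketch of the left/top combination idea; the function name, the 1024 "not decoded yet" constant and the neighbour addressing are illustrative, and the real functions also apply a quantiser-dependent DC scale with rounding.

/* Hedged sketch of left/top DC prediction in the spirit of h263_pred_dc().
 * Assumes dc_val is laid out so the left and top neighbours of block (x, y)
 * sit at (x-1, y) and (x, y-1), and that 1024 marks an undecoded slot. */
#include <stdint.h>

static int pred_dc_left_top(const int16_t *dc_val, int x, int y, int wrap)
{
    int a = dc_val[(x - 1) + y * wrap];   /* left neighbour */
    int c = dc_val[x + (y - 1) * wrap];   /* top neighbour  */

    if (a != 1024 && c != 1024)
        return (a + c) / 2;               /* both known: average them */
    else if (a != 1024)
        return a;                         /* only the left one is known */
    else
        return c;                         /* top one, or 1024 if neither */
}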
@@ -344,8 +344,8 @@ typedef struct MpegEncContext {
     Picture *current_picture_ptr;  ///< pointer to the current picture
     uint8_t *visualization_buffer[3]; //< temporary buffer vor MV visualization
     int last_dc[3];                ///< last DC values for MPEG1
-    uint16_t *dc_val_base;
-    uint16_t *dc_val[3];           ///< used for mpeg4 DC prediction, all 3 arrays must be continuous
+    int16_t *dc_val_base;
+    int16_t *dc_val[3];            ///< used for mpeg4 DC prediction, all 3 arrays must be continuous
     int16_t dc_cache[4*5];
     int y_dc_scale, c_dc_scale;
     const uint8_t *y_dc_scale_table;     ///< qscale -> y_dc_scale table
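The struct fields above describe one backing allocation (dc_val_base) carved into three per-plane views (dc_val[0..2]) that must stay contiguous; the commit only changes the element type of that layout. A hedged sketch of how such a layout can be set up; the struct and function names, sizes and the 1024 reset value are illustrative and this is not the actual initialisation code.

/* Hedged sketch: one contiguous int16_t allocation, three plane views into
 * it, every slot reset to 1024 so the predictors start from a known value. */
#include <stdint.h>
#include <stdlib.h>

typedef struct {
    int16_t *dc_val_base;
    int16_t *dc_val[3];
} DcPlanes;

static int dc_planes_init(DcPlanes *p, int luma_size, int chroma_size)
{
    int total = luma_size + 2 * chroma_size;

    p->dc_val_base = malloc(total * sizeof(*p->dc_val_base));
    if (!p->dc_val_base)
        return -1;

    p->dc_val[0] = p->dc_val_base;               /* Y  plane */
    p->dc_val[1] = p->dc_val[0] + luma_size;     /* Cb plane */
    p->dc_val[2] = p->dc_val[1] + chroma_size;   /* Cr plane */

    for (int i = 0; i < total; i++)
        p->dc_val_base[i] = 1024;                /* prediction reset value */
    return 0;
}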
@@ -627,10 +627,10 @@ static int get_dc(uint8_t *src, int stride, int scale)
 
 /* dir = 0: left, dir = 1: top prediction */
 static inline int msmpeg4_pred_dc(MpegEncContext * s, int n,
-                                  uint16_t **dc_val_ptr, int *dir_ptr)
+                                  int16_t **dc_val_ptr, int *dir_ptr)
 {
     int a, b, c, wrap, pred, scale;
-    uint16_t *dc_val;
+    int16_t *dc_val;
 
     /* find prediction */
     if (n < 4) {
@@ -787,7 +787,7 @@ static void msmpeg4_encode_dc(MpegEncContext * s, int level, int n, int *dir_ptr
         /* update predictor */
         *dc_val= level;
     }else{
-        uint16_t *dc_val;
+        int16_t *dc_val;
         pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
 
         /* update predictor */
@@ -1875,7 +1875,7 @@ static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
         /* update predictor */
         *dc_val= level;
     }else{
-        uint16_t *dc_val;
+        int16_t *dc_val;
         pred = msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
         level += pred;
 
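msmpeg4_pred_dc() above chooses between the left and top neighbour and reports the choice through dir_ptr (dir = 0: left, dir = 1: top, per the comment in the hunk). One common way to make that choice is a local-gradient test, sketched below under the assumption that a is the left, b the top-left and c the top DC value; the concrete comparison and the quantiser scaling in the real function may differ, but the test only stays meaningful on signed values once a predictor has gone negative.

/* Hedged sketch of gradient-based DC predictor selection: if a and b (left
 * and top-left) are similar, the picture varies little vertically here, so
 * predicting from the block above is the safer bet; otherwise predict from
 * the left. */
#include <stdlib.h>   /* abs() */

static int pred_dc_direction(int a, int b, int c, int *dir_ptr)
{
    if (abs(a - b) <= abs(b - c)) {
        *dir_ptr = 1;   /* top prediction */
        return c;
    } else {
        *dir_ptr = 0;   /* left prediction */
        return a;
    }
}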
@@ -2341,10 +2341,10 @@ static inline void vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2], int
  * @param dir_ptr Prediction direction for use in AC prediction
  */
 static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
-                                uint16_t **dc_val_ptr, int *dir_ptr)
+                                int16_t **dc_val_ptr, int *dir_ptr)
 {
     int a, b, c, wrap, pred, scale;
-    uint16_t *dc_val;
+    int16_t *dc_val;
     static const uint16_t dcpred[32] = {
     -1, 1024,  512,  341,  256,  205,  171,  146,  128,
     114,  102,   93,   85,   79,   73,   68,   64,
@@ -2402,10 +2402,10 @@ static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  */
 static inline int vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
                               int a_avail, int c_avail,
-                              uint16_t **dc_val_ptr, int *dir_ptr)
+                              int16_t **dc_val_ptr, int *dir_ptr)
 {
     int a, b, c, wrap, pred, scale;
-    uint16_t *dc_val;
+    int16_t *dc_val;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     int q1, q2 = 0;
 
@@ -2578,7 +2578,7 @@ static int vc1_decode_i_block(VC1Context *v, DCTELEM block[64], int n, int coded
     MpegEncContext *s = &v->s;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int run_diff, i;
-    uint16_t *dc_val;
+    int16_t *dc_val;
     int16_t *ac_val, *ac_val2;
     int dcdiff;
 
@@ -2743,7 +2743,7 @@ static int vc1_decode_i_block_adv(VC1Context *v, DCTELEM block[64], int n, int c
     MpegEncContext *s = &v->s;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int run_diff, i;
-    uint16_t *dc_val;
+    int16_t *dc_val;
     int16_t *ac_val, *ac_val2;
     int dcdiff;
     int a_avail = v->a_avail, c_avail = v->c_avail;
@@ -2940,7 +2940,7 @@ static int vc1_decode_intra_block(VC1Context *v, DCTELEM block[64], int n, int c
     MpegEncContext *s = &v->s;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int run_diff, i;
-    uint16_t *dc_val;
+    int16_t *dc_val;
     int16_t *ac_val, *ac_val2;
     int dcdiff;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
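A side note on the dcpred[] table visible in the vc1_i_pred_dc() hunk: the entries shown (1024, 512, 341, 256, 205, ...) are 1024/pq rounded to the nearest integer, i.e. a quantiser-dependent default DC predictor for when no decoded neighbour is available. The small loop below merely reproduces those values; the real decoder reads them straight from the static table.

/* Reproduce the visible dcpred[] entries as round(1024 / pq). */
#include <stdio.h>

int main(void)
{
    for (int pq = 1; pq <= 16; pq++)
        printf("pq=%2d  default_dc_pred=%d\n", pq, (1024 + pq / 2) / pq);
    return 0;
}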