	lavc: Convert some remaining strides to ptrdiff_t
committed by Luca Barbato

parent 4baba6c813
commit 93f305473f
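The common thread in the diff below is switching stride variables from int to ptrdiff_t. A stride is multiplied by a row index and added to a plane pointer, and it may legitimately be negative (the vp3 hunk flips it with stride = -stride), so a pointer-sized signed type is the natural fit. A minimal sketch of that arithmetic, not FFmpeg code and with a made-up helper name:

#include <stddef.h>
#include <stdint.h>

/* pixel_at() is a hypothetical helper, shown only to illustrate the
 * arithmetic that strides feed into.  Because stride is ptrdiff_t, the
 * product y * stride is evaluated in a pointer-sized signed type, so it
 * neither overflows 32-bit int for very large frames nor misbehaves for
 * negative (flipped / bottom-up) strides. */
uint8_t *pixel_at(uint8_t *base, ptrdiff_t stride, int x, int y)
{
    return base + y * stride + x;
}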
				
@@ -210,7 +210,7 @@ typedef struct AVSContext {
        6:    A3  X2  X3   */
     int pred_mode_Y[3*3];
     int *top_pred_Y;
-    int l_stride, c_stride;
+    ptrdiff_t l_stride, c_stride;
     int luma_scan[4];
     int qp;
     int qp_fixed;

@@ -875,7 +875,7 @@ static av_always_inline void mc_dir_part(H264Context *h, Picture *pic,
     const int mx      = h->mv_cache[list][scan8[n]][0] + src_x_offset * 8;
     int my            = h->mv_cache[list][scan8[n]][1] + src_y_offset * 8;
     const int luma_xy = (mx & 3) + ((my & 3) << 2);
-    int offset        = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
+    ptrdiff_t offset  = ((mx >> 2) << pixel_shift) + (my >> 2) * h->mb_linesize;
     uint8_t *src_y    = pic->f.data[0] + offset;
     uint8_t *src_cb, *src_cr;
     int extra_width  = 0;

@@ -276,7 +276,7 @@ typedef struct H264Context {
 
     /* coded dimensions -- 16 * mb w/h */
     int width, height;
-    int linesize, uvlinesize;
+    ptrdiff_t linesize, uvlinesize;
     int chroma_x_shift, chroma_y_shift;
 
     int qscale;
@@ -349,8 +349,8 @@ typedef struct H264Context {
     uint32_t *mb2br_xy;
     int b_stride;       // FIXME use s->b4_stride
 
-    int mb_linesize;    ///< may be equal to s->linesize or s->linesize * 2, for mbaff
-    int mb_uvlinesize;
+    ptrdiff_t mb_linesize;  ///< may be equal to s->linesize or s->linesize * 2, for mbaff
+    ptrdiff_t mb_uvlinesize;
 
     unsigned current_sps_id; ///< id of the current SPS
     SPS sps; ///< current sps

@@ -275,8 +275,8 @@ typedef struct MpegEncContext {
     int b4_stride;             ///< 4*mb_width+1 used for some 4x4 block arrays to allow simple addressing
     int h_edge_pos, v_edge_pos;///< horizontal / vertical position of the right/bottom edge (pixel replication)
     int mb_num;                ///< number of MBs of a picture
-    int linesize;              ///< line size, in bytes, may be different from width
-    int uvlinesize;            ///< line size, for chroma in bytes, may be different from width
+    ptrdiff_t linesize;        ///< line size, in bytes, may be different from width
+    ptrdiff_t uvlinesize;      ///< line size, for chroma in bytes, may be different from width
     Picture *picture;          ///< main picture buffer
     Picture **input_picture;   ///< next pictures on display order for encoding
     Picture **reordered_input_picture; ///< pointer to the next pictures in codedorder for encoding

@@ -877,7 +877,7 @@ static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
         if (pic_arg->linesize[2] != s->uvlinesize)
             direct = 0;
 
-        av_dlog(s->avctx, "%d %d %d %d\n", pic_arg->linesize[0],
+        av_dlog(s->avctx, "%d %d %td %td\n", pic_arg->linesize[0],
                 pic_arg->linesize[1], s->linesize, s->uvlinesize);
 
         if (direct) {
@@ -1641,7 +1641,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
     int skip_dct[8];
     int dct_offset = s->linesize * 8; // default for progressive frames
     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
-    int wrap_y, wrap_c;
+    ptrdiff_t wrap_y, wrap_c;
 
     for (i = 0; i < mb_block_count; i++)
         skip_dct[i] = s->skipdct;

@@ -37,8 +37,8 @@ static void gmc1_motion(MpegEncContext *s,
                         uint8_t **ref_picture)
 {
     uint8_t *ptr;
-    int offset, src_x, src_y, linesize, uvlinesize;
-    int motion_x, motion_y;
+    int src_x, src_y, motion_x, motion_y;
+    ptrdiff_t offset, linesize, uvlinesize;
     int emu=0;
 
     motion_x= s->sprite_offset[0][0];
@@ -462,7 +462,8 @@ static inline void qpel_motion(MpegEncContext *s,
                                int motion_x, int motion_y, int h)
 {
     uint8_t *ptr_y, *ptr_cb, *ptr_cr;
-    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos, linesize, uvlinesize;
+    int dxy, uvdxy, mx, my, src_x, src_y, uvsrc_x, uvsrc_y, v_edge_pos;
+    ptrdiff_t linesize, uvlinesize;
 
     dxy = ((motion_y & 3) << 2) | (motion_x & 3);
     src_x = s->mb_x *  16                 + (motion_x >> 2);
@@ -555,8 +556,9 @@ static void chroma_4mv_motion(MpegEncContext *s,
                               op_pixels_func *pix_op,
                               int mx, int my)
 {
-    int dxy, emu=0, src_x, src_y, offset;
     uint8_t *ptr;
+    int src_x, src_y, dxy, emu = 0;
+    ptrdiff_t offset;
 
     /* In case of 8X8, we construct a single chroma motion vector
        with a special rounding */

@@ -1288,7 +1288,7 @@ static void apply_loop_filter(Vp3DecodeContext *s, int plane, int ystart, int ye
     int width           = s->fragment_width[!!plane];
     int height          = s->fragment_height[!!plane];
     int fragment        = s->fragment_start        [plane] + ystart * width;
-    int stride          = s->current_frame.f->linesize[plane];
+    ptrdiff_t stride    = s->current_frame.f->linesize[plane];
     uint8_t *plane_data = s->current_frame.f->data    [plane];
     if (!s->flipped_image) stride = -stride;
     plane_data += s->data_offset[plane] + 8*ystart*stride;
@@ -1470,7 +1470,7 @@ static void render_slice(Vp3DecodeContext *s, int slice)
         uint8_t *output_plane = s->current_frame.f->data    [plane] + s->data_offset[plane];
         uint8_t *  last_plane = s->   last_frame.f->data    [plane] + s->data_offset[plane];
         uint8_t *golden_plane = s-> golden_frame.f->data    [plane] + s->data_offset[plane];
-        int stride            = s->current_frame.f->linesize[plane];
+        ptrdiff_t stride      = s->current_frame.f->linesize[plane];
         int plane_width       = s->width  >> (plane && s->chroma_x_shift);
         int plane_height      = s->height >> (plane && s->chroma_y_shift);
         int8_t (*motion_val)[2] = s->motion_val[!!plane];

@@ -303,7 +303,7 @@ static void vp56_add_predictors_dc(VP56Context *s, VP56Frame ref_frame)
 }
 
 static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
-                                int stride, int dx, int dy)
+                                ptrdiff_t stride, int dx, int dy)
 {
     int t = ff_vp56_filter_threshold[s->quantizer];
     if (dx)  s->vp56dsp.edge_filter_hor(yuv +         10-dx , stride, t);
@@ -311,7 +311,7 @@ static void vp56_deblock_filter(VP56Context *s, uint8_t *yuv,
 }
 
 static void vp56_mc(VP56Context *s, int b, int plane, uint8_t *src,
-                    int stride, int x, int y)
+                    ptrdiff_t stride, int x, int y)
 {
     uint8_t *dst = s->frames[VP56_FRAME_CURRENT]->data[plane] + s->block_offset[b];
     uint8_t *src_block;
@@ -509,7 +509,8 @@ int ff_vp56_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
     for (is_alpha=0; is_alpha < 1+s->has_alpha; is_alpha++) {
         int mb_row, mb_col, mb_row_flip, mb_offset = 0;
-        int block, y, uv, stride_y, stride_uv;
+        int block, y, uv;
+        ptrdiff_t stride_y, stride_uv;
         int golden_frame = 0;
 
         s->modelp = &s->models[is_alpha];

@@ -1179,7 +1179,7 @@ static av_always_inline
 void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
                  ThreadFrame *ref, const VP56mv *mv,
                  int x_off, int y_off, int block_w, int block_h,
-                 int width, int height, int linesize,
+                 int width, int height, ptrdiff_t linesize,
                  vp8_mc_func mc_func[3][3])
 {
     uint8_t *src = ref->f->data[0];
@@ -1229,7 +1229,7 @@ void vp8_mc_luma(VP8Context *s, VP8ThreadData *td, uint8_t *dst,
 static av_always_inline
 void vp8_mc_chroma(VP8Context *s, VP8ThreadData *td, uint8_t *dst1, uint8_t *dst2,
                    ThreadFrame *ref, const VP56mv *mv, int x_off, int y_off,
-                   int block_w, int block_h, int width, int height, int linesize,
+                   int block_w, int block_h, int width, int height, ptrdiff_t linesize,
                    vp8_mc_func mc_func[3][3])
 {
     uint8_t *src1 = ref->f->data[1], *src2 = ref->f->data[2];

@@ -94,7 +94,8 @@ void ff_mspel_motion(MpegEncContext *s,
 {
     Wmv2Context * const w= (Wmv2Context*)s;
     uint8_t *ptr;
-    int dxy, offset, mx, my, src_x, src_y, v_edge_pos, linesize, uvlinesize;
+    int dxy, offset, mx, my, src_x, src_y, v_edge_pos;
+    ptrdiff_t linesize, uvlinesize;
     int emu=0;
 
     dxy = ((motion_y & 1) << 1) | (motion_x & 1);
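One consequence of the type change shows up in the load_input_picture hunk above: the av_dlog format string switches from %d to %td, the C99 conversion for ptrdiff_t. A standalone illustration using plain printf rather than the av_dlog macro, with made-up values:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    ptrdiff_t linesize = 1920, uvlinesize = 960;   /* illustrative values only */

    /* The 't' length modifier tells printf that the argument is a
     * ptrdiff_t; passing a ptrdiff_t through a plain %d is undefined
     * behaviour on platforms where ptrdiff_t and int differ in size. */
    printf("%td %td\n", linesize, uvlinesize);
    return 0;
}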