
mpegvideo: Use hpeldsp instead of dsputil for half-pel functions

This also converts vc1, since that is mpegvideo-based.

Signed-off-by: Martin Storsjö <martin@martin.st>
Author: Ronald S. Bultje, 2013-03-10 13:55:07 -07:00 (committed by Martin Storsjö)
parent 6caa44aa7d
commit f4fed5a2f9
7 changed files with 41 additions and 37 deletions
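
The conversion below is mechanical: every half-pel put/avg call that previously went through DSPContext now goes through the new HpelDSPContext, which mpegvideo initializes next to dsputil. As a minimal sketch of that pattern (the helper name copy_hpel_luma is hypothetical and only illustrates the calls the hunks below switch over; index [0] of the tables is the 16x16 variant, [1] the 8x8 variant, and dxy encodes the half-pel phase):

    #include "mpegvideo.h"   /* pulls in hpeldsp.h after this change */

    /* Hypothetical helper, not part of the commit: shows the s->hdsp call
     * pattern that replaces the old s->dsp half-pel tables. */
    static void copy_hpel_luma(MpegEncContext *s, uint8_t *dest,
                               const uint8_t *src, int dxy, int linesize)
    {
        /* ff_hpeldsp_init(&s->hdsp, s->avctx->flags) must have run,
         * as done in ff_dct_common_init() below. */
        if (s->no_rounding)
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest, src, linesize, 16);
        else
            s->hdsp.put_pixels_tab[0][dxy](dest, src, linesize, 16);
    }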

configure

@@ -1511,7 +1511,7 @@ mdct_select="fft"
 rdft_select="fft"
 mpegaudio_select="mpegaudiodsp"
 mpegaudiodsp_select="dct"
-mpegvideo_select="dsputil videodsp"
+mpegvideo_select="dsputil hpeldsp videodsp"
 mpegvideoenc_select="mpegvideo"
 # decoders / encoders


@@ -339,9 +339,11 @@ int ff_init_me(MpegEncContext *s){
         else
             c->sub_motion_search= hpel_motion_search;
     }
-    c->hpel_avg= s->dsp.avg_pixels_tab;
-    if(s->no_rounding) c->hpel_put= s->dsp.put_no_rnd_pixels_tab;
-    else               c->hpel_put= s->dsp.put_pixels_tab;
+    c->hpel_avg = s->hdsp.avg_pixels_tab;
+    if (s->no_rounding)
+        c->hpel_put = s->hdsp.put_no_rnd_pixels_tab;
+    else
+        c->hpel_put = s->hdsp.put_pixels_tab;

     if(s->linesize){
         c->stride = s->linesize;
@@ -625,9 +627,9 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
                 dxy = ((my4 & 1) << 1) | (mx4 & 1);
                 if(s->no_rounding)
-                    s->dsp.put_no_rnd_pixels_tab[1][dxy](dest_y , ref , stride, h);
+                    s->hdsp.put_no_rnd_pixels_tab[1][dxy](dest_y , ref , stride, h);
                 else
-                    s->dsp.put_pixels_tab [1][dxy](dest_y , ref , stride, h);
+                    s->hdsp.put_pixels_tab [1][dxy](dest_y , ref , stride, h);
             }
             dmin_sum+= (mv_penalty[mx4-pred_x4] + mv_penalty[my4-pred_y4])*c->mb_penalty_factor;
         }else
@@ -666,11 +668,11 @@ static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
         offset= (s->mb_x*8 + (mx>>1)) + (s->mb_y*8 + (my>>1))*s->uvlinesize;
         if(s->no_rounding){
-            s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
-            s->dsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
+            s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
+            s->hdsp.put_no_rnd_pixels_tab[1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
         }else{
-            s->dsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
-            s->dsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
+            s->hdsp.put_pixels_tab [1][dxy](c->scratchpad , s->last_picture.f.data[1] + offset, s->uvlinesize, 8);
+            s->hdsp.put_pixels_tab [1][dxy](c->scratchpad + 8, s->last_picture.f.data[2] + offset, s->uvlinesize, 8);
         }
         dmin_sum += s->dsp.mb_cmp[1](s, s->new_picture.f.data[1] + s->mb_x*8 + s->mb_y*8*s->uvlinesize, c->scratchpad , s->uvlinesize, 8);
@@ -778,9 +780,9 @@ static int interlaced_search(MpegEncContext *s, int ref_index,
                 dxy = ((my_i & 1) << 1) | (mx_i & 1);
                 if(s->no_rounding){
-                    s->dsp.put_no_rnd_pixels_tab[size][dxy](c->scratchpad, ref , stride, h);
+                    s->hdsp.put_no_rnd_pixels_tab[size][dxy](c->scratchpad, ref , stride, h);
                 }else{
-                    s->dsp.put_pixels_tab [size][dxy](c->scratchpad, ref , stride, h);
+                    s->hdsp.put_pixels_tab [size][dxy](c->scratchpad, ref , stride, h);
                 }
                 dmin= s->dsp.mb_cmp[size](s, c->src[block][0], c->scratchpad, stride, h);
                 dmin+= (mv_penalty[mx_i-c->pred_x] + mv_penalty[my_i-c->pred_y] + 1)*c->mb_penalty_factor;
@@ -1217,14 +1219,14 @@ static inline int check_bidir_mv(MpegEncContext * s,
         src_y = motion_fy >> 1;
         ptr = ref_data[0] + (src_y * stride) + src_x;
-        s->dsp.put_pixels_tab[size][dxy](dest_y , ptr , stride, h);
+        s->hdsp.put_pixels_tab[size][dxy](dest_y , ptr , stride, h);
         dxy = ((motion_by & 1) << 1) | (motion_bx & 1);
         src_x = motion_bx >> 1;
         src_y = motion_by >> 1;
         ptr = ref2_data[0] + (src_y * stride) + src_x;
-        s->dsp.avg_pixels_tab[size][dxy](dest_y , ptr , stride, h);
+        s->hdsp.avg_pixels_tab[size][dxy](dest_y , ptr , stride, h);
     }
     fbmin = (mv_penalty_f[motion_fx-pred_fx] + mv_penalty_f[motion_fy-pred_fy])*c->mb_penalty_factor


@@ -162,6 +162,7 @@ static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
 av_cold int ff_dct_common_init(MpegEncContext *s)
 {
     ff_dsputil_init(&s->dsp, s->avctx);
+    ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
     ff_videodsp_init(&s->vdsp, s->avctx->bits_per_raw_sample);

     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_c;
@@ -2056,13 +2057,13 @@ void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
                 op_qpix= s->me.qpel_put;
                 if ((!s->no_rounding) || s->pict_type==AV_PICTURE_TYPE_B){
-                    op_pix = s->dsp.put_pixels_tab;
+                    op_pix = s->hdsp.put_pixels_tab;
                 }else{
-                    op_pix = s->dsp.put_no_rnd_pixels_tab;
+                    op_pix = s->hdsp.put_no_rnd_pixels_tab;
                 }

                 if (s->mv_dir & MV_DIR_FORWARD) {
                     ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f.data, op_pix, op_qpix);
-                    op_pix = s->dsp.avg_pixels_tab;
+                    op_pix = s->hdsp.avg_pixels_tab;
                     op_qpix= s->me.qpel_avg;
                 }
                 if (s->mv_dir & MV_DIR_BACKWARD) {
@@ -2182,9 +2183,9 @@ void MPV_decode_mb_internal(MpegEncContext *s, int16_t block[12][64],
         }
skip_idct:
         if(!readable){
-            s->dsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
-            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
-            s->dsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
+            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y , linesize,16);
+            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
+            s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
         }
     }
 }
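
For context on the decoder hunks above: MPV_decode_mb_internal() writes the first prediction direction with a put table and blends any further direction in with the avg table, both of which now come from s->hdsp. A hedged sketch of that flow, with a hypothetical wrapper name and the ff_MPV_motion() call shape taken from the hunk above:

    #include "mpegvideo.h"

    /* Hypothetical wrapper, for illustration only: put-then-avg inter
     * prediction using the hpeldsp tables, as in MPV_decode_mb_internal(). */
    static void predict_inter_mb(MpegEncContext *s, uint8_t *dest_y,
                                 uint8_t *dest_cb, uint8_t *dest_cr)
    {
        op_pixels_func (*op_pix)[4];
        qpel_mc_func   (*op_qpix)[16] = s->me.qpel_put;

        if (!s->no_rounding || s->pict_type == AV_PICTURE_TYPE_B)
            op_pix = s->hdsp.put_pixels_tab;
        else
            op_pix = s->hdsp.put_no_rnd_pixels_tab;

        if (s->mv_dir & MV_DIR_FORWARD) {
            ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
                          s->last_picture.f.data, op_pix, op_qpix);
            /* everything after the first direction is averaged in */
            op_pix  = s->hdsp.avg_pixels_tab;
            op_qpix = s->me.qpel_avg;
        }
        if (s->mv_dir & MV_DIR_BACKWARD)
            ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 1,
                          s->next_picture.f.data, op_pix, op_qpix);
    }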


@@ -32,6 +32,7 @@
 #include "dsputil.h"
 #include "error_resilience.h"
 #include "get_bits.h"
+#include "hpeldsp.h"
 #include "put_bits.h"
 #include "ratecontrol.h"
 #include "parser.h"
@@ -380,6 +381,7 @@ typedef struct MpegEncContext {
     int h263_long_vectors;      ///< use horrible h263v1 long vector mode

     DSPContext dsp;             ///< pointers for accelerated dsp functions
+    HpelDSPContext hdsp;
     VideoDSPContext vdsp;
     int f_code;                 ///< forward MV resolution
     int b_code;                 ///< backward MV resolution for B Frames (mpeg4)


@@ -1771,10 +1771,10 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
             dest_cr = s->dest[2];

             if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
-                op_pix  = s->dsp.put_pixels_tab;
+                op_pix  = s->hdsp.put_pixels_tab;
                 op_qpix = s->dsp.put_qpel_pixels_tab;
             } else {
-                op_pix  = s->dsp.put_no_rnd_pixels_tab;
+                op_pix  = s->hdsp.put_no_rnd_pixels_tab;
                 op_qpix = s->dsp.put_no_rnd_qpel_pixels_tab;
             }
@@ -1782,7 +1782,7 @@ static av_always_inline void encode_mb_internal(MpegEncContext *s,
                 ff_MPV_motion(s, dest_y, dest_cb, dest_cr, 0,
                               s->last_picture.f.data,
                               op_pix, op_qpix);
-                op_pix  = s->dsp.avg_pixels_tab;
+                op_pix  = s->hdsp.avg_pixels_tab;
                 op_qpix = s->dsp.avg_qpel_pixels_tab;
             }
             if (s->mv_dir & MV_DIR_BACKWARD) {
@@ -2759,9 +2759,9 @@ static int encode_thread(AVCodecContext *c, void *arg){
                     ff_h263_update_motion_val(s);

                     if(next_block==0){ //FIXME 16 vs linesize16
-                        s->dsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
-                        s->dsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
-                        s->dsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
+                        s->hdsp.put_pixels_tab[0][0](s->dest[0], s->rd_scratchpad , s->linesize ,16);
+                        s->hdsp.put_pixels_tab[1][0](s->dest[1], s->rd_scratchpad + 16*s->linesize , s->uvlinesize, 8);
+                        s->hdsp.put_pixels_tab[1][0](s->dest[2], s->rd_scratchpad + 16*s->linesize + 8, s->uvlinesize, 8);
                     }

                     if(s->avctx->mb_decision == FF_MB_DECISION_BITS)


@@ -75,9 +75,9 @@ static void gmc1_motion(MpegEncContext *s,
         dxy= ((motion_x>>3)&1) | ((motion_y>>2)&2);

         if (s->no_rounding){
-            s->dsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
+            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dest_y, ptr, linesize, 16);
         }else{
-            s->dsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16);
+            s->hdsp.put_pixels_tab [0][dxy](dest_y, ptr, linesize, 16);
         }
     }
@@ -853,7 +853,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                                    s->mv[dir][2*i + j][0],
                                    s->mv[dir][2*i + j][1], 8, mb_y);
                 }
-                pix_op = s->dsp.avg_pixels_tab;
+                pix_op = s->hdsp.avg_pixels_tab;
             }
         }else{
             for(i=0; i<2; i++){
@@ -863,7 +863,7 @@ static av_always_inline void MPV_motion_internal(MpegEncContext *s,
                             s->mv[dir][2*i][0],s->mv[dir][2*i][1],16, mb_y>>1);

                 // after put we make avg of the same block
-                pix_op=s->dsp.avg_pixels_tab;
+                pix_op=s->hdsp.avg_pixels_tab;

                 //opposite parity is always in the same frame if this is second field
                 if(!s->first_field){


@@ -511,9 +511,9 @@ static void vc1_mc_1mv(VC1Context *v, int dir)
     } else { // hpel mc - always used for luma
         dxy = (my & 2) | ((mx & 2) >> 1);
         if (!v->rnd)
-            dsp->put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.put_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
         else
-            dsp->put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.put_no_rnd_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
     }

     if (s->flags & CODEC_FLAG_GRAY) return;
@@ -545,7 +545,6 @@ static inline int median4(int a, int b, int c, int d)
 static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
 {
     MpegEncContext *s = &v->s;
-    DSPContext *dsp = &v->s.dsp;
     uint8_t *srcY;
     int dxy, mx, my, src_x, src_y;
     int off;
@@ -707,9 +706,9 @@ static void vc1_mc_4mv_luma(VC1Context *v, int n, int dir)
     } else { // hpel mc - always used for luma
         dxy = (my & 2) | ((mx & 2) >> 1);
         if (!v->rnd)
-            dsp->put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
+            s->hdsp.put_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
         else
-            dsp->put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
+            s->hdsp.put_no_rnd_pixels_tab[1][dxy](s->dest[0] + off, srcY, s->linesize, 8);
     }
 }
@@ -1950,9 +1949,9 @@ static void vc1_interp_mc(VC1Context *v)
         dxy = (my & 2) | ((mx & 2) >> 1);
         if (!v->rnd)
-            dsp->avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.avg_pixels_tab[0][dxy](s->dest[0] + off, srcY, s->linesize, 16);
         else
-            dsp->avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
+            s->hdsp.avg_no_rnd_pixels_tab[dxy](s->dest[0] + off, srcY, s->linesize, 16);
     }

     if (s->flags & CODEC_FLAG_GRAY) return;
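
On the VC-1 side the per-function "DSPContext *dsp = &v->s.dsp;" alias becomes unnecessary for these copies; the calls go directly through s->hdsp, with v->rnd selecting between the rounding and no-rounding tables (note the sense is inverted relative to mpegvideo's no_rounding flag). A small sketch of that shape, using a hypothetical helper name:

    #include "vc1.h"   /* VC1Context, which embeds MpegEncContext as v->s */

    /* Hypothetical helper, for illustration only: the luma hpel copy as
     * vc1_mc_1mv() now performs it through s->hdsp. */
    static void vc1_copy_hpel_luma(VC1Context *v, uint8_t *dst,
                                   const uint8_t *srcY, int mx, int my)
    {
        MpegEncContext *s = &v->s;
        int dxy = (my & 2) | ((mx & 2) >> 1);  /* half-pel phase */

        if (!v->rnd)
            s->hdsp.put_pixels_tab[0][dxy](dst, srcY, s->linesize, 16);
        else
            s->hdsp.put_no_rnd_pixels_tab[0][dxy](dst, srcY, s->linesize, 16);
    }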