From f1917274cc900a1d29879117a1bfbc74ac44f88a Mon Sep 17 00:00:00 2001
From: Kostya Shishkov
Date: Fri, 4 Jan 2008 17:50:45 +0000
Subject: [PATCH] RV30 thirdpel motion compensation support

Originally committed as revision 11398 to svn://svn.ffmpeg.org/ffmpeg/trunk
---
 libavcodec/rv30dsp.c | 249 +++++++++++++++++++++++++++++++++++++++++++
 libavcodec/rv34.c    |  34 +++---
 2 files changed, 267 insertions(+), 16 deletions(-)
 create mode 100644 libavcodec/rv30dsp.c

diff --git a/libavcodec/rv30dsp.c b/libavcodec/rv30dsp.c
new file mode 100644
index 0000000000..13b218b881
--- /dev/null
+++ b/libavcodec/rv30dsp.c
@@ -0,0 +1,249 @@
+/*
+ * RV30 decoder motion compensation functions
+ * Copyright (c) 2007 Konstantin Shishkov
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file rv30dsp.c
+ * RV30 decoder motion compensation functions
+ */
+
+#include "avcodec.h"
+#include "dsputil.h"
+
+#define RV30_LOWPASS(OPNAME, OP) \
+static av_unused void OPNAME ## rv30_tpel8_h_lowpass(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, const int C1, const int C2){\
+    const int h=8;\
+    uint8_t *cm = ff_cropTbl + MAX_NEG_CROP;\
+    int i;\
+    for(i=0; i<h; i++)\
[... filter loop bodies, the remaining RV30_LOWPASS functions and the RV30_MC macro ...]
+#define op_avg(a, b)  a = (((a)+cm[((b) + 8)>>4]+1)>>1)
+#define op_put(a, b) a = cm[((b) + 8)>>4]
+
+RV30_LOWPASS(put_ , op_put)
+RV30_LOWPASS(avg_ , op_avg)
+RV30_MC(put_, 8)
+RV30_MC(put_, 16)
+RV30_MC(avg_, 8)
+RV30_MC(avg_, 16)
+
+void ff_rv30dsp_init(DSPContext* c, AVCodecContext *avctx) {
+    c->put_rv30_tpel_pixels_tab[0][ 0] = c->put_h264_qpel_pixels_tab[0][0];
+    c->put_rv30_tpel_pixels_tab[0][ 1] = put_rv30_tpel16_mc10_c;
+    c->put_rv30_tpel_pixels_tab[0][ 2] = put_rv30_tpel16_mc20_c;
+    c->put_rv30_tpel_pixels_tab[0][ 4] = put_rv30_tpel16_mc01_c;
+    c->put_rv30_tpel_pixels_tab[0][ 5] = put_rv30_tpel16_mc11_c;
+    c->put_rv30_tpel_pixels_tab[0][ 6] = put_rv30_tpel16_mc21_c;
+    c->put_rv30_tpel_pixels_tab[0][ 8] = put_rv30_tpel16_mc02_c;
+    c->put_rv30_tpel_pixels_tab[0][ 9] = put_rv30_tpel16_mc12_c;
+    c->put_rv30_tpel_pixels_tab[0][10] = put_rv30_tpel16_mc22_c;
+    c->avg_rv30_tpel_pixels_tab[0][ 0] = c->avg_h264_qpel_pixels_tab[0][0];
+    c->avg_rv30_tpel_pixels_tab[0][ 1] = avg_rv30_tpel16_mc10_c;
+    c->avg_rv30_tpel_pixels_tab[0][ 2] = avg_rv30_tpel16_mc20_c;
+    c->avg_rv30_tpel_pixels_tab[0][ 4] = avg_rv30_tpel16_mc01_c;
+    c->avg_rv30_tpel_pixels_tab[0][ 5] = avg_rv30_tpel16_mc11_c;
+    c->avg_rv30_tpel_pixels_tab[0][ 6] = avg_rv30_tpel16_mc21_c;
+    c->avg_rv30_tpel_pixels_tab[0][ 8] = avg_rv30_tpel16_mc02_c;
+    c->avg_rv30_tpel_pixels_tab[0][ 9] = avg_rv30_tpel16_mc12_c;
+    c->avg_rv30_tpel_pixels_tab[0][10] = avg_rv30_tpel16_mc22_c;
+    c->put_rv30_tpel_pixels_tab[1][ 0] = c->put_h264_qpel_pixels_tab[1][0];
+    c->put_rv30_tpel_pixels_tab[1][ 1] = put_rv30_tpel8_mc10_c;
+    c->put_rv30_tpel_pixels_tab[1][ 2] = put_rv30_tpel8_mc20_c;
+    c->put_rv30_tpel_pixels_tab[1][ 4] = put_rv30_tpel8_mc01_c;
+    c->put_rv30_tpel_pixels_tab[1][ 5] = put_rv30_tpel8_mc11_c;
+    c->put_rv30_tpel_pixels_tab[1][ 6] = put_rv30_tpel8_mc21_c;
+    c->put_rv30_tpel_pixels_tab[1][ 8] = put_rv30_tpel8_mc02_c;
+    c->put_rv30_tpel_pixels_tab[1][ 9] = put_rv30_tpel8_mc12_c;
+    c->put_rv30_tpel_pixels_tab[1][10] = put_rv30_tpel8_mc22_c;
+    c->avg_rv30_tpel_pixels_tab[1][ 0] = c->avg_h264_qpel_pixels_tab[1][0];
+    c->avg_rv30_tpel_pixels_tab[1][ 1] = avg_rv30_tpel8_mc10_c;
+    c->avg_rv30_tpel_pixels_tab[1][ 2] = avg_rv30_tpel8_mc20_c;
+    c->avg_rv30_tpel_pixels_tab[1][ 4] = avg_rv30_tpel8_mc01_c;
+    c->avg_rv30_tpel_pixels_tab[1][ 5] = avg_rv30_tpel8_mc11_c;
+    c->avg_rv30_tpel_pixels_tab[1][ 6] = avg_rv30_tpel8_mc21_c;
+    c->avg_rv30_tpel_pixels_tab[1][ 8] = avg_rv30_tpel8_mc02_c;
+    c->avg_rv30_tpel_pixels_tab[1][ 9] = avg_rv30_tpel8_mc12_c;
+    c->avg_rv30_tpel_pixels_tab[1][10] = avg_rv30_tpel8_mc22_c;
+}
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index fe7f4d6837..af1b840864 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -559,6 +559,8 @@ static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
         fill_rectangle(cur_pic->motion_val[!dir][mv_pos], 2, 2, s->b8_stride, 0, 4);
 }
 
+static const int chroma_coeffs[3] = { 8, 5, 3 };
+
 /**
  * generic motion compensation function
  *
@@ -584,21 +586,15 @@ static inline void rv34_mc(RV34DecContext *r, const int block_type,
     int is16x16 = 1;
 
     if(thirdpel){
-#if 0 /// todo
         int lx, ly;
 
-        mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] / 3;
-        my = s->current_picture_ptr->motion_val[dir][mv_pos][1] / 3;
-        lx = ((s->current_picture_ptr->motion_val[dir][mv_pos][0] % 3) + 3) % 3;
-        ly = ((s->current_picture_ptr->motion_val[dir][mv_pos][1] % 3) + 3) % 3;
-        dxy = ly*3 + lx;
-        uvmx =
-#endif
-        mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
-        my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
-        dxy = ((my & 3) << 2) | (mx & 3);
-        uvmx = mx & 6;
-        uvmy = my & 6;
+        mx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) / 3 - (1 << 24);
+        my = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) / 3 - (1 << 24);
+        lx = (s->current_picture_ptr->motion_val[dir][mv_pos][0] + (3 << 24)) % 3;
+        ly = (s->current_picture_ptr->motion_val[dir][mv_pos][1] + (3 << 24)) % 3;
+        dxy = ly*4 + lx;
+        uvmx = chroma_coeffs[(3*(mx&1) + lx) >> 1];
+        uvmy = chroma_coeffs[(3*(my&1) + ly) >> 1];
     }else{
         mx = s->current_picture_ptr->motion_val[dir][mv_pos][0] >> 2;
         my = s->current_picture_ptr->motion_val[dir][mv_pos][1] >> 2;
@@ -655,15 +651,21 @@ static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
                         const int width, const int height, int dir)
 {
     rv34_mc(r, block_type, xoff, yoff, mv_off, width, height, dir, r->rv30,
-            r->s.dsp.put_h264_qpel_pixels_tab, r->s.dsp.put_h264_chroma_pixels_tab);
+            r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab
+                    : r->s.dsp.put_h264_qpel_pixels_tab,
+            r->s.dsp.put_h264_chroma_pixels_tab);
 }
 
 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
 {
     rv34_mc(r, block_type, 0, 0, 0, 2, 2, 0, r->rv30,
-            r->s.dsp.put_h264_qpel_pixels_tab, r->s.dsp.put_h264_chroma_pixels_tab);
+            r->rv30 ? r->s.dsp.put_rv30_tpel_pixels_tab
+                    : r->s.dsp.put_h264_qpel_pixels_tab,
+            r->s.dsp.put_h264_chroma_pixels_tab);
     rv34_mc(r, block_type, 0, 0, 0, 2, 2, 1, r->rv30,
-            r->s.dsp.avg_h264_qpel_pixels_tab, r->s.dsp.avg_h264_chroma_pixels_tab);
+            r->rv30 ? r->s.dsp.avg_rv30_tpel_pixels_tab
+                    : r->s.dsp.avg_h264_qpel_pixels_tab,
+            r->s.dsp.avg_h264_chroma_pixels_tab);
 }
 
 /** number of motion vectors in each macroblock type */
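
Note on the thirdpel arithmetic in the rv34.c hunk above: C integer division truncates toward zero and the remainder of % can be negative, so the patch biases each motion-vector component by 3 << 24 (a multiple of 3 far larger than any legal MV magnitude) before dividing, then subtracts 1 << 24 again. The result is the floor of mv/3 plus a fractional index in {0, 1, 2}, without the double-modulo of the old disabled code; dxy = ly*4 + lx then indexes the rv30_tpel function table filled in ff_rv30dsp_init(), and chroma_coeffs[] maps the luma fraction to a chroma filter weight. A minimal standalone sketch of the bias trick follows; the names (split_thirdpel, ipart, frac) are illustrative and do not come from the patch.

#include <assert.h>
#include <stdio.h>

/* Split a thirdpel motion-vector component into an integer part,
 * floor(v / 3), and a fractional index in {0, 1, 2}, using the same
 * bias as the patch: 3 << 24 is a multiple of 3 large enough to make
 * the operand non-negative for any realistic motion vector. */
static void split_thirdpel(int v, int *ipart, int *frac)
{
    *ipart = (v + (3 << 24)) / 3 - (1 << 24);
    *frac  = (v + (3 << 24)) % 3;
}

int main(void)
{
    int mv[] = { -7, -3, -1, 0, 1, 2, 3, 8 };
    for (unsigned i = 0; i < sizeof(mv) / sizeof(mv[0]); i++) {
        int ip, fr;
        split_thirdpel(mv[i], &ip, &fr);
        /* Reassembling the two parts must give the original vector back. */
        assert(ip * 3 + fr == mv[i]);
        printf("mv=%3d -> ipart=%3d frac=%d\n", mv[i], ip, fr);
    }
    return 0;
}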