libavcodec/ppc/vc1dsp_altivec.c

/*
 * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"

#include "util_altivec.h"
#include "dsputil_altivec.h"

// main steps of 8x8 transform
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
\
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
\
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
\
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
\
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
\
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
}while(0)
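
/* The shift/add sequences in STEP8 perform the multiplications of the VC-1
 * 8-point transform without vector multiplies: the even part uses the
 * coefficients 12, 16 and 6 (e.g. 12*x is built as ((x << 2) << 1) + (x << 2)),
 * and the odd part expands to 16*s1 + 15*s3 + 9*s5 + 4*s7 and its
 * sign-permuted variants. A scalar sketch of the same arithmetic is given
 * after the end of this listing. */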

#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
}while(0)

#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
}while(0)
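
/* SHIFT_VERT8 implements the column-pass rounding: the first four rows are
 * rounded as (x + 64) >> 7 (the 64 comes from vec_64 passed to STEP8), while
 * the last four get an extra +1, i.e. (x + 64 + 1) >> 7, matching the
 * asymmetric rounding of the VC-1 inverse transform. */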

/* main steps of 4x4 transform */
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
}while (0)
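
/* STEP4 is the 4-point counterpart: the even inputs are scaled by 17
 * (x*17 == (x << 4) + x), and the odd inputs combine with coefficients 22
 * and 10, i.e. t2 == 22*s1 + 10*s3 and t3 == 22*s3 - 10*s1, again built
 * purely from shifts and adds. */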

#define SHIFT_HOR4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3);

#define SHIFT_VERT4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7);

/** Do inverse transform on 8x8 block */
static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector  signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector  signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);


    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    /* horizontal (row) pass: 8-point transform, rnd = 4, result >> 3 */
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    /* vertical (column) pass: 8-point transform, rnd = 64, result >> 7 */
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,  0, block);
    vec_st(src1, 16, block);
    vec_st(src2, 32, block);
    vec_st(src3, 48, block);
    vec_st(src4, 64, block);
    vec_st(src5, 80, block);
    vec_st(src6, 96, block);
    vec_st(src7,112, block);
}

/** Do inverse transform on 8x4 part of block */
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, DCTELEM *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector  signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

    /* horizontal (row) pass: 8-point transform, rnd = 4, result >> 3 */
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

    /* vertical 4-point pass down the four rows: rnd = 64, result >> 7 */
    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);

    /* build permute masks that zero-extend the destination bytes in ADD */
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

#define ADD(dest,src,perm)                                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm);  \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);
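
/* ADD reconstructs one row of 8 pixels: it loads the destination bytes,
 * zero-extends them to 16 bits via vec_perm with the interleaved 0xFF masks
 * built above (perm0/perm1 cover the two possible alignments of dest), adds
 * the transform output with saturation, packs back to unsigned bytes and
 * writes the 8 result bytes with two 4-byte vec_ste stores. */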

    ADD (dest, src0, perm0)      dest += stride;
    ADD (dest, src1, perm1)      dest += stride;
    ADD (dest, src2, perm0)      dest += stride;
    ADD (dest, src3, perm1)
}


/* Install the AltiVec implementations in the DSP function pointer table. */
void vc1dsp_init_altivec(DSPContext* dsp, AVCodecContext *avctx) {
    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
}
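
As a reading aid, here is a scalar sketch of the arithmetic that one STEP8 invocation performs on a single column of eight coefficients. It is not part of vc1dsp_altivec.c; the helper name and standalone form are illustrative only, but the coefficients mirror the shift-and-add decomposition used by the AltiVec macro. rnd is 4 for the row pass and 64 for the column pass.

#include <stdint.h>

/* Illustrative scalar equivalent of one STEP8 pass (hypothetical helper,
 * not part of the FFmpeg source). */
static void step8_scalar(int32_t s[8], int32_t rnd)
{
    /* even part: coefficients 12, 16, 6 */
    int32_t t0 = 12 * (s[0] + s[4]) + rnd;   /* ((x << 2) << 1) + (x << 2) */
    int32_t t1 = 12 * (s[0] - s[4]) + rnd;
    int32_t t2 = 16 * s[2] +  6 * s[6];      /* (s2 << 4) + (s6 << 2) + (s6 << 1) */
    int32_t t3 =  6 * s[2] - 16 * s[6];
    int32_t t4 = t0 + t2, t5 = t1 + t3, t6 = t1 - t3, t7 = t0 - t2;

    /* odd part: coefficients 16, 15, 9, 4 */
    int32_t u0 = 16 * s[1] + 15 * s[3] +  9 * s[5] +  4 * s[7];
    int32_t u1 = 15 * s[1] -  4 * s[3] - 16 * s[5] -  9 * s[7];
    int32_t u2 =  9 * s[1] - 16 * s[3] +  4 * s[5] + 15 * s[7];
    int32_t u3 =  4 * s[1] -  9 * s[3] + 15 * s[5] - 16 * s[7];

    /* butterfly combine, as in the tail of STEP8 */
    s[0] = t4 + u0;  s[1] = t5 + u1;  s[2] = t6 + u2;  s[3] = t7 + u3;
    s[4] = t7 - u3;  s[5] = t6 - u2;  s[6] = t5 - u1;  s[7] = t4 - u0;
}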
