libavcodec/ppc/float_altivec.c

/*
 * Copyright (c) 2006 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"

#include "dsputil_altivec.h"
#include "util_altivec.h"

/* dst[i] *= src[i]; two vectors (8 floats) per iteration. Pointers are
 * assumed 16-byte aligned (vec_ld/vec_st ignore the low address bits) and
 * the loop processes len rounded down to a multiple of 8. */
static void vector_fmul_altivec(float *dst, const float *src, int len)
{
    int i;
    vector float d0, d1, s, zero = (vector float)vec_splat_u32(0);
    for(i=0; i<len-7; i+=8) {
        d0 = vec_ld(0, dst+i);
        s = vec_ld(0, src+i);
        d1 = vec_ld(16, dst+i);
        d0 = vec_madd(d0, s, zero);
        d1 = vec_madd(d1, vec_ld(16,src+i), zero);
        vec_st(d0, 0, dst+i);
        vec_st(d1, 16, dst+i);
    }
}

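For reference, a scalar sketch of what vector_fmul_altivec computes above (the function name here is illustrative, not part of the file):

static void vector_fmul_scalar_sketch(float *dst, const float *src, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] *= src[i];   /* in-place element-wise multiply */
}
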
/* dst[i] = src0[i] * src1[len-1-i]; the vec_mergeh/vec_mergel sequences
 * reverse the order of the four floats loaded from src1. */
static void vector_fmul_reverse_altivec(float *dst, const float *src0,
                                        const float *src1, int len)
{
    int i;
    vector float d, s0, s1, h0, l0,
                 s2, s3, zero = (vector float)vec_splat_u32(0);
    src1 += len-4;
    for(i=0; i<len-7; i+=8) {
        s1 = vec_ld(0, src1-i);              // [a,b,c,d]
        s0 = vec_ld(0, src0+i);
        l0 = vec_mergel(s1, s1);             // [c,c,d,d]
        s3 = vec_ld(-16, src1-i);
        h0 = vec_mergeh(s1, s1);             // [a,a,b,b]
        s2 = vec_ld(16, src0+i);
        s1 = vec_mergeh(vec_mergel(l0,h0),   // [d,b,d,b]
                        vec_mergeh(l0,h0));  // [c,a,c,a]
                                             // [d,c,b,a]
        l0 = vec_mergel(s3, s3);
        d = vec_madd(s0, s1, zero);
        h0 = vec_mergeh(s3, s3);
        vec_st(d, 0, dst+i);
        s3 = vec_mergeh(vec_mergel(l0,h0),
                        vec_mergeh(l0,h0));
        d = vec_madd(s2, s3, zero);
        vec_st(d, 16, dst+i);
    }
}

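A scalar sketch of vector_fmul_reverse_altivec, multiplying src0 by src1 read back-to-front (illustrative name, not part of the file):

static void vector_fmul_reverse_scalar_sketch(float *dst, const float *src0,
                                              const float *src1, int len)
{
    int i;
    src1 += len - 1;                       /* point at the last element */
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[-i];       /* src1 traversed in reverse */
}
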
/* dst[i] = src0[i] * src1[i] + src2[i]; the sources are assumed aligned,
 * while dst may be unaligned, hence the lvsl/lvsr edge-merging store. */
static void vector_fmul_add_altivec(float *dst, const float *src0,
                                    const float *src1, const float *src2,
                                    int len)
{
    int i;
    vector float d, s0, s1, s2, t0, t1, edges;
    vector unsigned char align = vec_lvsr(0,dst),
                         mask = vec_lvsl(0, dst);

    for (i=0; i<len-3; i+=4) {
        t0 = vec_ld(0, dst+i);
        t1 = vec_ld(15, dst+i);
        s0 = vec_ld(0, src0+i);
        s1 = vec_ld(0, src1+i);
        s2 = vec_ld(0, src2+i);
        edges = vec_perm(t1, t0, mask);
        d = vec_madd(s0,s1,s2);
        t1 = vec_perm(d, edges, align);
        t0 = vec_perm(edges, d, align);
        vec_st(t1, 15, dst+i);
        vec_st(t0, 0, dst+i);
    }
}

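The equivalent scalar computation for vector_fmul_add_altivec (illustrative sketch):

static void vector_fmul_add_scalar_sketch(float *dst, const float *src0,
                                          const float *src1, const float *src2,
                                          int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[i] + src2[i];   /* multiply-add per element */
}
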
/* Overlap-add windowing around the centre of a window of 2*len floats;
 * see the scalar sketch below for the per-sample formula. i and j are byte
 * offsets, and the src1/win loads taken at offset j are reversed with
 * vec_perm before the multiply-adds. */
static void vector_fmul_window_altivec(float *dst, const float *src0,
                                       const float *src1, const float *win,
                                       float add_bias, int len)
{
    union {
        vector float v;
        float s[4];
    } vadd;
    vector float vadd_bias, zero, t0, t1, s0, s1, wi, wj;
    const vector unsigned char reverse = vcprm(3,2,1,0);
    int i,j;

    dst += len;
    win += len;
    src0+= len;

    vadd.s[0] = add_bias;
    vadd_bias = vec_splat(vadd.v, 0);
    zero = (vector float)vec_splat_u32(0);

    for(i=-len*4, j=len*4-16; i<0; i+=16, j-=16) {
        s0 = vec_ld(i, src0);
        s1 = vec_ld(j, src1);
        wi = vec_ld(i, win);
        wj = vec_ld(j, win);

        s1 = vec_perm(s1, s1, reverse);
        wj = vec_perm(wj, wj, reverse);

        t0 = vec_madd(s0, wj, vadd_bias);
        t0 = vec_nmsub(s1, wi, t0);
        t1 = vec_madd(s0, wi, vadd_bias);
        t1 = vec_madd(s1, wj, t1);
        t1 = vec_perm(t1, t1, reverse);

        vec_st(t0, i, dst);
        vec_st(t1, j, dst);
    }
}

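A scalar sketch of the windowing loop above, with i walking the first half and j the second half of the window; this is reconstructed from the vector code and the name is illustrative:

static void vector_fmul_window_scalar_sketch(float *dst, const float *src0,
                                             const float *src1, const float *win,
                                             float add_bias, int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i];
        float s1 = src1[j];
        float wi = win[i];
        float wj = win[j];
        dst[i] = s0 * wj - s1 * wi + add_bias;   /* first half of the output */
        dst[j] = s0 * wi + s1 * wj + add_bias;   /* second half, mirrored */
    }
}
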
/* dst[i] = src[i] * mul, converting the int32 input to float; 8 elements
 * per iteration, aligned pointers and a multiple-of-8 len assumed. */
static void int32_to_float_fmul_scalar_altivec(float *dst, const int *src,
                                               float mul, int len)
{
    union {
        vector float v;
        float s[4];
    } mul_u;
    int i;
    vector float src1, src2, dst1, dst2, mul_v, zero;

    zero = (vector float)vec_splat_u32(0);
    mul_u.s[0] = mul;
    mul_v = vec_splat(mul_u.v, 0);

    for(i=0; i<len; i+=8) {
        src1 = vec_ctf(vec_ld(0,  src+i), 0);
        src2 = vec_ctf(vec_ld(16, src+i), 0);
        dst1 = vec_madd(src1, mul_v, zero);
        dst2 = vec_madd(src2, mul_v, zero);
        vec_st(dst1,  0, dst+i);
        vec_st(dst2, 16, dst+i);
    }
}


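Scalar sketch of int32_to_float_fmul_scalar_altivec (illustrative name):

static void int32_to_float_fmul_scalar_sketch(float *dst, const int *src,
                                              float mul, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src[i] * mul;   /* int -> float conversion plus scaling */
}
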
/* Convert 8 floats to saturated int16: vec_cts truncates toward zero and
 * vec_packs clamps the results to [-32768, 32767]. */
static vector signed short
float_to_int16_one_altivec(const float *src)
{
    vector float s0 = vec_ld(0, src);
    vector float s1 = vec_ld(16, src);
    vector signed int t0 = vec_cts(s0, 0);
    vector signed int t1 = vec_cts(s1, 0);
    return vec_packs(t0,t1);
}

static void float_to_int16_altivec(int16_t *dst, const float *src, long len)
{
    int i;
    vector signed short d0, d1, d;
    vector unsigned char align;
    if (((long)dst) & 15) { //FIXME
        /* dst is not 16-byte aligned: merge the converted samples into the
         * two aligned quadwords that span the destination before storing. */
        for(i=0; i<len-7; i+=8) {
            d0 = vec_ld(0, dst+i);
            d = float_to_int16_one_altivec(src+i);
            d1 = vec_ld(15, dst+i);
            d1 = vec_perm(d1, d0, vec_lvsl(0,dst+i));
            align = vec_lvsr(0, dst+i);
            d0 = vec_perm(d1, d, align);
            d1 = vec_perm(d, d1, align);
            vec_st(d0, 0, dst+i);
            vec_st(d1,15, dst+i);
        }
    } else {
        for(i=0; i<len-7; i+=8) {
            d = float_to_int16_one_altivec(src+i);
            vec_st(d, 0, dst+i);
        }
    }
}

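A scalar sketch of the conversion performed per sample: vec_cts rounds toward zero and vec_packs saturates, which is presumably why the init code below only installs these routines when CODEC_FLAG_BITEXACT is unset (illustrative name):

static void float_to_int16_scalar_sketch(int16_t *dst, const float *src, long len)
{
    long i;
    for (i = 0; i < len; i++) {
        int v = (int)src[i];            /* truncate toward zero, like vec_cts */
        if (v >  32767) v =  32767;     /* saturate, like vec_packs */
        if (v < -32768) v = -32768;
        dst[i] = v;
    }
}
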
static void
float_to_int16_interleave_altivec(int16_t *dst, const float **src,
                                  long len, int channels)
{
    int i;
    vector signed short d0, d1, d2, c0, c1, t0, t1;
    vector unsigned char align;
    if (channels == 1) {
        float_to_int16_altivec(dst, src[0], len);
    } else if (channels == 2) {
        if (((long)dst) & 15) {
            /* unaligned dst: convert, interleave with vec_merge, then fold
             * the result into the aligned quadwords spanning the output */
            for(i=0; i<len-7; i+=8) {
                d0 = vec_ld(0, dst + i);
                t0 = float_to_int16_one_altivec(src[0] + i);
                d1 = vec_ld(31, dst + i);
                t1 = float_to_int16_one_altivec(src[1] + i);
                c0 = vec_mergeh(t0, t1);
                c1 = vec_mergel(t0, t1);
                d2 = vec_perm(d1, d0, vec_lvsl(0, dst + i));
                align = vec_lvsr(0, dst + i);
                d0 = vec_perm(d2, c0, align);
                d1 = vec_perm(c0, c1, align);
                vec_st(d0,  0, dst + i);
                d0 = vec_perm(c1, d2, align);
                vec_st(d1, 15, dst + i);
                vec_st(d0, 31, dst + i);
                dst+=8;
            }
        } else {
            /* aligned dst: vec_mergeh/vec_mergel interleave the converted
             * left and right samples as [L0 R0 L1 R1 ...] */
            for(i=0; i<len-7; i+=8) {
                t0 = float_to_int16_one_altivec(src[0] + i);
                t1 = float_to_int16_one_altivec(src[1] + i);
                d0 = vec_mergeh(t0, t1);
                d1 = vec_mergel(t0, t1);
                vec_st(d0,  0, dst + i);
                vec_st(d1, 16, dst + i);
                dst+=8;
            }
        }
    } else {
        /* generic case: convert each channel into a temporary buffer and
         * interleave it with scalar stores */
        DECLARE_ALIGNED(16, int16_t, tmp)[len];
        int c, j;
        for (c = 0; c < channels; c++) {
            float_to_int16_altivec(tmp, src[c], len);
            for (i = 0, j = c; i < len; i++, j+=channels) {
                dst[j] = tmp[i];
            }
        }
    }
}

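For the two-channel case, the interleaving amounts to the following scalar sketch (illustrative name, conversion inlined to match the truncate-and-saturate behaviour above):

static void float_to_int16_interleave2_scalar_sketch(int16_t *dst,
                                                     const float **src, long len)
{
    long i;
    for (i = 0; i < len; i++) {
        int l = (int)src[0][i];        /* truncate like vec_cts */
        int r = (int)src[1][i];
        if (l >  32767) l =  32767;    /* saturate like vec_packs */
        if (l < -32768) l = -32768;
        if (r >  32767) r =  32767;
        if (r < -32768) r = -32768;
        dst[2*i]     = l;              /* channel 0 */
        dst[2*i + 1] = r;              /* channel 1 */
    }
}
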
/* Install the AltiVec implementations into the DSPContext. Routines whose
 * results can differ from the C reference (truncating conversion, fused
 * multiply-add) are only installed when CODEC_FLAG_BITEXACT is not set. */
void float_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->vector_fmul = vector_fmul_altivec;
    c->vector_fmul_reverse = vector_fmul_reverse_altivec;
    c->vector_fmul_add = vector_fmul_add_altivec;
    c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_altivec;
    if(!(avctx->flags & CODEC_FLAG_BITEXACT)) {
        c->vector_fmul_window = vector_fmul_window_altivec;
        c->float_to_int16 = float_to_int16_altivec;
        c->float_to_int16_interleave = float_to_int16_interleave_altivec;
    }
}
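
These static functions are reached through the DSPContext function pointers rather than called directly; a hypothetical caller might look like the sketch below (the function and variable names are illustrative, and dsp is assumed to have been filled in by the dsputil initialization that ends up calling float_init_altivec):

/* apply a window to a block of samples via the DSPContext */
static void apply_window_example(DSPContext *dsp, float *samples,
                                 const float *window_coeffs)
{
    /* samples[i] *= window_coeffs[i] for 1024 floats, using the AltiVec
     * implementation installed above when it is available */
    dsp->vector_fmul(samples, window_coeffs, 1024);
}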
