libavcodec/ppc/int_altivec.c

/*
 * Copyright (c) 2007 Luca Barbato <lu_zero@gentoo.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "config.h"
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif

#include "libavcodec/dsputil.h"

#include "dsputil_altivec.h"

#include "types_altivec.h"

/** Sum of squared differences between an int8_t and an int16_t vector. */
static int ssd_int8_vs_int16_altivec(const int8_t *pix1, const int16_t *pix2,
                                     int size) {
    int i, size16;
    vector signed char vpix1;
    vector signed short vpix2, vdiff, vpix1l, vpix1h;
    union {
        vector signed int vscore;
        int32_t score[4];
    } u;
    u.vscore = vec_splat_s32(0);

// XXX lazy way, fix it later

#define vec_unaligned_load(b) \
    vec_perm(vec_ld(0, b), vec_ld(15, b), vec_lvsl(0, b))

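/*
 * How the macro works: vec_ld() can only fetch 16-byte-aligned data, so the
 * two loads at offsets 0 and 15 grab the two aligned blocks that cover
 * b[0..15], and vec_lvsl() ("load vector for shift left") produces the
 * permute mask that lets vec_perm() splice out the 16 bytes starting at b.
 */
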
    size16 = size >> 4;
    while (size16) {
        // score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
        // load 16 bytes of pix1 and the first 8 elements of pix2
        vpix1 = vec_unaligned_load(pix1);
        vpix2 = vec_unaligned_load(pix2);
        pix2 += 8;
        // sign-extend the high half of vpix1 to 16 bits and take the difference
        vpix1h = vec_unpackh(vpix1);
        vdiff  = vec_sub(vpix1h, vpix2);
        vpix1l = vec_unpackl(vpix1);
        // load the next 8 elements of pix2
        vpix2 = vec_unaligned_load(pix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        vdiff  = vec_sub(vpix1l, vpix2);
        u.vscore = vec_msum(vdiff, vdiff, u.vscore);
        pix1 += 16;
        pix2 += 8;
        size16--;
    }
    // vec_sums() reduces the four partial sums into element 3
    u.vscore = vec_sums(u.vscore, vec_splat_s32(0));

    // handle any remaining elements (size not a multiple of 16) in scalar code
    size %= 16;
    for (i = 0; i < size; i++) {
        u.score[3] += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    }
    return u.score[3];
}
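
/*
 * Scalar sketch of what ssd_int8_vs_int16_altivec() computes, modeled on
 * dsputil's C fallback (illustrative only, hence compiled out; the name
 * below is ours, not part of dsputil):
 */
#if 0
static int ssd_int8_vs_int16_scalar(const int8_t *pix1, const int16_t *pix2,
                                    int size)
{
    int i, score = 0;
    for (i = 0; i < size; i++)
        score += (pix1[i] - pix2[i]) * (pix1[i] - pix2[i]);
    return score;
}
#endif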

static int32_t scalarproduct_int16_altivec(int16_t *v1, int16_t *v2, int order,
                                           const int shift)
{
    int i;
    LOAD_ZERO;
    register vec_s16 vec1, *pv;
    register vec_s32 res = vec_splat_s32(0), t;
    register vec_u32 shifts;
    int32_t ires;

    // Build a vector splat of 'shift' bit by bit: vec_splat_u32() only takes
    // a 5-bit immediate, so 16 has to be synthesized as 8 << 1.
    shifts = zero_u32v;
    if (shift & 0x10) shifts = vec_add(shifts, vec_sl(vec_splat_u32(0x08), vec_splat_u32(0x1)));
    if (shift & 0x08) shifts = vec_add(shifts, vec_splat_u32(0x08));
    if (shift & 0x04) shifts = vec_add(shifts, vec_splat_u32(0x04));
    if (shift & 0x02) shifts = vec_add(shifts, vec_splat_u32(0x02));
    if (shift & 0x01) shifts = vec_add(shifts, vec_splat_u32(0x01));

    for (i = 0; i < order; i += 8) {
        // realign v1; v2 is assumed 16-byte aligned, so a plain vec_ld() suffices
        pv = (vec_s16*)v1;
        vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
        // multiply-sum 8 products into 4 partial sums, shift, then accumulate
        t = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
        t = vec_sr(t, shifts);
        res = vec_sums(t, res);
        v1 += 8;
        v2 += 8;
    }
    // vec_sums() keeps the running total in element 3; splat and store it
    res = vec_splat(res, 3);
    vec_ste(res, 0, &ires);
    return ires;
}
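
/*
 * Scalar sketch of the semantics (after dsputil's C version; illustrative
 * only). Note the AltiVec routine applies the shift to sums of adjacent
 * product pairs and uses a logical rather than arithmetic shift, so it is
 * not guaranteed bit-exact with this loop for every input:
 */
#if 0
static int32_t scalarproduct_int16_scalar(int16_t *v1, int16_t *v2, int order,
                                          const int shift)
{
    int32_t res = 0;
    while (order--)
        res += (*v1++ * *v2++) >> shift;
    return res;
}
#endif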

static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1, int16_t *v2,
                                                    int16_t *v3, int order, int mul)
{
    LOAD_ZERO;
    vec_s16 *pv1 = (vec_s16*)v1;
    vec_s16 *pv2 = (vec_s16*)v2;
    vec_s16 *pv3 = (vec_s16*)v3;
    register vec_s16 muls = {mul, mul, mul, mul, mul, mul, mul, mul};
    register vec_s16 t0, t1, i0, i1;
    register vec_s16 i2 = pv2[0], i3 = pv3[0];
    register vec_s32 res = zero_s32v;
    // v1 must be 16-byte aligned; v2 and v3 may be unaligned, but a single
    // permute vector serves both, so they must share the same misalignment
    register vec_u8 align = vec_lvsl(0, v2);
    int32_t ires;
    order >>= 4;
    do {
        // realign two vectors' worth of v2
        t0 = vec_perm(i2, pv2[1], align);
        i2 = pv2[2];
        t1 = vec_perm(pv2[1], i2, align);
        // accumulate the dot product with the old v1 values
        i0 = pv1[0];
        i1 = pv1[1];
        res = vec_msum(t0, i0, res);
        res = vec_msum(t1, i1, res);
        // realign two vectors' worth of v3, then v1 += mul * v3
        t0 = vec_perm(i3, pv3[1], align);
        i3 = pv3[2];
        t1 = vec_perm(pv3[1], i3, align);
        pv1[0] = vec_mladd(t0, muls, i0);
        pv1[1] = vec_mladd(t1, muls, i1);
        pv1 += 2;
        pv2 += 2;
        pv3 += 2;
    } while (--order);
    res = vec_splat(vec_sums(res, zero_s32v), 3);
    vec_ste(res, 0, &ires);
    return ires;
}
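
/*
 * Scalar sketch of the combined operation (after dsputil's C version;
 * illustrative only): the dot product uses the old v1 values, then v1 is
 * updated in place.
 */
#if 0
static int32_t scalarproduct_and_madd_int16_scalar(int16_t *v1, const int16_t *v2,
                                                   const int16_t *v3, int order,
                                                   int mul)
{
    int32_t res = 0;
    while (order--) {
        res   += *v1 * *v2++;
        *v1++ += mul * *v3++;
    }
    return res;
}
#endif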

/** Install the AltiVec integer routines into the DSPContext. */
void int_init_altivec(DSPContext* c, AVCodecContext *avctx)
{
    c->ssd_int8_vs_int16 = ssd_int8_vs_int16_altivec;
    c->scalarproduct_int16 = scalarproduct_int16_altivec;
    c->scalarproduct_and_madd_int16 = scalarproduct_and_madd_int16_altivec;
}
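
/*
 * Caller sketch (illustrative, not from this file): the PPC dsputil init
 * code is expected to install these pointers only when AltiVec is available
 * at runtime, along the lines of:
 *
 *     if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC)
 *         int_init_altivec(c, avctx);
 */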
