Libav 0.7.1
/*
 * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/cpu.h"
#include "libavcodec/dsputil.h"
#include "libavcodec/h264data.h"
#include "libavcodec/h264dsp.h"

#include "dsputil_altivec.h"
#include "util_altivec.h"
#include "types_altivec.h"

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

#define OP_U8_ALTIVEC                         PUT_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec        put_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num            altivec_put_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec  put_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num      altivec_put_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec  put_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num      altivec_put_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num     altivec_put_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define OP_U8_ALTIVEC                         AVG_OP_U8_ALTIVEC
#define PREFIX_h264_chroma_mc8_altivec        avg_h264_chroma_mc8_altivec
#define PREFIX_h264_chroma_mc8_num            altivec_avg_h264_chroma_mc8_num
#define PREFIX_h264_qpel16_h_lowpass_altivec  avg_h264_qpel16_h_lowpass_altivec
#define PREFIX_h264_qpel16_h_lowpass_num      altivec_avg_h264_qpel16_h_lowpass_num
#define PREFIX_h264_qpel16_v_lowpass_altivec  avg_h264_qpel16_v_lowpass_altivec
#define PREFIX_h264_qpel16_v_lowpass_num      altivec_avg_h264_qpel16_v_lowpass_num
#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec
#define PREFIX_h264_qpel16_hv_lowpass_num     altivec_avg_h264_qpel16_hv_lowpass_num
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_h264_chroma_mc8_altivec
#undef PREFIX_h264_chroma_mc8_num
#undef PREFIX_h264_qpel16_h_lowpass_altivec
#undef PREFIX_h264_qpel16_h_lowpass_num
#undef PREFIX_h264_qpel16_v_lowpass_altivec
#undef PREFIX_h264_qpel16_v_lowpass_num
#undef PREFIX_h264_qpel16_hv_lowpass_altivec
#undef PREFIX_h264_qpel16_hv_lowpass_num

#define H264_MC(OPNAME, SIZE, CODETYPE) \
static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){ \
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\
\
static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, int stride){\
    DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
    DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
    put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
    put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
    OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
}\

static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                            const uint8_t * src2, int dst_stride,
                                            int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(a, b);

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0, dst);

        dst += dst_stride;
    }
}

static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
                                            const uint8_t * src2, int dst_stride,
                                            int src_stride1, int h)
{
    int i;
    vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;

    mask_ = vec_lvsl(0, src2);

    for (i = 0; i < h; i++) {

        tmp1 = vec_ld(i * src_stride1, src1);
        mask = vec_lvsl(i * src_stride1, src1);
        tmp2 = vec_ld(i * src_stride1 + 15, src1);

        a = vec_perm(tmp1, tmp2, mask);

        tmp1 = vec_ld(i * 16, src2);
        tmp2 = vec_ld(i * 16 + 15, src2);

        b = vec_perm(tmp1, tmp2, mask_);

        tmp1 = vec_ld(0, dst);
        mask = vec_lvsl(0, dst);
        tmp2 = vec_ld(15, dst);

        d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));

        edges = vec_perm(tmp2, tmp1, mask);

        align = vec_lvsr(0, dst);

        tmp2 = vec_perm(d, edges, align);
        tmp1 = vec_perm(edges, d, align);

        vec_st(tmp2, 15, dst);
        vec_st(tmp1, 0, dst);

        dst += dst_stride;
    }
}

/* Implemented but could be faster
#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
*/

H264_MC(put_, 16, altivec)
H264_MC(avg_, 16, altivec)


/****************************************************************************
 * IDCT transform:
 ****************************************************************************/

#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)            \
    /* 1st stage */                                            \
    vz0 = vec_add(vb0,vb2);  /* temp[0] = Y[0] + Y[2] */       \
    vz1 = vec_sub(vb0,vb2);  /* temp[1] = Y[0] - Y[2] */       \
    vz2 = vec_sra(vb1,vec_splat_u16(1));                       \
    vz2 = vec_sub(vz2,vb3);  /* temp[2] = Y[1].1/2 - Y[3] */   \
    vz3 = vec_sra(vb3,vec_splat_u16(1));                       \
    vz3 = vec_add(vb1,vz3);  /* temp[3] = Y[1] + Y[3].1/2 */   \
    /* 2nd stage: output */                                    \
    va0 = vec_add(vz0,vz3);  /* x[0] = temp[0] + temp[3] */    \
    va1 = vec_add(vz1,vz2);  /* x[1] = temp[1] + temp[2] */    \
    va2 = vec_sub(vz1,vz2);  /* x[2] = temp[1] - temp[2] */    \
    va3 = vec_sub(vz0,vz3)   /* x[3] = temp[0] - temp[3] */

#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )

#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                 \
    vdst_orig = vec_ld(0, dst);                          \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);     \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);      \
    va = vec_add(va, vdst_ss);                           \
    va_u8 = vec_packsu(va, zero_s16v);                   \
    va_u32 = vec_splat((vec_u32)va_u8, 0);               \
    vec_ste(va_u32, element, (uint32_t*)dst);

static void ff_h264_idct_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC-level for rounding */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);

    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);

    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);

    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}

#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* a0 = SRC(0) + SRC(4); */ \
    vec_s16 a0v = vec_add(s0, s4); \
    /* a2 = SRC(0) - SRC(4); */ \
    vec_s16 a2v = vec_sub(s0, s4); \
    /* a4 = (SRC(2)>>1) - SRC(6); */ \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \
    /* a6 = (SRC(6)>>1) + SRC(2); */ \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \
    /* b0 = a0 + a6; */ \
    vec_s16 b0v = vec_add(a0v, a6v); \
    /* b2 = a2 + a4; */ \
    vec_s16 b2v = vec_add(a2v, a4v); \
    /* b4 = a2 - a4; */ \
    vec_s16 b4v = vec_sub(a2v, a4v); \
    /* b6 = a0 - a6; */ \
    vec_s16 b6v = vec_sub(a0v, a6v); \
    /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
    /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
    /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
    /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
    /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
    /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
    /* b1 = (a7>>2) + a1; */ \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
    /* b3 = a3 + (a5>>2); */ \
    vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
    /* b5 = (a3>>2) - a5; */ \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
    /* b7 = a7 - (a1>>2); */ \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
    /* DST(0, b0 + b7); */ \
    d0 = vec_add(b0v, b7v); \
    /* DST(1, b2 + b5); */ \
    d1 = vec_add(b2v, b5v); \
    /* DST(2, b4 + b3); */ \
    d2 = vec_add(b4v, b3v); \
    /* DST(3, b6 + b1); */ \
    d3 = vec_add(b6v, b1v); \
    /* DST(4, b6 - b1); */ \
    d4 = vec_sub(b6v, b1v); \
    /* DST(5, b4 - b3); */ \
    d5 = vec_sub(b4v, b3v); \
    /* DST(6, b2 - b5); */ \
    d6 = vec_sub(b2v, b5v); \
    /* DST(7, b0 - b7); */ \
    d7 = vec_sub(b0v, b7v); \
}

#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                      \
    vec_u8 hv = vec_ld( 0, dest );                            \
    vec_u8 lv = vec_ld( 7, dest );                            \
    vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );       \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                  \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);      \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);     \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);         \
    vec_u8 edgehv;                                            \
    /* unaligned store */                                     \
    vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv );  \
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );      \
    lv = vec_sel( lv, bodyv, edgelv );                        \
    vec_st( lv, 7, dest );                                    \
    hv = vec_ld( 0, dest );                                   \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );             \
    hv = vec_sel( hv, bodyv, edgehv );                        \
    vec_st( hv, 0, dest );                                    \
}

static void ff_h264_idct8_add_altivec( uint8_t *dst, DCTELEM *dct, int stride ) {
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32; // rounding for the >>6 at the end

    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);

    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);

    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}

static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, DCTELEM *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);

    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}

static void h264_idct_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, DCTELEM *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}

static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ])      ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16])      h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}

static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      ff_h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}

static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, DCTELEM *block, int stride, const uint8_t nnzc[15*8]){
    int i, j;
    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
            if(nnzc[ scan8[i] ])
                ff_h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if(block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}

#define transpose4x16(r0, r1, r2, r3) {     \
    register vec_u8 r4;                     \
    register vec_u8 r5;                     \
    register vec_u8 r6;                     \
    register vec_u8 r7;                     \
                                            \
    r4 = vec_mergeh(r0, r2); /*0, 2 set 0*/ \
    r5 = vec_mergel(r0, r2); /*0, 2 set 1*/ \
    r6 = vec_mergeh(r1, r3); /*1, 3 set 0*/ \
    r7 = vec_mergel(r1, r3); /*1, 3 set 1*/ \
                                            \
    r0 = vec_mergeh(r4, r6); /*all set 0*/  \
    r1 = vec_mergel(r4, r6); /*all set 1*/  \
    r2 = vec_mergeh(r5, r7); /*all set 2*/  \
    r3 = vec_mergel(r5, r7); /*all set 3*/  \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* FIXME: there has to be a better way!!!! */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}

#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src); \
    register vec_u8 r1  = unaligned_load(   src_stride, src); \
    register vec_u8 r2  = unaligned_load(2* src_stride, src); \
    register vec_u8 r3  = unaligned_load(3* src_stride, src); \
    register vec_u8 r4  = unaligned_load(4* src_stride, src); \
    register vec_u8 r5  = unaligned_load(5* src_stride, src); \
    register vec_u8 r6  = unaligned_load(6* src_stride, src); \
    register vec_u8 r7  = unaligned_load(7* src_stride, src); \
    register vec_u8 r14 = unaligned_load(14*src_stride, src); \
    register vec_u8 r15 = unaligned_load(15*src_stride, src); \
    \
    r8  = unaligned_load( 8*src_stride, src); \
    r9  = unaligned_load( 9*src_stride, src); \
    r10 = unaligned_load(10*src_stride, src); \
    r11 = unaligned_load(11*src_stride, src); \
    r12 = unaligned_load(12*src_stride, src); \
    r13 = unaligned_load(13*src_stride, src); \
    \
    /*Merge first pairs*/ \
    r0 = vec_mergeh(r0, r8);  /*0, 8*/ \
    r1 = vec_mergeh(r1, r9);  /*1, 9*/ \
    r2 = vec_mergeh(r2, r10); /*2,10*/ \
    r3 = vec_mergeh(r3, r11); /*3,11*/ \
    r4 = vec_mergeh(r4, r12); /*4,12*/ \
    r5 = vec_mergeh(r5, r13); /*5,13*/ \
    r6 = vec_mergeh(r6, r14); /*6,14*/ \
    r7 = vec_mergeh(r7, r15); /*7,15*/ \
    \
    /*Merge second pairs*/ \
    r8  = vec_mergeh(r0, r4); /*0,4, 8,12 set 0*/ \
    r9  = vec_mergel(r0, r4); /*0,4, 8,12 set 1*/ \
    r10 = vec_mergeh(r1, r5); /*1,5, 9,13 set 0*/ \
    r11 = vec_mergel(r1, r5); /*1,5, 9,13 set 1*/ \
    r12 = vec_mergeh(r2, r6); /*2,6,10,14 set 0*/ \
    r13 = vec_mergel(r2, r6); /*2,6,10,14 set 1*/ \
    r14 = vec_mergeh(r3, r7); /*3,7,11,15 set 0*/ \
    r15 = vec_mergel(r3, r7); /*3,7,11,15 set 1*/ \
    \
    /*Third merge*/ \
    r0 = vec_mergeh(r8,  r12); /*0,2,4,6,8,10,12,14 set 0*/ \
    r1 = vec_mergel(r8,  r12); /*0,2,4,6,8,10,12,14 set 1*/ \
    r2 = vec_mergeh(r9,  r13); /*0,2,4,6,8,10,12,14 set 2*/ \
    r4 = vec_mergeh(r10, r14); /*1,3,5,7,9,11,13,15 set 0*/ \
    r5 = vec_mergel(r10, r14); /*1,3,5,7,9,11,13,15 set 1*/ \
    r6 = vec_mergeh(r11, r15); /*1,3,5,7,9,11,13,15 set 2*/ \
    /* Don't need to compute 3 and 7*/ \
    \
    /*Final merge*/ \
    r8  = vec_mergeh(r0, r4); /*all set 0*/ \
    r9  = vec_mergel(r0, r4); /*all set 1*/ \
    r10 = vec_mergeh(r1, r5); /*all set 2*/ \
    r11 = vec_mergel(r1, r5); /*all set 3*/ \
    r12 = vec_mergeh(r2, r6); /*all set 4*/ \
    r13 = vec_mergel(r2, r6); /*all set 5*/ \
    /* Don't need to compute 14 and 15*/ \
    \
}

// out: o = |x-y| < a
static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {

    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}

static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}

// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 uncliped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);     /*avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);         /*(p2^avg(p0, q0)) & 1 */
    uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, uncliped);
    newp1 = vec_min(max, newp1);
    return newp1;
}

#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) { \
    \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); \
    \
    register vec_u8 pq0bit = vec_xor(p0,q0); \
    register vec_u8 q1minus; \
    register vec_u8 p0minus; \
    register vec_u8 stage1; \
    register vec_u8 stage2; \
    register vec_u8 vec160; \
    register vec_u8 delta; \
    register vec_u8 deltaneg; \
    \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */ \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256)>>1 */ \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */ \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */ \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256)>>1 */ \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1)); \
    stage2 = vec_avg(stage2, pq0bit);          /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
    stage2 = vec_adds(stage2, stage1);         /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */ \
    vec160 = vec_ld(0, &A0v); \
    deltaneg = vec_subs(vec160, stage2);       /* -d */ \
    delta = vec_subs(stage2, vec160);          /* d */ \
    deltaneg = vec_min(tc0masked, deltaneg); \
    delta = vec_min(tc0masked, delta); \
    p0 = vec_subs(p0, deltaneg); \
    q0 = vec_subs(q0, delta); \
    p0 = vec_adds(p0, delta); \
    q0 = vec_adds(q0, deltaneg); \
}

#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
    DECLARE_ALIGNED(16, unsigned char, temp)[16]; \
    register vec_u8 alphavec; \
    register vec_u8 betavec; \
    register vec_u8 mask; \
    register vec_u8 p1mask; \
    register vec_u8 q1mask; \
    register vector signed char tc0vec; \
    register vec_u8 finaltc0; \
    register vec_u8 tc0masked; \
    register vec_u8 newp1; \
    register vec_u8 newq1; \
    \
    temp[0] = alpha; \
    temp[1] = beta; \
    alphavec = vec_ld(0, temp); \
    betavec = vec_splat(alphavec, 0x1); \
    alphavec = vec_splat(alphavec, 0x0); \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */ \
    \
    *((int *)temp) = *((int *)tc0); \
    tc0vec = vec_ld(0, (signed char*)temp); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    tc0vec = vec_mergeh(tc0vec, tc0vec); \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));  /* if tc0[i] >= 0 */ \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                   /* tc = tc0 */ \
    \
    p1mask = diff_lt_altivec(p2, p0, betavec); \
    p1mask = vec_and(p1mask, mask);              /* if ( |p2 - p0| < beta) */ \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec); \
    finaltc0 = vec_sub(finaltc0, p1mask);        /* tc++ */ \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \
    /*end if*/ \
    \
    q1mask = diff_lt_altivec(q2, q0, betavec); \
    q1mask = vec_and(q1mask, mask);              /* if ( |q2 - q0| < beta ) */ \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec); \
    finaltc0 = vec_sub(finaltc0, q1mask);        /* tc++ */ \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked); \
    /*end if*/ \
    \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0); \
    p1 = newp1; \
    q1 = newq1; \
}

static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}

static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {

    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}

static av_always_inline
void weight_h264_WxH_altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset, int w, int h)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y=0; y<h; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}

static av_always_inline
void biweight_h264_WxH_altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom,
                               int weightd, int weights, int offset, int w, int h)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y=0; y<h; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (w == 8) {
            if (src_aligned)
                v3 = v2;
            else
                v2 = v3;
        }

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}

#define H264_WEIGHT(W,H) \
static void ff_weight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *block, int stride, int log2_denom, int weight, int offset){ \
    weight_h264_WxH_altivec(block, stride, log2_denom, weight, offset, W, H); \
}\
static void ff_biweight_h264_pixels ## W ## x ## H ## _altivec(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset){ \
    biweight_h264_WxH_altivec(dst, src, stride, log2_denom, weightd, weights, offset, W, H); \
}

H264_WEIGHT(16,16)
H264_WEIGHT(16, 8)
H264_WEIGHT( 8,16)
H264_WEIGHT( 8, 8)
H264_WEIGHT( 8, 4)

void dsputil_h264_init_ppc(DSPContext* c, AVCodecContext *avctx) {
    const int high_bit_depth = avctx->codec_id == CODEC_ID_H264 && avctx->bits_per_raw_sample > 8;

    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
        if (!high_bit_depth) {
            c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
            c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;

#define dspfunc(PFX, IDX, NUM) \
            c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
            c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
            c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
            c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
            c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
            c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
            c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
            c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
            c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
            c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
            c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
            c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
            c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
            c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
            c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
            c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec

            dspfunc(put_h264_qpel, 0, 16);
            dspfunc(avg_h264_qpel, 0, 16);
#undef dspfunc
        }
    }
}

void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth)
{
    if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
        if (bit_depth == 8) {
            c->h264_idct_add = ff_h264_idct_add_altivec;
            c->h264_idct_add8 = ff_h264_idct_add8_altivec;
            c->h264_idct_add16 = ff_h264_idct_add16_altivec;
            c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
            c->h264_idct_dc_add = h264_idct_dc_add_altivec;
            c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
            c->h264_idct8_add = ff_h264_idct8_add_altivec;
            c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
            c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
            c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

            c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16x16_altivec;
            c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels16x8_altivec;
            c->weight_h264_pixels_tab[2] = ff_weight_h264_pixels8x16_altivec;
            c->weight_h264_pixels_tab[3] = ff_weight_h264_pixels8x8_altivec;
            c->weight_h264_pixels_tab[4] = ff_weight_h264_pixels8x4_altivec;
            c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16x16_altivec;
            c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels16x8_altivec;
            c->biweight_h264_pixels_tab[2] = ff_biweight_h264_pixels8x16_altivec;
            c->biweight_h264_pixels_tab[3] = ff_biweight_h264_pixels8x8_altivec;
            c->biweight_h264_pixels_tab[4] = ff_biweight_h264_pixels8x4_altivec;
        }
    }
}
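For reference, a minimal scalar sketch (not part of Libav; the helper name avg_floor_u8 is made up for illustration) of the rounding correction that h264_deblock_q1() above performs with vec_avg/vec_xor/vec_and/vec_subs: vec_avg rounds upward, i.e. it computes (a + b + 1) >> 1 per byte lane, so subtracting (a ^ b) & 1 recovers the truncating average (a + b) >> 1 without widening the bytes to 16 bits.

#include <stdint.h>

/* Illustration only: vec_avg(a, b) returns (a + b + 1) >> 1, and
 * ((a ^ b) & 1) is 1 exactly when a + b is odd, so subtracting it
 * yields the truncating average (a + b) >> 1 for a single byte lane. */
static inline uint8_t avg_floor_u8(uint8_t a, uint8_t b)
{
    uint8_t avg_ceil = (uint8_t)((a + b + 1) >> 1); /* what vec_avg computes */
    return (uint8_t)(avg_ceil - ((a ^ b) & 1));     /* == (a + b) >> 1 */
}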