Libav
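What follows is the AltiVec (PowerPC SIMD) post-processing template: vertical deblock classification and filtering, a default vertical filter, deringing, temporal noise reduction, and the 16x8/8x16 byte transpose helpers, all written with AltiVec intrinsics (the horizontal low-pass, horizontal default filter and do_a_deblock are mapped to their C fallbacks near the end of the file).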
/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avutil.h"

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                  \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1; \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1; \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2; \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2; \
        tempA1 = vec_mergeh (src_a, src_e);               \
        tempB1 = vec_mergel (src_a, src_e);               \
        tempC1 = vec_mergeh (src_b, src_f);               \
        tempD1 = vec_mergel (src_b, src_f);               \
        tempE1 = vec_mergeh (src_c, src_g);               \
        tempF1 = vec_mergel (src_c, src_g);               \
        tempG1 = vec_mergeh (src_d, src_h);               \
        tempH1 = vec_mergel (src_d, src_h);               \
        tempA2 = vec_mergeh (tempA1, tempE1);             \
        tempB2 = vec_mergel (tempA1, tempE1);             \
        tempC2 = vec_mergeh (tempB1, tempF1);             \
        tempD2 = vec_mergel (tempB1, tempF1);             \
        tempE2 = vec_mergeh (tempC1, tempG1);             \
        tempF2 = vec_mergel (tempC1, tempG1);             \
        tempG2 = vec_mergeh (tempD1, tempH1);             \
        tempH2 = vec_mergel (tempD1, tempH1);             \
        src_a = vec_mergeh (tempA2, tempE2);              \
        src_b = vec_mergel (tempA2, tempE2);              \
        src_c = vec_mergeh (tempB2, tempF2);              \
        src_d = vec_mergel (tempB2, tempF2);              \
        src_e = vec_mergeh (tempC2, tempG2);              \
        src_f = vec_mergel (tempC2, tempG2);              \
        src_g = vec_mergeh (tempD2, tempH2);              \
        src_h = vec_mergel (tempD2, tempH2);              \
    } while (0)


static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
      this code makes no assumption on src or stride.
      One could remove the recomputation of the perm
      vector by assuming (stride % 16) == 0, unfortunately
      this is not always true.
    */
    DECLARE_ALIGNED(16, short, data)[8] =
                    {
                        ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1,
                        data[0] * 2 + 1,
                        c->QP * 2,
                        c->QP * 4
                    };
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ?
1 : 0; 00081 const vector signed int zero = vec_splat_s32(0); 00082 const vector signed short mask = vec_splat_s16(1); 00083 vector signed int v_numEq = vec_splat_s32(0); 00084 vector signed short v_data = vec_ld(0, data); 00085 vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3, 00086 v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7; 00087 //FIXME avoid this mess if possible 00088 register int j0 = 0, 00089 j1 = stride, 00090 j2 = 2 * stride, 00091 j3 = 3 * stride, 00092 j4 = 4 * stride, 00093 j5 = 5 * stride, 00094 j6 = 6 * stride, 00095 j7 = 7 * stride; 00096 vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3, 00097 v_srcA4, v_srcA5, v_srcA6, v_srcA7; 00098 00099 v_dcOffset = vec_splat(v_data, 0); 00100 v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1); 00101 v2QP = vec_splat(v_data, 2); 00102 v4QP = (vector unsigned short)vec_splat(v_data, 3); 00103 00104 src2 += stride * 4; 00105 00106 #define LOAD_LINE(i) \ 00107 { \ 00108 vector unsigned char perm##i = vec_lvsl(j##i, src2); \ 00109 vector unsigned char v_srcA2##i; \ 00110 vector unsigned char v_srcA1##i = vec_ld(j##i, src2); \ 00111 if (two_vectors) \ 00112 v_srcA2##i = vec_ld(j##i + 16, src2); \ 00113 v_srcA##i = \ 00114 vec_perm(v_srcA1##i, v_srcA2##i, perm##i); \ 00115 v_srcAss##i = \ 00116 (vector signed short)vec_mergeh((vector signed char)zero, \ 00117 (vector signed char)v_srcA##i); } 00118 00119 #define LOAD_LINE_ALIGNED(i) \ 00120 v_srcA##i = vec_ld(j##i, src2); \ 00121 v_srcAss##i = \ 00122 (vector signed short)vec_mergeh((vector signed char)zero, \ 00123 (vector signed char)v_srcA##i) 00124 00125 /* Special-casing the aligned case is worthwhile, as all calls from 00126 * the (transposed) horizontable deblocks will be aligned, in addition 00127 * to the naturally aligned vertical deblocks. 
*/ 00128 if (properStride && srcAlign) { 00129 LOAD_LINE_ALIGNED(0); 00130 LOAD_LINE_ALIGNED(1); 00131 LOAD_LINE_ALIGNED(2); 00132 LOAD_LINE_ALIGNED(3); 00133 LOAD_LINE_ALIGNED(4); 00134 LOAD_LINE_ALIGNED(5); 00135 LOAD_LINE_ALIGNED(6); 00136 LOAD_LINE_ALIGNED(7); 00137 } else { 00138 LOAD_LINE(0); 00139 LOAD_LINE(1); 00140 LOAD_LINE(2); 00141 LOAD_LINE(3); 00142 LOAD_LINE(4); 00143 LOAD_LINE(5); 00144 LOAD_LINE(6); 00145 LOAD_LINE(7); 00146 } 00147 #undef LOAD_LINE 00148 #undef LOAD_LINE_ALIGNED 00149 00150 #define ITER(i, j) \ 00151 const vector signed short v_diff##i = \ 00152 vec_sub(v_srcAss##i, v_srcAss##j); \ 00153 const vector signed short v_sum##i = \ 00154 vec_add(v_diff##i, v_dcOffset); \ 00155 const vector signed short v_comp##i = \ 00156 (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \ 00157 v_dcThreshold); \ 00158 const vector signed short v_part##i = vec_and(mask, v_comp##i); 00159 00160 { 00161 ITER(0, 1) 00162 ITER(1, 2) 00163 ITER(2, 3) 00164 ITER(3, 4) 00165 ITER(4, 5) 00166 ITER(5, 6) 00167 ITER(6, 7) 00168 00169 v_numEq = vec_sum4s(v_part0, v_numEq); 00170 v_numEq = vec_sum4s(v_part1, v_numEq); 00171 v_numEq = vec_sum4s(v_part2, v_numEq); 00172 v_numEq = vec_sum4s(v_part3, v_numEq); 00173 v_numEq = vec_sum4s(v_part4, v_numEq); 00174 v_numEq = vec_sum4s(v_part5, v_numEq); 00175 v_numEq = vec_sum4s(v_part6, v_numEq); 00176 } 00177 00178 #undef ITER 00179 00180 v_numEq = vec_sums(v_numEq, zero); 00181 00182 v_numEq = vec_splat(v_numEq, 3); 00183 vec_ste(v_numEq, 0, &numEq); 00184 00185 if (numEq > c->ppMode.flatnessThreshold){ 00186 const vector unsigned char mmoP1 = (const vector unsigned char) 00187 {0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 00188 0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B}; 00189 const vector unsigned char mmoP2 = (const vector unsigned char) 00190 {0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F, 00191 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f}; 00192 const vector unsigned char mmoP = (const vector unsigned char) 00193 vec_lvsl(8, (unsigned char*)0); 00194 00195 vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1); 00196 vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2); 00197 vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP); 00198 vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1); 00199 vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2); 00200 vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP); 00201 vector signed short mmoDiff = vec_sub(mmoL, mmoR); 00202 vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP); 00203 00204 if (vec_any_gt(mmoSum, v4QP)) 00205 return 0; 00206 else 00207 return 1; 00208 } 00209 else return 2; 00210 } 00211 00212 static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) { 00213 /* 00214 this code makes no assumption on src or stride. 00215 One could remove the recomputation of the perm 00216 vector by assuming (stride % 16) == 0, unfortunately 00217 this is not always true. 
Quite a lot of load/stores 00218 can be removed by assuming proper alignment of 00219 src & stride :-( 00220 */ 00221 uint8_t *src2 = src; 00222 const vector signed int zero = vec_splat_s32(0); 00223 const int properStride = (stride % 16); 00224 const int srcAlign = ((unsigned long)src2 % 16); 00225 DECLARE_ALIGNED(16, short, qp)[8] = {c->QP}; 00226 vector signed short vqp = vec_ld(0, qp); 00227 vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9; 00228 vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3), av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6), av_uninit(vbA7), av_uninit(vbA8), vbA9; 00229 vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3), av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6), av_uninit(vbB7), av_uninit(vbB8), vbB9; 00230 vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9; 00231 vector unsigned char perml0, perml1, perml2, perml3, perml4, 00232 perml5, perml6, perml7, perml8, perml9; 00233 register int j0 = 0, 00234 j1 = stride, 00235 j2 = 2 * stride, 00236 j3 = 3 * stride, 00237 j4 = 4 * stride, 00238 j5 = 5 * stride, 00239 j6 = 6 * stride, 00240 j7 = 7 * stride, 00241 j8 = 8 * stride, 00242 j9 = 9 * stride; 00243 00244 vqp = vec_splat(vqp, 0); 00245 00246 src2 += stride*3; 00247 00248 #define LOAD_LINE(i) \ 00249 perml##i = vec_lvsl(i * stride, src2); \ 00250 vbA##i = vec_ld(i * stride, src2); \ 00251 vbB##i = vec_ld(i * stride + 16, src2); \ 00252 vbT##i = vec_perm(vbA##i, vbB##i, perml##i); \ 00253 vb##i = \ 00254 (vector signed short)vec_mergeh((vector unsigned char)zero, \ 00255 (vector unsigned char)vbT##i) 00256 00257 #define LOAD_LINE_ALIGNED(i) \ 00258 vbT##i = vec_ld(j##i, src2); \ 00259 vb##i = \ 00260 (vector signed short)vec_mergeh((vector signed char)zero, \ 00261 (vector signed char)vbT##i) 00262 00263 /* Special-casing the aligned case is worthwhile, as all calls from 00264 * the (transposed) horizontable deblocks will be aligned, in addition 00265 * to the naturally aligned vertical deblocks. 
*/ 00266 if (properStride && srcAlign) { 00267 LOAD_LINE_ALIGNED(0); 00268 LOAD_LINE_ALIGNED(1); 00269 LOAD_LINE_ALIGNED(2); 00270 LOAD_LINE_ALIGNED(3); 00271 LOAD_LINE_ALIGNED(4); 00272 LOAD_LINE_ALIGNED(5); 00273 LOAD_LINE_ALIGNED(6); 00274 LOAD_LINE_ALIGNED(7); 00275 LOAD_LINE_ALIGNED(8); 00276 LOAD_LINE_ALIGNED(9); 00277 } else { 00278 LOAD_LINE(0); 00279 LOAD_LINE(1); 00280 LOAD_LINE(2); 00281 LOAD_LINE(3); 00282 LOAD_LINE(4); 00283 LOAD_LINE(5); 00284 LOAD_LINE(6); 00285 LOAD_LINE(7); 00286 LOAD_LINE(8); 00287 LOAD_LINE(9); 00288 } 00289 #undef LOAD_LINE 00290 #undef LOAD_LINE_ALIGNED 00291 { 00292 const vector unsigned short v_2 = vec_splat_u16(2); 00293 const vector unsigned short v_4 = vec_splat_u16(4); 00294 00295 const vector signed short v_diff01 = vec_sub(vb0, vb1); 00296 const vector unsigned short v_cmp01 = 00297 (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp); 00298 const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01); 00299 const vector signed short v_diff89 = vec_sub(vb8, vb9); 00300 const vector unsigned short v_cmp89 = 00301 (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp); 00302 const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89); 00303 00304 const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1); 00305 const vector signed short temp02 = vec_add(vb2, vb3); 00306 const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4); 00307 const vector signed short v_sumsB0 = vec_add(temp02, temp03); 00308 00309 const vector signed short temp11 = vec_sub(v_sumsB0, v_first); 00310 const vector signed short v_sumsB1 = vec_add(temp11, vb4); 00311 00312 const vector signed short temp21 = vec_sub(v_sumsB1, v_first); 00313 const vector signed short v_sumsB2 = vec_add(temp21, vb5); 00314 00315 const vector signed short temp31 = vec_sub(v_sumsB2, v_first); 00316 const vector signed short v_sumsB3 = vec_add(temp31, vb6); 00317 00318 const vector signed short temp41 = vec_sub(v_sumsB3, v_first); 00319 const vector signed short v_sumsB4 = vec_add(temp41, vb7); 00320 00321 const vector signed short temp51 = vec_sub(v_sumsB4, vb1); 00322 const vector signed short v_sumsB5 = vec_add(temp51, vb8); 00323 00324 const vector signed short temp61 = vec_sub(v_sumsB5, vb2); 00325 const vector signed short v_sumsB6 = vec_add(temp61, v_last); 00326 00327 const vector signed short temp71 = vec_sub(v_sumsB6, vb3); 00328 const vector signed short v_sumsB7 = vec_add(temp71, v_last); 00329 00330 const vector signed short temp81 = vec_sub(v_sumsB7, vb4); 00331 const vector signed short v_sumsB8 = vec_add(temp81, v_last); 00332 00333 const vector signed short temp91 = vec_sub(v_sumsB8, vb5); 00334 const vector signed short v_sumsB9 = vec_add(temp91, v_last); 00335 00336 #define COMPUTE_VR(i, j, k) \ 00337 const vector signed short temps1##i = \ 00338 vec_add(v_sumsB##i, v_sumsB##k); \ 00339 const vector signed short temps2##i = \ 00340 vec_mladd(vb##j, (vector signed short)v_2, temps1##i); \ 00341 const vector signed short vr##j = vec_sra(temps2##i, v_4) 00342 00343 COMPUTE_VR(0, 1, 2); 00344 COMPUTE_VR(1, 2, 3); 00345 COMPUTE_VR(2, 3, 4); 00346 COMPUTE_VR(3, 4, 5); 00347 COMPUTE_VR(4, 5, 6); 00348 COMPUTE_VR(5, 6, 7); 00349 COMPUTE_VR(6, 7, 8); 00350 COMPUTE_VR(7, 8, 9); 00351 00352 const vector signed char neg1 = vec_splat_s8(-1); 00353 const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 00354 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; 00355 00356 
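    /* PACK_AND_STORE writes one filtered row back without assuming alignment:
     * the eight filtered shorts are packed to bytes, recombined with the
     * right half of the original row via permHH, rotated with the vec_lvsr()
     * permute, and blended (vec_sel) into the two 16-byte blocks loaded
     * earlier, so bytes outside that span are written back unchanged.
     * PACK_AND_STORE_ALIGNED is the cheaper single-store variant used on the
     * special-cased aligned branch below. */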
#define PACK_AND_STORE(i) \ 00357 { const vector unsigned char perms##i = \ 00358 vec_lvsr(i * stride, src2); \ 00359 const vector unsigned char vf##i = \ 00360 vec_packsu(vr##i, (vector signed short)zero); \ 00361 const vector unsigned char vg##i = \ 00362 vec_perm(vf##i, vbT##i, permHH); \ 00363 const vector unsigned char mask##i = \ 00364 vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \ 00365 const vector unsigned char vg2##i = \ 00366 vec_perm(vg##i, vg##i, perms##i); \ 00367 const vector unsigned char svA##i = \ 00368 vec_sel(vbA##i, vg2##i, mask##i); \ 00369 const vector unsigned char svB##i = \ 00370 vec_sel(vg2##i, vbB##i, mask##i); \ 00371 vec_st(svA##i, i * stride, src2); \ 00372 vec_st(svB##i, i * stride + 16, src2);} 00373 00374 #define PACK_AND_STORE_ALIGNED(i) \ 00375 { const vector unsigned char vf##i = \ 00376 vec_packsu(vr##i, (vector signed short)zero); \ 00377 const vector unsigned char vg##i = \ 00378 vec_perm(vf##i, vbT##i, permHH); \ 00379 vec_st(vg##i, i * stride, src2);} 00380 00381 /* Special-casing the aligned case is worthwhile, as all calls from 00382 * the (transposed) horizontable deblocks will be aligned, in addition 00383 * to the naturally aligned vertical deblocks. */ 00384 if (properStride && srcAlign) { 00385 PACK_AND_STORE_ALIGNED(1) 00386 PACK_AND_STORE_ALIGNED(2) 00387 PACK_AND_STORE_ALIGNED(3) 00388 PACK_AND_STORE_ALIGNED(4) 00389 PACK_AND_STORE_ALIGNED(5) 00390 PACK_AND_STORE_ALIGNED(6) 00391 PACK_AND_STORE_ALIGNED(7) 00392 PACK_AND_STORE_ALIGNED(8) 00393 } else { 00394 PACK_AND_STORE(1) 00395 PACK_AND_STORE(2) 00396 PACK_AND_STORE(3) 00397 PACK_AND_STORE(4) 00398 PACK_AND_STORE(5) 00399 PACK_AND_STORE(6) 00400 PACK_AND_STORE(7) 00401 PACK_AND_STORE(8) 00402 } 00403 #undef PACK_AND_STORE 00404 #undef PACK_AND_STORE_ALIGNED 00405 } 00406 } 00407 00408 00409 00410 static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) { 00411 /* 00412 this code makes no assumption on src or stride. 00413 One could remove the recomputation of the perm 00414 vector by assuming (stride % 16) == 0, unfortunately 00415 this is not always true. 
Quite a lot of load/stores 00416 can be removed by assuming proper alignment of 00417 src & stride :-( 00418 */ 00419 uint8_t *src2 = src + stride*3; 00420 const vector signed int zero = vec_splat_s32(0); 00421 DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP}; 00422 vector signed short vqp = vec_splat( 00423 (vector signed short)vec_ld(0, qp), 0); 00424 00425 #define LOAD_LINE(i) \ 00426 const vector unsigned char perm##i = \ 00427 vec_lvsl(i * stride, src2); \ 00428 const vector unsigned char vbA##i = \ 00429 vec_ld(i * stride, src2); \ 00430 const vector unsigned char vbB##i = \ 00431 vec_ld(i * stride + 16, src2); \ 00432 const vector unsigned char vbT##i = \ 00433 vec_perm(vbA##i, vbB##i, perm##i); \ 00434 const vector signed short vb##i = \ 00435 (vector signed short)vec_mergeh((vector unsigned char)zero, \ 00436 (vector unsigned char)vbT##i) 00437 00438 LOAD_LINE(1); 00439 LOAD_LINE(2); 00440 LOAD_LINE(3); 00441 LOAD_LINE(4); 00442 LOAD_LINE(5); 00443 LOAD_LINE(6); 00444 LOAD_LINE(7); 00445 LOAD_LINE(8); 00446 #undef LOAD_LINE 00447 00448 const vector signed short v_1 = vec_splat_s16(1); 00449 const vector signed short v_2 = vec_splat_s16(2); 00450 const vector signed short v_5 = vec_splat_s16(5); 00451 const vector signed short v_32 = vec_sl(v_1, 00452 (vector unsigned short)v_5); 00453 /* middle energy */ 00454 const vector signed short l3minusl6 = vec_sub(vb3, vb6); 00455 const vector signed short l5minusl4 = vec_sub(vb5, vb4); 00456 const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero); 00457 const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6); 00458 const vector signed short absmE = vec_abs(mE); 00459 /* left & right energy */ 00460 const vector signed short l1minusl4 = vec_sub(vb1, vb4); 00461 const vector signed short l3minusl2 = vec_sub(vb3, vb2); 00462 const vector signed short l5minusl8 = vec_sub(vb5, vb8); 00463 const vector signed short l7minusl6 = vec_sub(vb7, vb6); 00464 const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero); 00465 const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero); 00466 const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4); 00467 const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8); 00468 /* d */ 00469 const vector signed short ddiff = vec_sub(absmE, 00470 vec_min(vec_abs(lE), 00471 vec_abs(rE))); 00472 const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero); 00473 const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32); 00474 const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6)); 00475 const vector signed short minusd = vec_sub((vector signed short)zero, d); 00476 const vector signed short finald = vec_sel(minusd, 00477 d, 00478 vec_cmpgt(vec_sub((vector signed short)zero, mE), 00479 (vector signed short)zero)); 00480 /* q */ 00481 const vector signed short qtimes2 = vec_sub(vb4, vb5); 00482 /* for a shift right to behave like /2, we need to add one 00483 to all negative integer */ 00484 const vector signed short rounddown = vec_sel((vector signed short)zero, 00485 v_1, 00486 vec_cmplt(qtimes2, (vector signed short)zero)); 00487 const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1)); 00488 /* clamp */ 00489 const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald); 00490 const vector signed short dclamp_P = vec_min(dclamp_P1, q); 00491 const 
vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald); 00492 const vector signed short dclamp_N = vec_max(dclamp_N1, q); 00493 00494 const vector signed short dclampedfinal = vec_sel(dclamp_N, 00495 dclamp_P, 00496 vec_cmpgt(q, (vector signed short)zero)); 00497 const vector signed short dornotd = vec_sel((vector signed short)zero, 00498 dclampedfinal, 00499 vec_cmplt(absmE, vqp)); 00500 /* add/subtract to l4 and l5 */ 00501 const vector signed short vb4minusd = vec_sub(vb4, dornotd); 00502 const vector signed short vb5plusd = vec_add(vb5, dornotd); 00503 /* finally, stores */ 00504 const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero); 00505 const vector unsigned char st5 = vec_packsu(vb5plusd, (vector signed short)zero); 00506 00507 const vector signed char neg1 = vec_splat_s8(-1); 00508 const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 00509 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; 00510 00511 #define STORE(i) \ 00512 { const vector unsigned char perms##i = \ 00513 vec_lvsr(i * stride, src2); \ 00514 const vector unsigned char vg##i = \ 00515 vec_perm(st##i, vbT##i, permHH); \ 00516 const vector unsigned char mask##i = \ 00517 vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \ 00518 const vector unsigned char vg2##i = \ 00519 vec_perm(vg##i, vg##i, perms##i); \ 00520 const vector unsigned char svA##i = \ 00521 vec_sel(vbA##i, vg2##i, mask##i); \ 00522 const vector unsigned char svB##i = \ 00523 vec_sel(vg2##i, vbB##i, mask##i); \ 00524 vec_st(svA##i, i * stride, src2); \ 00525 vec_st(svB##i, i * stride + 16, src2);} 00526 00527 STORE(4) 00528 STORE(5) 00529 } 00530 00531 static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) { 00532 /* 00533 this code makes no assumption on src or stride. 00534 One could remove the recomputation of the perm 00535 vector by assuming (stride % 16) == 0, unfortunately 00536 this is not always true. 
Quite a lot of load/stores 00537 can be removed by assuming proper alignment of 00538 src & stride :-( 00539 */ 00540 uint8_t *srcCopy = src; 00541 DECLARE_ALIGNED(16, uint8_t, dt)[16]; 00542 const vector signed int zero = vec_splat_s32(0); 00543 vector unsigned char v_dt; 00544 dt[0] = deringThreshold; 00545 v_dt = vec_splat(vec_ld(0, dt), 0); 00546 00547 #define LOAD_LINE(i) \ 00548 const vector unsigned char perm##i = \ 00549 vec_lvsl(i * stride, srcCopy); \ 00550 vector unsigned char sA##i = vec_ld(i * stride, srcCopy); \ 00551 vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy); \ 00552 vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i) 00553 00554 LOAD_LINE(0); 00555 LOAD_LINE(1); 00556 LOAD_LINE(2); 00557 LOAD_LINE(3); 00558 LOAD_LINE(4); 00559 LOAD_LINE(5); 00560 LOAD_LINE(6); 00561 LOAD_LINE(7); 00562 LOAD_LINE(8); 00563 LOAD_LINE(9); 00564 #undef LOAD_LINE 00565 00566 vector unsigned char v_avg; 00567 { 00568 const vector unsigned char trunc_perm = (vector unsigned char) 00569 {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 00570 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18}; 00571 const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm); 00572 const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm); 00573 const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm); 00574 const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm); 00575 00576 #define EXTRACT(op) do { \ 00577 const vector unsigned char s##op##_1 = vec_##op(trunc_src12, trunc_src34); \ 00578 const vector unsigned char s##op##_2 = vec_##op(trunc_src56, trunc_src78); \ 00579 const vector unsigned char s##op##_6 = vec_##op(s##op##_1, s##op##_2); \ 00580 const vector unsigned char s##op##_8h = vec_mergeh(s##op##_6, s##op##_6); \ 00581 const vector unsigned char s##op##_8l = vec_mergel(s##op##_6, s##op##_6); \ 00582 const vector unsigned char s##op##_9 = vec_##op(s##op##_8h, s##op##_8l); \ 00583 const vector unsigned char s##op##_9h = vec_mergeh(s##op##_9, s##op##_9); \ 00584 const vector unsigned char s##op##_9l = vec_mergel(s##op##_9, s##op##_9); \ 00585 const vector unsigned char s##op##_10 = vec_##op(s##op##_9h, s##op##_9l); \ 00586 const vector unsigned char s##op##_10h = vec_mergeh(s##op##_10, s##op##_10); \ 00587 const vector unsigned char s##op##_10l = vec_mergel(s##op##_10, s##op##_10); \ 00588 const vector unsigned char s##op##_11 = vec_##op(s##op##_10h, s##op##_10l); \ 00589 const vector unsigned char s##op##_11h = vec_mergeh(s##op##_11, s##op##_11); \ 00590 const vector unsigned char s##op##_11l = vec_mergel(s##op##_11, s##op##_11); \ 00591 v_##op = vec_##op(s##op##_11h, s##op##_11l); } while (0) 00592 00593 vector unsigned char v_min; 00594 vector unsigned char v_max; 00595 EXTRACT(min); 00596 EXTRACT(max); 00597 #undef EXTRACT 00598 00599 if (vec_all_lt(vec_sub(v_max, v_min), v_dt)) 00600 return; 00601 00602 v_avg = vec_avg(v_min, v_max); 00603 } 00604 00605 DECLARE_ALIGNED(16, signed int, S)[8]; 00606 { 00607 const vector unsigned short mask1 = (vector unsigned short) 00608 {0x0001, 0x0002, 0x0004, 0x0008, 00609 0x0010, 0x0020, 0x0040, 0x0080}; 00610 const vector unsigned short mask2 = (vector unsigned short) 00611 {0x0100, 0x0200, 0x0000, 0x0000, 00612 0x0000, 0x0000, 0x0000, 0x0000}; 00613 00614 const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4)); 00615 const vector unsigned int vuint32_1 = vec_splat_u32(1); 00616 00617 #define COMPARE(i) \ 00618 vector signed int sum##i; \ 00619 do { \ 
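        /* For row i, compare every pixel against v_avg and give each      \
         * column its own bit (mask1 covers columns 0-7, mask2 columns     \
         * 8-9); the bits are accumulated into sum##i, and the block that  \
         * follows combines these per-row masks into S[], which the filter \
         * loop below tests bit by bit. */                                  \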
00620 const vector unsigned char cmp##i = \ 00621 (vector unsigned char)vec_cmpgt(src##i, v_avg); \ 00622 const vector unsigned short cmpHi##i = \ 00623 (vector unsigned short)vec_mergeh(cmp##i, cmp##i); \ 00624 const vector unsigned short cmpLi##i = \ 00625 (vector unsigned short)vec_mergel(cmp##i, cmp##i); \ 00626 const vector signed short cmpHf##i = \ 00627 (vector signed short)vec_and(cmpHi##i, mask1); \ 00628 const vector signed short cmpLf##i = \ 00629 (vector signed short)vec_and(cmpLi##i, mask2); \ 00630 const vector signed int sump##i = vec_sum4s(cmpHf##i, zero); \ 00631 const vector signed int sumq##i = vec_sum4s(cmpLf##i, sump##i); \ 00632 sum##i = vec_sums(sumq##i, zero); } while (0) 00633 00634 COMPARE(0); 00635 COMPARE(1); 00636 COMPARE(2); 00637 COMPARE(3); 00638 COMPARE(4); 00639 COMPARE(5); 00640 COMPARE(6); 00641 COMPARE(7); 00642 COMPARE(8); 00643 COMPARE(9); 00644 #undef COMPARE 00645 00646 vector signed int sumA2; 00647 vector signed int sumB2; 00648 { 00649 const vector signed int sump02 = vec_mergel(sum0, sum2); 00650 const vector signed int sump13 = vec_mergel(sum1, sum3); 00651 const vector signed int sumA = vec_mergel(sump02, sump13); 00652 00653 const vector signed int sump46 = vec_mergel(sum4, sum6); 00654 const vector signed int sump57 = vec_mergel(sum5, sum7); 00655 const vector signed int sumB = vec_mergel(sump46, sump57); 00656 00657 const vector signed int sump8A = vec_mergel(sum8, zero); 00658 const vector signed int sump9B = vec_mergel(sum9, zero); 00659 const vector signed int sumC = vec_mergel(sump8A, sump9B); 00660 00661 const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16); 00662 const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16); 00663 const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16); 00664 const vector signed int t2A = vec_or(sumA, tA); 00665 const vector signed int t2B = vec_or(sumB, tB); 00666 const vector signed int t2C = vec_or(sumC, tC); 00667 const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1), 00668 vec_sl(t2A, vuint32_1)); 00669 const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1), 00670 vec_sl(t2B, vuint32_1)); 00671 const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1), 00672 vec_sl(t2C, vuint32_1)); 00673 const vector signed int yA = vec_and(t2A, t3A); 00674 const vector signed int yB = vec_and(t2B, t3B); 00675 const vector signed int yC = vec_and(t2C, t3C); 00676 00677 const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0); 00678 const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0); 00679 const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1); 00680 const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2); 00681 const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1); 00682 const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2); 00683 const vector signed int sumAp = vec_and(yA, 00684 vec_and(sumAd4,sumAd8)); 00685 const vector signed int sumBp = vec_and(yB, 00686 vec_and(sumBd4,sumBd8)); 00687 sumA2 = vec_or(sumAp, 00688 vec_sra(sumAp, 00689 vuint32_16)); 00690 sumB2 = vec_or(sumBp, 00691 vec_sra(sumBp, 00692 vuint32_16)); 00693 } 00694 vec_st(sumA2, 0, S); 00695 vec_st(sumB2, 16, S); 00696 } 00697 00698 /* I'm not sure the following is actually faster 00699 than straight, unvectorized C code :-( */ 00700 00701 DECLARE_ALIGNED(16, int, tQP2)[4]; 00702 tQP2[0]= c->QP/2 + 1; 00703 vector signed int vQP2 = vec_ld(0, tQP2); 00704 vQP2 = vec_splat(vQP2, 0); 00705 const vector signed int 
vsint32_8 = vec_splat_s32(8); 00706 const vector unsigned int vuint32_4 = vec_splat_u32(4); 00707 00708 const vector unsigned char permA1 = (vector unsigned char) 00709 {0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F, 00710 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F}; 00711 const vector unsigned char permA2 = (vector unsigned char) 00712 {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11, 00713 0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F}; 00714 const vector unsigned char permA1inc = (vector unsigned char) 00715 {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 00716 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 00717 const vector unsigned char permA2inc = (vector unsigned char) 00718 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 00719 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 00720 const vector unsigned char magic = (vector unsigned char) 00721 {0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02, 00722 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 00723 const vector unsigned char extractPerm = (vector unsigned char) 00724 {0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01, 00725 0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01}; 00726 const vector unsigned char extractPermInc = (vector unsigned char) 00727 {0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 00728 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01}; 00729 const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0); 00730 const vector unsigned char tenRight = (vector unsigned char) 00731 {0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 00732 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; 00733 const vector unsigned char eightLeft = (vector unsigned char) 00734 {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 00735 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08}; 00736 00737 00738 #define F_INIT(i) \ 00739 vector unsigned char tenRightM##i = tenRight; \ 00740 vector unsigned char permA1M##i = permA1; \ 00741 vector unsigned char permA2M##i = permA2; \ 00742 vector unsigned char extractPermM##i = extractPerm 00743 00744 #define F2(i, j, k, l) \ 00745 if (S[i] & (1 << (l+1))) { \ 00746 const vector unsigned char a_##j##_A##l = \ 00747 vec_perm(src##i, src##j, permA1M##i); \ 00748 const vector unsigned char a_##j##_B##l = \ 00749 vec_perm(a_##j##_A##l, src##k, permA2M##i); \ 00750 const vector signed int a_##j##_sump##l = \ 00751 (vector signed int)vec_msum(a_##j##_B##l, magic, \ 00752 (vector unsigned int)zero); \ 00753 vector signed int F_##j##_##l = \ 00754 vec_sr(vec_sums(a_##j##_sump##l, vsint32_8), vuint32_4); \ 00755 F_##j##_##l = vec_splat(F_##j##_##l, 3); \ 00756 const vector signed int p_##j##_##l = \ 00757 (vector signed int)vec_perm(src##j, \ 00758 (vector unsigned char)zero, \ 00759 extractPermM##i); \ 00760 const vector signed int sum_##j##_##l = vec_add( p_##j##_##l, vQP2);\ 00761 const vector signed int diff_##j##_##l = vec_sub( p_##j##_##l, vQP2);\ 00762 vector signed int newpm_##j##_##l; \ 00763 if (vec_all_lt(sum_##j##_##l, F_##j##_##l)) \ 00764 newpm_##j##_##l = sum_##j##_##l; \ 00765 else if (vec_all_gt(diff_##j##_##l, F_##j##_##l)) \ 00766 newpm_##j##_##l = diff_##j##_##l; \ 00767 else newpm_##j##_##l = F_##j##_##l; \ 00768 const vector unsigned char newpm2_##j##_##l = \ 00769 vec_splat((vector unsigned char)newpm_##j##_##l, 15); \ 00770 const vector unsigned char mask##j##l = vec_add(identity, \ 00771 tenRightM##i); \ 00772 src##j = vec_perm(src##j, newpm2_##j##_##l, mask##j##l); \ 00773 } \ 00774 permA1M##i = vec_add(permA1M##i, permA1inc); \ 00775 permA2M##i = vec_add(permA2M##i, 
permA2inc); \ 00776 tenRightM##i = vec_sro(tenRightM##i, eightLeft); \ 00777 extractPermM##i = vec_add(extractPermM##i, extractPermInc) 00778 00779 #define ITER(i, j, k) \ 00780 F_INIT(i); \ 00781 F2(i, j, k, 0); \ 00782 F2(i, j, k, 1); \ 00783 F2(i, j, k, 2); \ 00784 F2(i, j, k, 3); \ 00785 F2(i, j, k, 4); \ 00786 F2(i, j, k, 5); \ 00787 F2(i, j, k, 6); \ 00788 F2(i, j, k, 7) 00789 00790 ITER(0, 1, 2); 00791 ITER(1, 2, 3); 00792 ITER(2, 3, 4); 00793 ITER(3, 4, 5); 00794 ITER(4, 5, 6); 00795 ITER(5, 6, 7); 00796 ITER(6, 7, 8); 00797 ITER(7, 8, 9); 00798 00799 const vector signed char neg1 = vec_splat_s8(-1); 00800 00801 #define STORE_LINE(i) \ 00802 const vector unsigned char permST##i = \ 00803 vec_lvsr(i * stride, srcCopy); \ 00804 const vector unsigned char maskST##i = \ 00805 vec_perm((vector unsigned char)zero, \ 00806 (vector unsigned char)neg1, permST##i);\ 00807 src##i = vec_perm(src##i ,src##i, permST##i); \ 00808 sA##i= vec_sel(sA##i, src##i, maskST##i); \ 00809 sB##i= vec_sel(src##i, sB##i, maskST##i); \ 00810 vec_st(sA##i, i * stride, srcCopy); \ 00811 vec_st(sB##i, i * stride + 16, srcCopy) 00812 00813 STORE_LINE(1); 00814 STORE_LINE(2); 00815 STORE_LINE(3); 00816 STORE_LINE(4); 00817 STORE_LINE(5); 00818 STORE_LINE(6); 00819 STORE_LINE(7); 00820 STORE_LINE(8); 00821 00822 #undef STORE_LINE 00823 #undef ITER 00824 #undef F2 00825 } 00826 00827 #define doHorizLowPass_altivec(a...) doHorizLowPass_C(a) 00828 #define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a) 00829 #define do_a_deblock_altivec(a...) do_a_deblock_C(a) 00830 00831 static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride, 00832 uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise) 00833 { 00834 const vector signed int zero = vec_splat_s32(0); 00835 const vector signed short vsint16_1 = vec_splat_s16(1); 00836 vector signed int v_dp = zero; 00837 vector signed int v_sysdp = zero; 00838 int d, sysd, i; 00839 00840 tempBlurredPast[127]= maxNoise[0]; 00841 tempBlurredPast[128]= maxNoise[1]; 00842 tempBlurredPast[129]= maxNoise[2]; 00843 00844 #define LOAD_LINE(src, i) \ 00845 register int j##src##i = i * stride; \ 00846 vector unsigned char perm##src##i = vec_lvsl(j##src##i, src); \ 00847 const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \ 00848 const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \ 00849 const vector unsigned char v_##src##A##i = \ 00850 vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i); \ 00851 vector signed short v_##src##Ass##i = \ 00852 (vector signed short)vec_mergeh((vector signed char)zero, \ 00853 (vector signed char)v_##src##A##i) 00854 00855 LOAD_LINE(src, 0); 00856 LOAD_LINE(src, 1); 00857 LOAD_LINE(src, 2); 00858 LOAD_LINE(src, 3); 00859 LOAD_LINE(src, 4); 00860 LOAD_LINE(src, 5); 00861 LOAD_LINE(src, 6); 00862 LOAD_LINE(src, 7); 00863 00864 LOAD_LINE(tempBlurred, 0); 00865 LOAD_LINE(tempBlurred, 1); 00866 LOAD_LINE(tempBlurred, 2); 00867 LOAD_LINE(tempBlurred, 3); 00868 LOAD_LINE(tempBlurred, 4); 00869 LOAD_LINE(tempBlurred, 5); 00870 LOAD_LINE(tempBlurred, 6); 00871 LOAD_LINE(tempBlurred, 7); 00872 #undef LOAD_LINE 00873 00874 #define ACCUMULATE_DIFFS(i) \ 00875 vector signed short v_d##i = vec_sub(v_tempBlurredAss##i, \ 00876 v_srcAss##i); \ 00877 v_dp = vec_msums(v_d##i, v_d##i, v_dp); \ 00878 v_sysdp = vec_msums(v_d##i, vsint16_1, v_sysdp) 00879 00880 ACCUMULATE_DIFFS(0); 00881 ACCUMULATE_DIFFS(1); 00882 ACCUMULATE_DIFFS(2); 00883 ACCUMULATE_DIFFS(3); 00884 ACCUMULATE_DIFFS(4); 00885 ACCUMULATE_DIFFS(5); 00886 
ACCUMULATE_DIFFS(6); 00887 ACCUMULATE_DIFFS(7); 00888 #undef ACCUMULATE_DIFFS 00889 00890 v_dp = vec_sums(v_dp, zero); 00891 v_sysdp = vec_sums(v_sysdp, zero); 00892 00893 v_dp = vec_splat(v_dp, 3); 00894 v_sysdp = vec_splat(v_sysdp, 3); 00895 00896 vec_ste(v_dp, 0, &d); 00897 vec_ste(v_sysdp, 0, &sysd); 00898 00899 i = d; 00900 d = (4*d 00901 +(*(tempBlurredPast-256)) 00902 +(*(tempBlurredPast-1))+ (*(tempBlurredPast+1)) 00903 +(*(tempBlurredPast+256)) 00904 +4)>>3; 00905 00906 *tempBlurredPast=i; 00907 00908 if (d > maxNoise[1]) { 00909 if (d < maxNoise[2]) { 00910 #define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i); 00911 00912 OP(0); 00913 OP(1); 00914 OP(2); 00915 OP(3); 00916 OP(4); 00917 OP(5); 00918 OP(6); 00919 OP(7); 00920 #undef OP 00921 } else { 00922 #define OP(i) v_tempBlurredAss##i = v_srcAss##i; 00923 00924 OP(0); 00925 OP(1); 00926 OP(2); 00927 OP(3); 00928 OP(4); 00929 OP(5); 00930 OP(6); 00931 OP(7); 00932 #undef OP 00933 } 00934 } else { 00935 if (d < maxNoise[0]) { 00936 const vector signed short vsint16_7 = vec_splat_s16(7); 00937 const vector signed short vsint16_4 = vec_splat_s16(4); 00938 const vector unsigned short vuint16_3 = vec_splat_u16(3); 00939 00940 #define OP(i) \ 00941 const vector signed short v_temp##i = \ 00942 vec_mladd(v_tempBlurredAss##i, \ 00943 vsint16_7, v_srcAss##i); \ 00944 const vector signed short v_temp2##i = \ 00945 vec_add(v_temp##i, vsint16_4); \ 00946 v_tempBlurredAss##i = vec_sr(v_temp2##i, vuint16_3) 00947 00948 OP(0); 00949 OP(1); 00950 OP(2); 00951 OP(3); 00952 OP(4); 00953 OP(5); 00954 OP(6); 00955 OP(7); 00956 #undef OP 00957 } else { 00958 const vector signed short vsint16_3 = vec_splat_s16(3); 00959 const vector signed short vsint16_2 = vec_splat_s16(2); 00960 00961 #define OP(i) \ 00962 const vector signed short v_temp##i = \ 00963 vec_mladd(v_tempBlurredAss##i, \ 00964 vsint16_3, v_srcAss##i); \ 00965 const vector signed short v_temp2##i = \ 00966 vec_add(v_temp##i, vsint16_2); \ 00967 v_tempBlurredAss##i = vec_sr(v_temp2##i, (vector unsigned short)vsint16_2) 00968 00969 OP(0); 00970 OP(1); 00971 OP(2); 00972 OP(3); 00973 OP(4); 00974 OP(5); 00975 OP(6); 00976 OP(7); 00977 #undef OP 00978 } 00979 } 00980 00981 const vector signed char neg1 = vec_splat_s8(-1); 00982 const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 00983 0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F}; 00984 00985 #define PACK_AND_STORE(src, i) \ 00986 const vector unsigned char perms##src##i = \ 00987 vec_lvsr(i * stride, src); \ 00988 const vector unsigned char vf##src##i = \ 00989 vec_packsu(v_tempBlurredAss##i, (vector signed short)zero); \ 00990 const vector unsigned char vg##src##i = \ 00991 vec_perm(vf##src##i, v_##src##A##i, permHH); \ 00992 const vector unsigned char mask##src##i = \ 00993 vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##src##i); \ 00994 const vector unsigned char vg2##src##i = \ 00995 vec_perm(vg##src##i, vg##src##i, perms##src##i); \ 00996 const vector unsigned char svA##src##i = \ 00997 vec_sel(v_##src##A1##i, vg2##src##i, mask##src##i); \ 00998 const vector unsigned char svB##src##i = \ 00999 vec_sel(vg2##src##i, v_##src##A2##i, mask##src##i); \ 01000 vec_st(svA##src##i, i * stride, src); \ 01001 vec_st(svB##src##i, i * stride + 16, src) 01002 01003 PACK_AND_STORE(src, 0); 01004 PACK_AND_STORE(src, 1); 01005 PACK_AND_STORE(src, 2); 01006 PACK_AND_STORE(src, 3); 01007 PACK_AND_STORE(src, 4); 01008 PACK_AND_STORE(src, 5); 
01009 PACK_AND_STORE(src, 6); 01010 PACK_AND_STORE(src, 7); 01011 PACK_AND_STORE(tempBlurred, 0); 01012 PACK_AND_STORE(tempBlurred, 1); 01013 PACK_AND_STORE(tempBlurred, 2); 01014 PACK_AND_STORE(tempBlurred, 3); 01015 PACK_AND_STORE(tempBlurred, 4); 01016 PACK_AND_STORE(tempBlurred, 5); 01017 PACK_AND_STORE(tempBlurred, 6); 01018 PACK_AND_STORE(tempBlurred, 7); 01019 #undef PACK_AND_STORE 01020 } 01021 01022 static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) { 01023 const vector unsigned char zero = vec_splat_u8(0); 01024 01025 #define LOAD_DOUBLE_LINE(i, j) \ 01026 vector unsigned char perm1##i = vec_lvsl(i * stride, src); \ 01027 vector unsigned char perm2##i = vec_lvsl(j * stride, src); \ 01028 vector unsigned char srcA##i = vec_ld(i * stride, src); \ 01029 vector unsigned char srcB##i = vec_ld(i * stride + 16, src); \ 01030 vector unsigned char srcC##i = vec_ld(j * stride, src); \ 01031 vector unsigned char srcD##i = vec_ld(j * stride+ 16, src); \ 01032 vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \ 01033 vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i) 01034 01035 LOAD_DOUBLE_LINE(0, 1); 01036 LOAD_DOUBLE_LINE(2, 3); 01037 LOAD_DOUBLE_LINE(4, 5); 01038 LOAD_DOUBLE_LINE(6, 7); 01039 #undef LOAD_DOUBLE_LINE 01040 01041 vector unsigned char tempA = vec_mergeh(src0, zero); 01042 vector unsigned char tempB = vec_mergel(src0, zero); 01043 vector unsigned char tempC = vec_mergeh(src1, zero); 01044 vector unsigned char tempD = vec_mergel(src1, zero); 01045 vector unsigned char tempE = vec_mergeh(src2, zero); 01046 vector unsigned char tempF = vec_mergel(src2, zero); 01047 vector unsigned char tempG = vec_mergeh(src3, zero); 01048 vector unsigned char tempH = vec_mergel(src3, zero); 01049 vector unsigned char tempI = vec_mergeh(src4, zero); 01050 vector unsigned char tempJ = vec_mergel(src4, zero); 01051 vector unsigned char tempK = vec_mergeh(src5, zero); 01052 vector unsigned char tempL = vec_mergel(src5, zero); 01053 vector unsigned char tempM = vec_mergeh(src6, zero); 01054 vector unsigned char tempN = vec_mergel(src6, zero); 01055 vector unsigned char tempO = vec_mergeh(src7, zero); 01056 vector unsigned char tempP = vec_mergel(src7, zero); 01057 01058 vector unsigned char temp0 = vec_mergeh(tempA, tempI); 01059 vector unsigned char temp1 = vec_mergel(tempA, tempI); 01060 vector unsigned char temp2 = vec_mergeh(tempB, tempJ); 01061 vector unsigned char temp3 = vec_mergel(tempB, tempJ); 01062 vector unsigned char temp4 = vec_mergeh(tempC, tempK); 01063 vector unsigned char temp5 = vec_mergel(tempC, tempK); 01064 vector unsigned char temp6 = vec_mergeh(tempD, tempL); 01065 vector unsigned char temp7 = vec_mergel(tempD, tempL); 01066 vector unsigned char temp8 = vec_mergeh(tempE, tempM); 01067 vector unsigned char temp9 = vec_mergel(tempE, tempM); 01068 vector unsigned char temp10 = vec_mergeh(tempF, tempN); 01069 vector unsigned char temp11 = vec_mergel(tempF, tempN); 01070 vector unsigned char temp12 = vec_mergeh(tempG, tempO); 01071 vector unsigned char temp13 = vec_mergel(tempG, tempO); 01072 vector unsigned char temp14 = vec_mergeh(tempH, tempP); 01073 vector unsigned char temp15 = vec_mergel(tempH, tempP); 01074 01075 tempA = vec_mergeh(temp0, temp8); 01076 tempB = vec_mergel(temp0, temp8); 01077 tempC = vec_mergeh(temp1, temp9); 01078 tempD = vec_mergel(temp1, temp9); 01079 tempE = vec_mergeh(temp2, temp10); 01080 tempF = vec_mergel(temp2, temp10); 01081 tempG = vec_mergeh(temp3, 
temp11); 01082 tempH = vec_mergel(temp3, temp11); 01083 tempI = vec_mergeh(temp4, temp12); 01084 tempJ = vec_mergel(temp4, temp12); 01085 tempK = vec_mergeh(temp5, temp13); 01086 tempL = vec_mergel(temp5, temp13); 01087 tempM = vec_mergeh(temp6, temp14); 01088 tempN = vec_mergel(temp6, temp14); 01089 tempO = vec_mergeh(temp7, temp15); 01090 tempP = vec_mergel(temp7, temp15); 01091 01092 temp0 = vec_mergeh(tempA, tempI); 01093 temp1 = vec_mergel(tempA, tempI); 01094 temp2 = vec_mergeh(tempB, tempJ); 01095 temp3 = vec_mergel(tempB, tempJ); 01096 temp4 = vec_mergeh(tempC, tempK); 01097 temp5 = vec_mergel(tempC, tempK); 01098 temp6 = vec_mergeh(tempD, tempL); 01099 temp7 = vec_mergel(tempD, tempL); 01100 temp8 = vec_mergeh(tempE, tempM); 01101 temp9 = vec_mergel(tempE, tempM); 01102 temp10 = vec_mergeh(tempF, tempN); 01103 temp11 = vec_mergel(tempF, tempN); 01104 temp12 = vec_mergeh(tempG, tempO); 01105 temp13 = vec_mergel(tempG, tempO); 01106 temp14 = vec_mergeh(tempH, tempP); 01107 temp15 = vec_mergel(tempH, tempP); 01108 01109 vec_st(temp0, 0, dst); 01110 vec_st(temp1, 16, dst); 01111 vec_st(temp2, 32, dst); 01112 vec_st(temp3, 48, dst); 01113 vec_st(temp4, 64, dst); 01114 vec_st(temp5, 80, dst); 01115 vec_st(temp6, 96, dst); 01116 vec_st(temp7, 112, dst); 01117 vec_st(temp8, 128, dst); 01118 vec_st(temp9, 144, dst); 01119 vec_st(temp10, 160, dst); 01120 vec_st(temp11, 176, dst); 01121 vec_st(temp12, 192, dst); 01122 vec_st(temp13, 208, dst); 01123 vec_st(temp14, 224, dst); 01124 vec_st(temp15, 240, dst); 01125 } 01126 01127 static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) { 01128 const vector unsigned char zero = vec_splat_u8(0); 01129 01130 #define LOAD_DOUBLE_LINE(i, j) \ 01131 vector unsigned char src##i = vec_ld(i * 16, src); \ 01132 vector unsigned char src##j = vec_ld(j * 16, src) 01133 01134 LOAD_DOUBLE_LINE(0, 1); 01135 LOAD_DOUBLE_LINE(2, 3); 01136 LOAD_DOUBLE_LINE(4, 5); 01137 LOAD_DOUBLE_LINE(6, 7); 01138 LOAD_DOUBLE_LINE(8, 9); 01139 LOAD_DOUBLE_LINE(10, 11); 01140 LOAD_DOUBLE_LINE(12, 13); 01141 LOAD_DOUBLE_LINE(14, 15); 01142 #undef LOAD_DOUBLE_LINE 01143 01144 vector unsigned char tempA = vec_mergeh(src0, src8); 01145 vector unsigned char tempB; 01146 vector unsigned char tempC = vec_mergeh(src1, src9); 01147 vector unsigned char tempD; 01148 vector unsigned char tempE = vec_mergeh(src2, src10); 01149 vector unsigned char tempG = vec_mergeh(src3, src11); 01150 vector unsigned char tempI = vec_mergeh(src4, src12); 01151 vector unsigned char tempJ; 01152 vector unsigned char tempK = vec_mergeh(src5, src13); 01153 vector unsigned char tempL; 01154 vector unsigned char tempM = vec_mergeh(src6, src14); 01155 vector unsigned char tempO = vec_mergeh(src7, src15); 01156 01157 vector unsigned char temp0 = vec_mergeh(tempA, tempI); 01158 vector unsigned char temp1 = vec_mergel(tempA, tempI); 01159 vector unsigned char temp2; 01160 vector unsigned char temp3; 01161 vector unsigned char temp4 = vec_mergeh(tempC, tempK); 01162 vector unsigned char temp5 = vec_mergel(tempC, tempK); 01163 vector unsigned char temp6; 01164 vector unsigned char temp7; 01165 vector unsigned char temp8 = vec_mergeh(tempE, tempM); 01166 vector unsigned char temp9 = vec_mergel(tempE, tempM); 01167 vector unsigned char temp12 = vec_mergeh(tempG, tempO); 01168 vector unsigned char temp13 = vec_mergel(tempG, tempO); 01169 01170 tempA = vec_mergeh(temp0, temp8); 01171 tempB = vec_mergel(temp0, temp8); 01172 tempC = vec_mergeh(temp1, temp9); 01173 
tempD = vec_mergel(temp1, temp9); 01174 tempI = vec_mergeh(temp4, temp12); 01175 tempJ = vec_mergel(temp4, temp12); 01176 tempK = vec_mergeh(temp5, temp13); 01177 tempL = vec_mergel(temp5, temp13); 01178 01179 temp0 = vec_mergeh(tempA, tempI); 01180 temp1 = vec_mergel(tempA, tempI); 01181 temp2 = vec_mergeh(tempB, tempJ); 01182 temp3 = vec_mergel(tempB, tempJ); 01183 temp4 = vec_mergeh(tempC, tempK); 01184 temp5 = vec_mergel(tempC, tempK); 01185 temp6 = vec_mergeh(tempD, tempL); 01186 temp7 = vec_mergel(tempD, tempL); 01187 01188 01189 const vector signed char neg1 = vec_splat_s8(-1); 01190 #define STORE_DOUBLE_LINE(i, j) \ 01191 vector unsigned char dstA##i = vec_ld(i * stride, dst); \ 01192 vector unsigned char dstB##i = vec_ld(i * stride + 16, dst); \ 01193 vector unsigned char dstA##j = vec_ld(j * stride, dst); \ 01194 vector unsigned char dstB##j = vec_ld(j * stride+ 16, dst); \ 01195 vector unsigned char align##i = vec_lvsr(i * stride, dst); \ 01196 vector unsigned char align##j = vec_lvsr(j * stride, dst); \ 01197 vector unsigned char mask##i = vec_perm(zero, (vector unsigned char)neg1, align##i); \ 01198 vector unsigned char mask##j = vec_perm(zero, (vector unsigned char)neg1, align##j); \ 01199 vector unsigned char dstR##i = vec_perm(temp##i, temp##i, align##i);\ 01200 vector unsigned char dstR##j = vec_perm(temp##j, temp##j, align##j);\ 01201 vector unsigned char dstAF##i = vec_sel(dstA##i, dstR##i, mask##i); \ 01202 vector unsigned char dstBF##i = vec_sel(dstR##i, dstB##i, mask##i); \ 01203 vector unsigned char dstAF##j = vec_sel(dstA##j, dstR##j, mask##j); \ 01204 vector unsigned char dstBF##j = vec_sel(dstR##j, dstB##j, mask##j); \ 01205 vec_st(dstAF##i, i * stride, dst); \ 01206 vec_st(dstBF##i, i * stride + 16, dst); \ 01207 vec_st(dstAF##j, j * stride, dst); \ 01208 vec_st(dstBF##j, j * stride + 16, dst) 01209 01210 STORE_DOUBLE_LINE(0,1); 01211 STORE_DOUBLE_LINE(2,3); 01212 STORE_DOUBLE_LINE(4,5); 01213 STORE_DOUBLE_LINE(6,7); 01214 }
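Every LOAD_LINE / store macro above is built on the same AltiVec idiom for misaligned accesses, because vec_ld()/vec_st() only operate on 16-byte-aligned addresses; this is also what the recurring comment about hoisting the permute vector when (stride % 16) == 0 refers to. The sketch below shows the idiom in isolation; the helper names load_unaligned16/store_unaligned16 are illustrative only and do not appear in the file, and the store deliberately mirrors the template's read-modify-write of both covering blocks.

#include <altivec.h>
#include <stdint.h>

/* Load 16 bytes from an arbitrarily aligned address: fetch the two aligned
 * blocks that cover it and shuffle the wanted bytes together with the
 * permute vector from vec_lvsl().  (The template skips the second load when
 * it is not needed; this sketch always reads both blocks, so the extra
 * 16 bytes must be readable.) */
static vector unsigned char load_unaligned16(const uint8_t *p)
{
    vector unsigned char lo   = vec_ld(0, p);    /* aligned block containing p */
    vector unsigned char hi   = vec_ld(16, p);   /* following aligned block    */
    vector unsigned char perm = vec_lvsl(0, p);  /* shift amount = p & 15      */
    return vec_perm(lo, hi, perm);
}

/* Store 16 bytes to an arbitrarily aligned address without disturbing the
 * neighbouring bytes: rotate the data with the vec_lvsr() permute, build an
 * edge mask, and blend into the two aligned blocks read beforehand.  Both
 * blocks are written back, so the caller must own those 32 bytes. */
static void store_unaligned16(uint8_t *p, vector unsigned char v)
{
    vector unsigned char lo   = vec_ld(0, p);
    vector unsigned char hi   = vec_ld(16, p);
    vector unsigned char perm = vec_lvsr(0, p);
    vector unsigned char mask = vec_perm(vec_splat_u8(0),
                                         (vector unsigned char)vec_splat_s8(-1),
                                         perm);
    vector unsigned char rot  = vec_perm(v, v, perm);
    vec_st(vec_sel(lo, rot, mask), 0, p);   /* bytes before p are preserved     */
    vec_st(vec_sel(rot, hi, mask), 16, p);  /* bytes after p + 15 are preserved */
}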