Libav 0.7.1
|
00001 /* 00002 * MMX and SSE2 optimized snow DSP utils 00003 * Copyright (c) 2005-2006 Robert Edele <yartrebo@earthlink.net> 00004 * 00005 * This file is part of Libav. 00006 * 00007 * Libav is free software; you can redistribute it and/or 00008 * modify it under the terms of the GNU Lesser General Public 00009 * License as published by the Free Software Foundation; either 00010 * version 2.1 of the License, or (at your option) any later version. 00011 * 00012 * Libav is distributed in the hope that it will be useful, 00013 * but WITHOUT ANY WARRANTY; without even the implied warranty of 00014 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 00015 * Lesser General Public License for more details. 00016 * 00017 * You should have received a copy of the GNU Lesser General Public 00018 * License along with Libav; if not, write to the Free Software 00019 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 00020 */ 00021 00022 #include "libavutil/cpu.h" 00023 #include "libavutil/x86_cpu.h" 00024 #include "libavcodec/avcodec.h" 00025 #include "libavcodec/snow.h" 00026 #include "libavcodec/dwt.h" 00027 #include "dsputil_mmx.h" 00028 00029 static void ff_snow_horizontal_compose97i_sse2(IDWTELEM *b, int width){ 00030 const int w2= (width+1)>>1; 00031 DECLARE_ALIGNED(16, IDWTELEM, temp)[width>>1]; 00032 const int w_l= (width>>1); 00033 const int w_r= w2 - 1; 00034 int i; 00035 00036 { // Lift 0 00037 IDWTELEM * const ref = b + w2 - 1; 00038 IDWTELEM b_0 = b[0]; //By allowing the first entry in b[0] to be calculated twice 00039 // (the first time erroneously), we allow the SSE2 code to run an extra pass. 00040 // The savings in code and time are well worth having to store this value and 00041 // calculate b[0] correctly afterwards. 00042 00043 i = 0; 00044 __asm__ volatile( 00045 "pcmpeqd %%xmm7, %%xmm7 \n\t" 00046 "pcmpeqd %%xmm3, %%xmm3 \n\t" 00047 "psllw $1, %%xmm3 \n\t" 00048 "paddw %%xmm7, %%xmm3 \n\t" 00049 "psllw $13, %%xmm3 \n\t" 00050 ::); 00051 for(; i<w_l-15; i+=16){ 00052 __asm__ volatile( 00053 "movdqu (%1), %%xmm1 \n\t" 00054 "movdqu 16(%1), %%xmm5 \n\t" 00055 "movdqu 2(%1), %%xmm2 \n\t" 00056 "movdqu 18(%1), %%xmm6 \n\t" 00057 "paddw %%xmm1, %%xmm2 \n\t" 00058 "paddw %%xmm5, %%xmm6 \n\t" 00059 "paddw %%xmm7, %%xmm2 \n\t" 00060 "paddw %%xmm7, %%xmm6 \n\t" 00061 "pmulhw %%xmm3, %%xmm2 \n\t" 00062 "pmulhw %%xmm3, %%xmm6 \n\t" 00063 "paddw (%0), %%xmm2 \n\t" 00064 "paddw 16(%0), %%xmm6 \n\t" 00065 "movdqa %%xmm2, (%0) \n\t" 00066 "movdqa %%xmm6, 16(%0) \n\t" 00067 :: "r"(&b[i]), "r"(&ref[i]) 00068 : "memory" 00069 ); 00070 } 00071 snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS); 00072 b[0] = b_0 - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); 00073 } 00074 00075 { // Lift 1 00076 IDWTELEM * const dst = b+w2; 00077 00078 i = 0; 00079 for(; (((x86_reg)&dst[i]) & 0x1F) && i<w_r; i++){ 00080 dst[i] = dst[i] - (b[i] + b[i + 1]); 00081 } 00082 for(; i<w_r-15; i+=16){ 00083 __asm__ volatile( 00084 "movdqu (%1), %%xmm1 \n\t" 00085 "movdqu 16(%1), %%xmm5 \n\t" 00086 "movdqu 2(%1), %%xmm2 \n\t" 00087 "movdqu 18(%1), %%xmm6 \n\t" 00088 "paddw %%xmm1, %%xmm2 \n\t" 00089 "paddw %%xmm5, %%xmm6 \n\t" 00090 "movdqa (%0), %%xmm0 \n\t" 00091 "movdqa 16(%0), %%xmm4 \n\t" 00092 "psubw %%xmm2, %%xmm0 \n\t" 00093 "psubw %%xmm6, %%xmm4 \n\t" 00094 "movdqa %%xmm0, (%0) \n\t" 00095 "movdqa %%xmm4, 16(%0) \n\t" 00096 :: "r"(&dst[i]), "r"(&b[i]) 00097 : "memory" 00098 ); 00099 } 00100 snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS); 00101 } 00102 00103 { // Lift 2 00104 IDWTELEM * const ref = b+w2 - 1; 00105 IDWTELEM b_0 = b[0]; 00106 00107 i = 0; 00108 __asm__ volatile( 00109 "psllw $15, %%xmm7 \n\t" 00110 "pcmpeqw %%xmm6, %%xmm6 \n\t" 00111 "psrlw $13, %%xmm6 \n\t" 00112 "paddw %%xmm7, %%xmm6 \n\t" 00113 ::); 00114 for(; i<w_l-15; i+=16){ 00115 __asm__ volatile( 00116 "movdqu (%1), %%xmm0 \n\t" 00117 "movdqu 16(%1), %%xmm4 \n\t" 00118 "movdqu 2(%1), %%xmm1 \n\t" 00119 "movdqu 18(%1), %%xmm5 \n\t" //FIXME try aligned reads and shifts 00120 "paddw %%xmm6, %%xmm0 \n\t" 00121 "paddw %%xmm6, %%xmm4 \n\t" 00122 "paddw %%xmm7, %%xmm1 \n\t" 00123 "paddw %%xmm7, %%xmm5 \n\t" 00124 "pavgw %%xmm1, %%xmm0 \n\t" 00125 "pavgw %%xmm5, %%xmm4 \n\t" 00126 "psubw %%xmm7, %%xmm0 \n\t" 00127 "psubw %%xmm7, %%xmm4 \n\t" 00128 "psraw $1, %%xmm0 \n\t" 00129 "psraw $1, %%xmm4 \n\t" 00130 "movdqa (%0), %%xmm1 \n\t" 00131 "movdqa 16(%0), %%xmm5 \n\t" 00132 "paddw %%xmm1, %%xmm0 \n\t" 00133 "paddw %%xmm5, %%xmm4 \n\t" 00134 "psraw $2, %%xmm0 \n\t" 00135 "psraw $2, %%xmm4 \n\t" 00136 "paddw %%xmm1, %%xmm0 \n\t" 00137 "paddw %%xmm5, %%xmm4 \n\t" 00138 "movdqa %%xmm0, (%0) \n\t" 00139 "movdqa %%xmm4, 16(%0) \n\t" 00140 :: "r"(&b[i]), "r"(&ref[i]) 00141 : "memory" 00142 ); 00143 } 00144 snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l); 00145 b[0] = b_0 + ((2 * ref[1] + W_BO-1 + 4 * b_0) >> W_BS); 00146 } 00147 00148 { // Lift 3 00149 IDWTELEM * const src = b+w2; 00150 00151 i = 0; 00152 for(; (((x86_reg)&temp[i]) & 0x1F) && i<w_r; i++){ 00153 temp[i] = src[i] - ((-W_AM*(b[i] + b[i+1]))>>W_AS); 00154 } 00155 for(; i<w_r-7; i+=8){ 00156 __asm__ volatile( 00157 "movdqu 2(%1), %%xmm2 \n\t" 00158 "movdqu 18(%1), %%xmm6 \n\t" 00159 "paddw (%1), %%xmm2 \n\t" 00160 "paddw 16(%1), %%xmm6 \n\t" 00161 "movdqu (%0), %%xmm0 \n\t" 00162 "movdqu 16(%0), %%xmm4 \n\t" 00163 "paddw %%xmm2, %%xmm0 \n\t" 00164 "paddw %%xmm6, %%xmm4 \n\t" 00165 "psraw $1, %%xmm2 \n\t" 00166 "psraw $1, %%xmm6 \n\t" 00167 "paddw %%xmm0, %%xmm2 \n\t" 00168 "paddw %%xmm4, %%xmm6 \n\t" 00169 "movdqa %%xmm2, (%2) \n\t" 00170 "movdqa %%xmm6, 16(%2) \n\t" 00171 :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i]) 00172 : "memory" 00173 ); 00174 } 00175 snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS); 00176 } 00177 00178 { 00179 snow_interleave_line_header(&i, width, b, temp); 00180 00181 for (; (i & 0x3E) != 0x3E; i-=2){ 00182 b[i+1] = temp[i>>1]; 00183 b[i] = b[i>>1]; 00184 } 00185 for (i-=62; i>=0; i-=64){ 00186 __asm__ volatile( 00187 "movdqa (%1), %%xmm0 \n\t" 00188 "movdqa 16(%1), %%xmm2 \n\t" 00189 "movdqa 32(%1), %%xmm4 \n\t" 00190 "movdqa 48(%1), %%xmm6 \n\t" 00191 "movdqa (%1), %%xmm1 \n\t" 00192 "movdqa 16(%1), %%xmm3 \n\t" 00193 "movdqa 32(%1), %%xmm5 \n\t" 00194 "movdqa 48(%1), %%xmm7 \n\t" 00195 "punpcklwd (%2), %%xmm0 \n\t" 00196 "punpcklwd 16(%2), %%xmm2 \n\t" 00197 "punpcklwd 32(%2), %%xmm4 \n\t" 00198 "punpcklwd 48(%2), %%xmm6 \n\t" 00199 "movdqa %%xmm0, (%0) \n\t" 00200 "movdqa %%xmm2, 32(%0) \n\t" 00201 "movdqa %%xmm4, 64(%0) \n\t" 00202 "movdqa %%xmm6, 96(%0) \n\t" 00203 "punpckhwd (%2), %%xmm1 \n\t" 00204 "punpckhwd 16(%2), %%xmm3 \n\t" 00205 "punpckhwd 32(%2), %%xmm5 \n\t" 00206 "punpckhwd 48(%2), %%xmm7 \n\t" 00207 "movdqa %%xmm1, 16(%0) \n\t" 00208 "movdqa %%xmm3, 48(%0) \n\t" 00209 "movdqa %%xmm5, 80(%0) \n\t" 00210 "movdqa %%xmm7, 112(%0) \n\t" 00211 :: "r"(&(b)[i]), "r"(&(b)[i>>1]), "r"(&(temp)[i>>1]) 00212 : "memory" 00213 ); 00214 } 00215 } 00216 } 00217 00218 static void ff_snow_horizontal_compose97i_mmx(IDWTELEM *b, int width){ 00219 const int w2= (width+1)>>1; 00220 IDWTELEM temp[width >> 1]; 00221 const int w_l= (width>>1); 00222 const int w_r= w2 - 1; 00223 int i; 00224 00225 { // Lift 0 00226 IDWTELEM * const ref = b + w2 - 1; 00227 00228 i = 1; 00229 b[0] = b[0] - ((W_DM * 2 * ref[1]+W_DO)>>W_DS); 00230 __asm__ volatile( 00231 "pcmpeqw %%mm7, %%mm7 \n\t" 00232 "pcmpeqw %%mm3, %%mm3 \n\t" 00233 "psllw $1, %%mm3 \n\t" 00234 "paddw %%mm7, %%mm3 \n\t" 00235 "psllw $13, %%mm3 \n\t" 00236 ::); 00237 for(; i<w_l-7; i+=8){ 00238 __asm__ volatile( 00239 "movq (%1), %%mm2 \n\t" 00240 "movq 8(%1), %%mm6 \n\t" 00241 "paddw 2(%1), %%mm2 \n\t" 00242 "paddw 10(%1), %%mm6 \n\t" 00243 "paddw %%mm7, %%mm2 \n\t" 00244 "paddw %%mm7, %%mm6 \n\t" 00245 "pmulhw %%mm3, %%mm2 \n\t" 00246 "pmulhw %%mm3, %%mm6 \n\t" 00247 "paddw (%0), %%mm2 \n\t" 00248 "paddw 8(%0), %%mm6 \n\t" 00249 "movq %%mm2, (%0) \n\t" 00250 "movq %%mm6, 8(%0) \n\t" 00251 :: "r"(&b[i]), "r"(&ref[i]) 00252 : "memory" 00253 ); 00254 } 00255 snow_horizontal_compose_lift_lead_out(i, b, b, ref, width, w_l, 0, W_DM, W_DO, W_DS); 00256 } 00257 00258 { // Lift 1 00259 IDWTELEM * const dst = b+w2; 00260 00261 i = 0; 00262 for(; i<w_r-7; i+=8){ 00263 __asm__ volatile( 00264 "movq (%1), %%mm2 \n\t" 00265 "movq 8(%1), %%mm6 \n\t" 00266 "paddw 2(%1), %%mm2 \n\t" 00267 "paddw 10(%1), %%mm6 \n\t" 00268 "movq (%0), %%mm0 \n\t" 00269 "movq 8(%0), %%mm4 \n\t" 00270 "psubw %%mm2, %%mm0 \n\t" 00271 "psubw %%mm6, %%mm4 \n\t" 00272 "movq %%mm0, (%0) \n\t" 00273 "movq %%mm4, 8(%0) \n\t" 00274 :: "r"(&dst[i]), "r"(&b[i]) 00275 : "memory" 00276 ); 00277 } 00278 snow_horizontal_compose_lift_lead_out(i, dst, dst, b, width, w_r, 1, W_CM, W_CO, W_CS); 00279 } 00280 00281 { // Lift 2 00282 IDWTELEM * const ref = b+w2 - 1; 00283 00284 i = 1; 00285 b[0] = b[0] + (((2 * ref[1] + W_BO) + 4 * b[0]) >> W_BS); 00286 __asm__ volatile( 00287 "psllw $15, %%mm7 \n\t" 00288 "pcmpeqw %%mm6, %%mm6 \n\t" 00289 "psrlw $13, %%mm6 \n\t" 00290 "paddw %%mm7, %%mm6 \n\t" 00291 ::); 00292 for(; i<w_l-7; i+=8){ 00293 __asm__ volatile( 00294 "movq (%1), %%mm0 \n\t" 00295 "movq 8(%1), %%mm4 \n\t" 00296 "movq 2(%1), %%mm1 \n\t" 00297 "movq 10(%1), %%mm5 \n\t" 00298 "paddw %%mm6, %%mm0 \n\t" 00299 "paddw %%mm6, %%mm4 \n\t" 00300 "paddw %%mm7, %%mm1 \n\t" 00301 "paddw %%mm7, %%mm5 \n\t" 00302 "pavgw %%mm1, %%mm0 \n\t" 00303 "pavgw %%mm5, %%mm4 \n\t" 00304 "psubw %%mm7, %%mm0 \n\t" 00305 "psubw %%mm7, %%mm4 \n\t" 00306 "psraw $1, %%mm0 \n\t" 00307 "psraw $1, %%mm4 \n\t" 00308 "movq (%0), %%mm1 \n\t" 00309 "movq 8(%0), %%mm5 \n\t" 00310 "paddw %%mm1, %%mm0 \n\t" 00311 "paddw %%mm5, %%mm4 \n\t" 00312 "psraw $2, %%mm0 \n\t" 00313 "psraw $2, %%mm4 \n\t" 00314 "paddw %%mm1, %%mm0 \n\t" 00315 "paddw %%mm5, %%mm4 \n\t" 00316 "movq %%mm0, (%0) \n\t" 00317 "movq %%mm4, 8(%0) \n\t" 00318 :: "r"(&b[i]), "r"(&ref[i]) 00319 : "memory" 00320 ); 00321 } 00322 snow_horizontal_compose_liftS_lead_out(i, b, b, ref, width, w_l); 00323 } 00324 00325 { // Lift 3 00326 IDWTELEM * const src = b+w2; 00327 i = 0; 00328 00329 for(; i<w_r-7; i+=8){ 00330 __asm__ volatile( 00331 "movq 2(%1), %%mm2 \n\t" 00332 "movq 10(%1), %%mm6 \n\t" 00333 "paddw (%1), %%mm2 \n\t" 00334 "paddw 8(%1), %%mm6 \n\t" 00335 "movq (%0), %%mm0 \n\t" 00336 "movq 8(%0), %%mm4 \n\t" 00337 "paddw %%mm2, %%mm0 \n\t" 00338 "paddw %%mm6, %%mm4 \n\t" 00339 "psraw $1, %%mm2 \n\t" 00340 "psraw $1, %%mm6 \n\t" 00341 "paddw %%mm0, %%mm2 \n\t" 00342 "paddw %%mm4, %%mm6 \n\t" 00343 "movq %%mm2, (%2) \n\t" 00344 "movq %%mm6, 8(%2) \n\t" 00345 :: "r"(&src[i]), "r"(&b[i]), "r"(&temp[i]) 00346 : "memory" 00347 ); 00348 } 00349 snow_horizontal_compose_lift_lead_out(i, temp, src, b, width, w_r, 1, -W_AM, W_AO+1, W_AS); 00350 } 00351 00352 { 00353 snow_interleave_line_header(&i, width, b, temp); 00354 00355 for (; (i & 0x1E) != 0x1E; i-=2){ 00356 b[i+1] = temp[i>>1]; 00357 b[i] = b[i>>1]; 00358 } 00359 for (i-=30; i>=0; i-=32){ 00360 __asm__ volatile( 00361 "movq (%1), %%mm0 \n\t" 00362 "movq 8(%1), %%mm2 \n\t" 00363 "movq 16(%1), %%mm4 \n\t" 00364 "movq 24(%1), %%mm6 \n\t" 00365 "movq (%1), %%mm1 \n\t" 00366 "movq 8(%1), %%mm3 \n\t" 00367 "movq 16(%1), %%mm5 \n\t" 00368 "movq 24(%1), %%mm7 \n\t" 00369 "punpcklwd (%2), %%mm0 \n\t" 00370 "punpcklwd 8(%2), %%mm2 \n\t" 00371 "punpcklwd 16(%2), %%mm4 \n\t" 00372 "punpcklwd 24(%2), %%mm6 \n\t" 00373 "movq %%mm0, (%0) \n\t" 00374 "movq %%mm2, 16(%0) \n\t" 00375 "movq %%mm4, 32(%0) \n\t" 00376 "movq %%mm6, 48(%0) \n\t" 00377 "punpckhwd (%2), %%mm1 \n\t" 00378 "punpckhwd 8(%2), %%mm3 \n\t" 00379 "punpckhwd 16(%2), %%mm5 \n\t" 00380 "punpckhwd 24(%2), %%mm7 \n\t" 00381 "movq %%mm1, 8(%0) \n\t" 00382 "movq %%mm3, 24(%0) \n\t" 00383 "movq %%mm5, 40(%0) \n\t" 00384 "movq %%mm7, 56(%0) \n\t" 00385 :: "r"(&b[i]), "r"(&b[i>>1]), "r"(&temp[i>>1]) 00386 : "memory" 00387 ); 00388 } 00389 } 00390 } 00391 00392 #if HAVE_7REGS 00393 #define snow_vertical_compose_sse2_load_add(op,r,t0,t1,t2,t3)\ 00394 ""op" ("r",%%"REG_d"), %%"t0" \n\t"\ 00395 ""op" 16("r",%%"REG_d"), %%"t1" \n\t"\ 00396 ""op" 32("r",%%"REG_d"), %%"t2" \n\t"\ 00397 ""op" 48("r",%%"REG_d"), %%"t3" \n\t" 00398 00399 #define snow_vertical_compose_sse2_load(r,t0,t1,t2,t3)\ 00400 snow_vertical_compose_sse2_load_add("movdqa",r,t0,t1,t2,t3) 00401 00402 #define snow_vertical_compose_sse2_add(r,t0,t1,t2,t3)\ 00403 snow_vertical_compose_sse2_load_add("paddw",r,t0,t1,t2,t3) 00404 00405 #define snow_vertical_compose_r2r_sub(s0,s1,s2,s3,t0,t1,t2,t3)\ 00406 "psubw %%"s0", %%"t0" \n\t"\ 00407 "psubw %%"s1", %%"t1" \n\t"\ 00408 "psubw %%"s2", %%"t2" \n\t"\ 00409 "psubw %%"s3", %%"t3" \n\t" 00410 00411 #define snow_vertical_compose_sse2_store(w,s0,s1,s2,s3)\ 00412 "movdqa %%"s0", ("w",%%"REG_d") \n\t"\ 00413 "movdqa %%"s1", 16("w",%%"REG_d") \n\t"\ 00414 "movdqa %%"s2", 32("w",%%"REG_d") \n\t"\ 00415 "movdqa %%"s3", 48("w",%%"REG_d") \n\t" 00416 00417 #define snow_vertical_compose_sra(n,t0,t1,t2,t3)\ 00418 "psraw $"n", %%"t0" \n\t"\ 00419 "psraw $"n", %%"t1" \n\t"\ 00420 "psraw $"n", %%"t2" \n\t"\ 00421 "psraw $"n", %%"t3" \n\t" 00422 00423 #define snow_vertical_compose_r2r_add(s0,s1,s2,s3,t0,t1,t2,t3)\ 00424 "paddw %%"s0", %%"t0" \n\t"\ 00425 "paddw %%"s1", %%"t1" \n\t"\ 00426 "paddw %%"s2", %%"t2" \n\t"\ 00427 "paddw %%"s3", %%"t3" \n\t" 00428 00429 #define snow_vertical_compose_r2r_pmulhw(s0,s1,s2,s3,t0,t1,t2,t3)\ 00430 "pmulhw %%"s0", %%"t0" \n\t"\ 00431 "pmulhw %%"s1", %%"t1" \n\t"\ 00432 "pmulhw %%"s2", %%"t2" \n\t"\ 00433 "pmulhw %%"s3", %%"t3" \n\t" 00434 00435 #define snow_vertical_compose_sse2_move(s0,s1,s2,s3,t0,t1,t2,t3)\ 00436 "movdqa %%"s0", %%"t0" \n\t"\ 00437 "movdqa %%"s1", %%"t1" \n\t"\ 00438 "movdqa %%"s2", %%"t2" \n\t"\ 00439 "movdqa %%"s3", %%"t3" \n\t" 00440 00441 static void ff_snow_vertical_compose97i_sse2(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){ 00442 x86_reg i = width; 00443 00444 while(i & 0x1F) 00445 { 00446 i--; 00447 b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS; 00448 b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS; 00449 b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS; 00450 b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS; 00451 } 00452 i+=i; 00453 00454 __asm__ volatile ( 00455 "jmp 2f \n\t" 00456 "1: \n\t" 00457 snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6") 00458 snow_vertical_compose_sse2_add("%6","xmm0","xmm2","xmm4","xmm6") 00459 00460 00461 "pcmpeqw %%xmm0, %%xmm0 \n\t" 00462 "pcmpeqw %%xmm2, %%xmm2 \n\t" 00463 "paddw %%xmm2, %%xmm2 \n\t" 00464 "paddw %%xmm0, %%xmm2 \n\t" 00465 "psllw $13, %%xmm2 \n\t" 00466 snow_vertical_compose_r2r_add("xmm0","xmm0","xmm0","xmm0","xmm1","xmm3","xmm5","xmm7") 00467 snow_vertical_compose_r2r_pmulhw("xmm2","xmm2","xmm2","xmm2","xmm1","xmm3","xmm5","xmm7") 00468 snow_vertical_compose_sse2_add("%5","xmm1","xmm3","xmm5","xmm7") 00469 snow_vertical_compose_sse2_store("%5","xmm1","xmm3","xmm5","xmm7") 00470 snow_vertical_compose_sse2_load("%4","xmm0","xmm2","xmm4","xmm6") 00471 snow_vertical_compose_sse2_add("%3","xmm1","xmm3","xmm5","xmm7") 00472 snow_vertical_compose_r2r_sub("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6") 00473 snow_vertical_compose_sse2_store("%4","xmm0","xmm2","xmm4","xmm6") 00474 00475 "pcmpeqw %%xmm7, %%xmm7 \n\t" 00476 "pcmpeqw %%xmm5, %%xmm5 \n\t" 00477 "psllw $15, %%xmm7 \n\t" 00478 "psrlw $13, %%xmm5 \n\t" 00479 "paddw %%xmm7, %%xmm5 \n\t" 00480 snow_vertical_compose_r2r_add("xmm5","xmm5","xmm5","xmm5","xmm0","xmm2","xmm4","xmm6") 00481 "movq (%2,%%"REG_d"), %%xmm1 \n\t" 00482 "movq 8(%2,%%"REG_d"), %%xmm3 \n\t" 00483 "paddw %%xmm7, %%xmm1 \n\t" 00484 "paddw %%xmm7, %%xmm3 \n\t" 00485 "pavgw %%xmm1, %%xmm0 \n\t" 00486 "pavgw %%xmm3, %%xmm2 \n\t" 00487 "movq 16(%2,%%"REG_d"), %%xmm1 \n\t" 00488 "movq 24(%2,%%"REG_d"), %%xmm3 \n\t" 00489 "paddw %%xmm7, %%xmm1 \n\t" 00490 "paddw %%xmm7, %%xmm3 \n\t" 00491 "pavgw %%xmm1, %%xmm4 \n\t" 00492 "pavgw %%xmm3, %%xmm6 \n\t" 00493 snow_vertical_compose_r2r_sub("xmm7","xmm7","xmm7","xmm7","xmm0","xmm2","xmm4","xmm6") 00494 snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6") 00495 snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6") 00496 00497 snow_vertical_compose_sra("2","xmm0","xmm2","xmm4","xmm6") 00498 snow_vertical_compose_sse2_add("%3","xmm0","xmm2","xmm4","xmm6") 00499 snow_vertical_compose_sse2_store("%3","xmm0","xmm2","xmm4","xmm6") 00500 snow_vertical_compose_sse2_add("%1","xmm0","xmm2","xmm4","xmm6") 00501 snow_vertical_compose_sse2_move("xmm0","xmm2","xmm4","xmm6","xmm1","xmm3","xmm5","xmm7") 00502 snow_vertical_compose_sra("1","xmm0","xmm2","xmm4","xmm6") 00503 snow_vertical_compose_r2r_add("xmm1","xmm3","xmm5","xmm7","xmm0","xmm2","xmm4","xmm6") 00504 snow_vertical_compose_sse2_add("%2","xmm0","xmm2","xmm4","xmm6") 00505 snow_vertical_compose_sse2_store("%2","xmm0","xmm2","xmm4","xmm6") 00506 00507 "2: \n\t" 00508 "sub $64, %%"REG_d" \n\t" 00509 "jge 1b \n\t" 00510 :"+d"(i) 00511 :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5)); 00512 } 00513 00514 #define snow_vertical_compose_mmx_load_add(op,r,t0,t1,t2,t3)\ 00515 ""op" ("r",%%"REG_d"), %%"t0" \n\t"\ 00516 ""op" 8("r",%%"REG_d"), %%"t1" \n\t"\ 00517 ""op" 16("r",%%"REG_d"), %%"t2" \n\t"\ 00518 ""op" 24("r",%%"REG_d"), %%"t3" \n\t" 00519 00520 #define snow_vertical_compose_mmx_load(r,t0,t1,t2,t3)\ 00521 snow_vertical_compose_mmx_load_add("movq",r,t0,t1,t2,t3) 00522 00523 #define snow_vertical_compose_mmx_add(r,t0,t1,t2,t3)\ 00524 snow_vertical_compose_mmx_load_add("paddw",r,t0,t1,t2,t3) 00525 00526 #define snow_vertical_compose_mmx_store(w,s0,s1,s2,s3)\ 00527 "movq %%"s0", ("w",%%"REG_d") \n\t"\ 00528 "movq %%"s1", 8("w",%%"REG_d") \n\t"\ 00529 "movq %%"s2", 16("w",%%"REG_d") \n\t"\ 00530 "movq %%"s3", 24("w",%%"REG_d") \n\t" 00531 00532 #define snow_vertical_compose_mmx_move(s0,s1,s2,s3,t0,t1,t2,t3)\ 00533 "movq %%"s0", %%"t0" \n\t"\ 00534 "movq %%"s1", %%"t1" \n\t"\ 00535 "movq %%"s2", %%"t2" \n\t"\ 00536 "movq %%"s3", %%"t3" \n\t" 00537 00538 00539 static void ff_snow_vertical_compose97i_mmx(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width){ 00540 x86_reg i = width; 00541 while(i & 15) 00542 { 00543 i--; 00544 b4[i] -= (W_DM*(b3[i] + b5[i])+W_DO)>>W_DS; 00545 b3[i] -= (W_CM*(b2[i] + b4[i])+W_CO)>>W_CS; 00546 b2[i] += (W_BM*(b1[i] + b3[i])+4*b2[i]+W_BO)>>W_BS; 00547 b1[i] += (W_AM*(b0[i] + b2[i])+W_AO)>>W_AS; 00548 } 00549 i+=i; 00550 __asm__ volatile( 00551 "jmp 2f \n\t" 00552 "1: \n\t" 00553 00554 snow_vertical_compose_mmx_load("%4","mm1","mm3","mm5","mm7") 00555 snow_vertical_compose_mmx_add("%6","mm1","mm3","mm5","mm7") 00556 "pcmpeqw %%mm0, %%mm0 \n\t" 00557 "pcmpeqw %%mm2, %%mm2 \n\t" 00558 "paddw %%mm2, %%mm2 \n\t" 00559 "paddw %%mm0, %%mm2 \n\t" 00560 "psllw $13, %%mm2 \n\t" 00561 snow_vertical_compose_r2r_add("mm0","mm0","mm0","mm0","mm1","mm3","mm5","mm7") 00562 snow_vertical_compose_r2r_pmulhw("mm2","mm2","mm2","mm2","mm1","mm3","mm5","mm7") 00563 snow_vertical_compose_mmx_add("%5","mm1","mm3","mm5","mm7") 00564 snow_vertical_compose_mmx_store("%5","mm1","mm3","mm5","mm7") 00565 snow_vertical_compose_mmx_load("%4","mm0","mm2","mm4","mm6") 00566 snow_vertical_compose_mmx_add("%3","mm1","mm3","mm5","mm7") 00567 snow_vertical_compose_r2r_sub("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6") 00568 snow_vertical_compose_mmx_store("%4","mm0","mm2","mm4","mm6") 00569 "pcmpeqw %%mm7, %%mm7 \n\t" 00570 "pcmpeqw %%mm5, %%mm5 \n\t" 00571 "psllw $15, %%mm7 \n\t" 00572 "psrlw $13, %%mm5 \n\t" 00573 "paddw %%mm7, %%mm5 \n\t" 00574 snow_vertical_compose_r2r_add("mm5","mm5","mm5","mm5","mm0","mm2","mm4","mm6") 00575 "movq (%2,%%"REG_d"), %%mm1 \n\t" 00576 "movq 8(%2,%%"REG_d"), %%mm3 \n\t" 00577 "paddw %%mm7, %%mm1 \n\t" 00578 "paddw %%mm7, %%mm3 \n\t" 00579 "pavgw %%mm1, %%mm0 \n\t" 00580 "pavgw %%mm3, %%mm2 \n\t" 00581 "movq 16(%2,%%"REG_d"), %%mm1 \n\t" 00582 "movq 24(%2,%%"REG_d"), %%mm3 \n\t" 00583 "paddw %%mm7, %%mm1 \n\t" 00584 "paddw %%mm7, %%mm3 \n\t" 00585 "pavgw %%mm1, %%mm4 \n\t" 00586 "pavgw %%mm3, %%mm6 \n\t" 00587 snow_vertical_compose_r2r_sub("mm7","mm7","mm7","mm7","mm0","mm2","mm4","mm6") 00588 snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6") 00589 snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6") 00590 00591 snow_vertical_compose_sra("2","mm0","mm2","mm4","mm6") 00592 snow_vertical_compose_mmx_add("%3","mm0","mm2","mm4","mm6") 00593 snow_vertical_compose_mmx_store("%3","mm0","mm2","mm4","mm6") 00594 snow_vertical_compose_mmx_add("%1","mm0","mm2","mm4","mm6") 00595 snow_vertical_compose_mmx_move("mm0","mm2","mm4","mm6","mm1","mm3","mm5","mm7") 00596 snow_vertical_compose_sra("1","mm0","mm2","mm4","mm6") 00597 snow_vertical_compose_r2r_add("mm1","mm3","mm5","mm7","mm0","mm2","mm4","mm6") 00598 snow_vertical_compose_mmx_add("%2","mm0","mm2","mm4","mm6") 00599 snow_vertical_compose_mmx_store("%2","mm0","mm2","mm4","mm6") 00600 00601 "2: \n\t" 00602 "sub $32, %%"REG_d" \n\t" 00603 "jge 1b \n\t" 00604 :"+d"(i) 00605 :"r"(b0),"r"(b1),"r"(b2),"r"(b3),"r"(b4),"r"(b5)); 00606 } 00607 #endif //HAVE_7REGS 00608 00609 #define snow_inner_add_yblock_sse2_header \ 00610 IDWTELEM * * dst_array = sb->line + src_y;\ 00611 x86_reg tmp;\ 00612 __asm__ volatile(\ 00613 "mov %7, %%"REG_c" \n\t"\ 00614 "mov %6, %2 \n\t"\ 00615 "mov %4, %%"REG_S" \n\t"\ 00616 "pxor %%xmm7, %%xmm7 \n\t" /* 0 */\ 00617 "pcmpeqd %%xmm3, %%xmm3 \n\t"\ 00618 "psllw $15, %%xmm3 \n\t"\ 00619 "psrlw $12, %%xmm3 \n\t" /* FRAC_BITS >> 1 */\ 00620 "1: \n\t"\ 00621 "mov %1, %%"REG_D" \n\t"\ 00622 "mov (%%"REG_D"), %%"REG_D" \n\t"\ 00623 "add %3, %%"REG_D" \n\t" 00624 00625 #define snow_inner_add_yblock_sse2_start_8(out_reg1, out_reg2, ptr_offset, s_offset)\ 00626 "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\ 00627 "movq (%%"REG_d"), %%"out_reg1" \n\t"\ 00628 "movq (%%"REG_d", %%"REG_c"), %%"out_reg2" \n\t"\ 00629 "punpcklbw %%xmm7, %%"out_reg1" \n\t"\ 00630 "punpcklbw %%xmm7, %%"out_reg2" \n\t"\ 00631 "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\ 00632 "movq "s_offset"+16(%%"REG_S"), %%xmm4 \n\t"\ 00633 "punpcklbw %%xmm7, %%xmm0 \n\t"\ 00634 "punpcklbw %%xmm7, %%xmm4 \n\t"\ 00635 "pmullw %%xmm0, %%"out_reg1" \n\t"\ 00636 "pmullw %%xmm4, %%"out_reg2" \n\t" 00637 00638 #define snow_inner_add_yblock_sse2_start_16(out_reg1, out_reg2, ptr_offset, s_offset)\ 00639 "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\ 00640 "movq (%%"REG_d"), %%"out_reg1" \n\t"\ 00641 "movq 8(%%"REG_d"), %%"out_reg2" \n\t"\ 00642 "punpcklbw %%xmm7, %%"out_reg1" \n\t"\ 00643 "punpcklbw %%xmm7, %%"out_reg2" \n\t"\ 00644 "movq "s_offset"(%%"REG_S"), %%xmm0 \n\t"\ 00645 "movq "s_offset"+8(%%"REG_S"), %%xmm4 \n\t"\ 00646 "punpcklbw %%xmm7, %%xmm0 \n\t"\ 00647 "punpcklbw %%xmm7, %%xmm4 \n\t"\ 00648 "pmullw %%xmm0, %%"out_reg1" \n\t"\ 00649 "pmullw %%xmm4, %%"out_reg2" \n\t" 00650 00651 #define snow_inner_add_yblock_sse2_accum_8(ptr_offset, s_offset) \ 00652 snow_inner_add_yblock_sse2_start_8("xmm2", "xmm6", ptr_offset, s_offset)\ 00653 "paddusw %%xmm2, %%xmm1 \n\t"\ 00654 "paddusw %%xmm6, %%xmm5 \n\t" 00655 00656 #define snow_inner_add_yblock_sse2_accum_16(ptr_offset, s_offset) \ 00657 snow_inner_add_yblock_sse2_start_16("xmm2", "xmm6", ptr_offset, s_offset)\ 00658 "paddusw %%xmm2, %%xmm1 \n\t"\ 00659 "paddusw %%xmm6, %%xmm5 \n\t" 00660 00661 #define snow_inner_add_yblock_sse2_end_common1\ 00662 "add $32, %%"REG_S" \n\t"\ 00663 "add %%"REG_c", %0 \n\t"\ 00664 "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\ 00665 "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\ 00666 "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\ 00667 "add %%"REG_c", (%%"REG_a") \n\t" 00668 00669 #define snow_inner_add_yblock_sse2_end_common2\ 00670 "jnz 1b \n\t"\ 00671 :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\ 00672 :\ 00673 "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\ 00674 "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d""); 00675 00676 #define snow_inner_add_yblock_sse2_end_8\ 00677 "sal $1, %%"REG_c" \n\t"\ 00678 "add $"PTR_SIZE"*2, %1 \n\t"\ 00679 snow_inner_add_yblock_sse2_end_common1\ 00680 "sar $1, %%"REG_c" \n\t"\ 00681 "sub $2, %2 \n\t"\ 00682 snow_inner_add_yblock_sse2_end_common2 00683 00684 #define snow_inner_add_yblock_sse2_end_16\ 00685 "add $"PTR_SIZE"*1, %1 \n\t"\ 00686 snow_inner_add_yblock_sse2_end_common1\ 00687 "dec %2 \n\t"\ 00688 snow_inner_add_yblock_sse2_end_common2 00689 00690 static void inner_add_yblock_bw_8_obmc_16_bh_even_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h, 00691 int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){ 00692 snow_inner_add_yblock_sse2_header 00693 snow_inner_add_yblock_sse2_start_8("xmm1", "xmm5", "3", "0") 00694 snow_inner_add_yblock_sse2_accum_8("2", "8") 00695 snow_inner_add_yblock_sse2_accum_8("1", "128") 00696 snow_inner_add_yblock_sse2_accum_8("0", "136") 00697 00698 "mov %0, %%"REG_d" \n\t" 00699 "movdqa (%%"REG_D"), %%xmm0 \n\t" 00700 "movdqa %%xmm1, %%xmm2 \n\t" 00701 00702 "punpckhwd %%xmm7, %%xmm1 \n\t" 00703 "punpcklwd %%xmm7, %%xmm2 \n\t" 00704 "paddd %%xmm2, %%xmm0 \n\t" 00705 "movdqa 16(%%"REG_D"), %%xmm2 \n\t" 00706 "paddd %%xmm1, %%xmm2 \n\t" 00707 "paddd %%xmm3, %%xmm0 \n\t" 00708 "paddd %%xmm3, %%xmm2 \n\t" 00709 00710 "mov %1, %%"REG_D" \n\t" 00711 "mov "PTR_SIZE"(%%"REG_D"), %%"REG_D";\n\t" 00712 "add %3, %%"REG_D" \n\t" 00713 00714 "movdqa (%%"REG_D"), %%xmm4 \n\t" 00715 "movdqa %%xmm5, %%xmm6 \n\t" 00716 "punpckhwd %%xmm7, %%xmm5 \n\t" 00717 "punpcklwd %%xmm7, %%xmm6 \n\t" 00718 "paddd %%xmm6, %%xmm4 \n\t" 00719 "movdqa 16(%%"REG_D"), %%xmm6 \n\t" 00720 "paddd %%xmm5, %%xmm6 \n\t" 00721 "paddd %%xmm3, %%xmm4 \n\t" 00722 "paddd %%xmm3, %%xmm6 \n\t" 00723 00724 "psrad $8, %%xmm0 \n\t" /* FRAC_BITS. */ 00725 "psrad $8, %%xmm2 \n\t" /* FRAC_BITS. */ 00726 "packssdw %%xmm2, %%xmm0 \n\t" 00727 "packuswb %%xmm7, %%xmm0 \n\t" 00728 "movq %%xmm0, (%%"REG_d") \n\t" 00729 00730 "psrad $8, %%xmm4 \n\t" /* FRAC_BITS. */ 00731 "psrad $8, %%xmm6 \n\t" /* FRAC_BITS. */ 00732 "packssdw %%xmm6, %%xmm4 \n\t" 00733 "packuswb %%xmm7, %%xmm4 \n\t" 00734 "movq %%xmm4, (%%"REG_d",%%"REG_c");\n\t" 00735 snow_inner_add_yblock_sse2_end_8 00736 } 00737 00738 static void inner_add_yblock_bw_16_obmc_32_sse2(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h, 00739 int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){ 00740 snow_inner_add_yblock_sse2_header 00741 snow_inner_add_yblock_sse2_start_16("xmm1", "xmm5", "3", "0") 00742 snow_inner_add_yblock_sse2_accum_16("2", "16") 00743 snow_inner_add_yblock_sse2_accum_16("1", "512") 00744 snow_inner_add_yblock_sse2_accum_16("0", "528") 00745 00746 "mov %0, %%"REG_d" \n\t" 00747 "psrlw $4, %%xmm1 \n\t" 00748 "psrlw $4, %%xmm5 \n\t" 00749 "paddw (%%"REG_D"), %%xmm1 \n\t" 00750 "paddw 16(%%"REG_D"), %%xmm5 \n\t" 00751 "paddw %%xmm3, %%xmm1 \n\t" 00752 "paddw %%xmm3, %%xmm5 \n\t" 00753 "psraw $4, %%xmm1 \n\t" /* FRAC_BITS. */ 00754 "psraw $4, %%xmm5 \n\t" /* FRAC_BITS. */ 00755 "packuswb %%xmm5, %%xmm1 \n\t" 00756 00757 "movdqu %%xmm1, (%%"REG_d") \n\t" 00758 00759 snow_inner_add_yblock_sse2_end_16 00760 } 00761 00762 #define snow_inner_add_yblock_mmx_header \ 00763 IDWTELEM * * dst_array = sb->line + src_y;\ 00764 x86_reg tmp;\ 00765 __asm__ volatile(\ 00766 "mov %7, %%"REG_c" \n\t"\ 00767 "mov %6, %2 \n\t"\ 00768 "mov %4, %%"REG_S" \n\t"\ 00769 "pxor %%mm7, %%mm7 \n\t" /* 0 */\ 00770 "pcmpeqd %%mm3, %%mm3 \n\t"\ 00771 "psllw $15, %%mm3 \n\t"\ 00772 "psrlw $12, %%mm3 \n\t" /* FRAC_BITS >> 1 */\ 00773 "1: \n\t"\ 00774 "mov %1, %%"REG_D" \n\t"\ 00775 "mov (%%"REG_D"), %%"REG_D" \n\t"\ 00776 "add %3, %%"REG_D" \n\t" 00777 00778 #define snow_inner_add_yblock_mmx_start(out_reg1, out_reg2, ptr_offset, s_offset, d_offset)\ 00779 "mov "PTR_SIZE"*"ptr_offset"(%%"REG_a"), %%"REG_d"; \n\t"\ 00780 "movd "d_offset"(%%"REG_d"), %%"out_reg1" \n\t"\ 00781 "movd "d_offset"+4(%%"REG_d"), %%"out_reg2" \n\t"\ 00782 "punpcklbw %%mm7, %%"out_reg1" \n\t"\ 00783 "punpcklbw %%mm7, %%"out_reg2" \n\t"\ 00784 "movd "s_offset"(%%"REG_S"), %%mm0 \n\t"\ 00785 "movd "s_offset"+4(%%"REG_S"), %%mm4 \n\t"\ 00786 "punpcklbw %%mm7, %%mm0 \n\t"\ 00787 "punpcklbw %%mm7, %%mm4 \n\t"\ 00788 "pmullw %%mm0, %%"out_reg1" \n\t"\ 00789 "pmullw %%mm4, %%"out_reg2" \n\t" 00790 00791 #define snow_inner_add_yblock_mmx_accum(ptr_offset, s_offset, d_offset) \ 00792 snow_inner_add_yblock_mmx_start("mm2", "mm6", ptr_offset, s_offset, d_offset)\ 00793 "paddusw %%mm2, %%mm1 \n\t"\ 00794 "paddusw %%mm6, %%mm5 \n\t" 00795 00796 #define snow_inner_add_yblock_mmx_mix(read_offset, write_offset)\ 00797 "mov %0, %%"REG_d" \n\t"\ 00798 "psrlw $4, %%mm1 \n\t"\ 00799 "psrlw $4, %%mm5 \n\t"\ 00800 "paddw "read_offset"(%%"REG_D"), %%mm1 \n\t"\ 00801 "paddw "read_offset"+8(%%"REG_D"), %%mm5 \n\t"\ 00802 "paddw %%mm3, %%mm1 \n\t"\ 00803 "paddw %%mm3, %%mm5 \n\t"\ 00804 "psraw $4, %%mm1 \n\t"\ 00805 "psraw $4, %%mm5 \n\t"\ 00806 "packuswb %%mm5, %%mm1 \n\t"\ 00807 "movq %%mm1, "write_offset"(%%"REG_d") \n\t" 00808 00809 #define snow_inner_add_yblock_mmx_end(s_step)\ 00810 "add $"s_step", %%"REG_S" \n\t"\ 00811 "add %%"REG_c", "PTR_SIZE"*3(%%"REG_a");\n\t"\ 00812 "add %%"REG_c", "PTR_SIZE"*2(%%"REG_a");\n\t"\ 00813 "add %%"REG_c", "PTR_SIZE"*1(%%"REG_a");\n\t"\ 00814 "add %%"REG_c", (%%"REG_a") \n\t"\ 00815 "add"OPSIZE " $"PTR_SIZE"*1, %1 \n\t"\ 00816 "add %%"REG_c", %0 \n\t"\ 00817 "dec %2 \n\t"\ 00818 "jnz 1b \n\t"\ 00819 :"+m"(dst8),"+m"(dst_array),"=&r"(tmp)\ 00820 :\ 00821 "rm"((x86_reg)(src_x<<1)),"m"(obmc),"a"(block),"m"(b_h),"m"(src_stride):\ 00822 "%"REG_c"","%"REG_S"","%"REG_D"","%"REG_d""); 00823 00824 static void inner_add_yblock_bw_8_obmc_16_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h, 00825 int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){ 00826 snow_inner_add_yblock_mmx_header 00827 snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0") 00828 snow_inner_add_yblock_mmx_accum("2", "8", "0") 00829 snow_inner_add_yblock_mmx_accum("1", "128", "0") 00830 snow_inner_add_yblock_mmx_accum("0", "136", "0") 00831 snow_inner_add_yblock_mmx_mix("0", "0") 00832 snow_inner_add_yblock_mmx_end("16") 00833 } 00834 00835 static void inner_add_yblock_bw_16_obmc_32_mmx(const uint8_t *obmc, const x86_reg obmc_stride, uint8_t * * block, int b_w, x86_reg b_h, 00836 int src_x, int src_y, x86_reg src_stride, slice_buffer * sb, int add, uint8_t * dst8){ 00837 snow_inner_add_yblock_mmx_header 00838 snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "0", "0") 00839 snow_inner_add_yblock_mmx_accum("2", "16", "0") 00840 snow_inner_add_yblock_mmx_accum("1", "512", "0") 00841 snow_inner_add_yblock_mmx_accum("0", "528", "0") 00842 snow_inner_add_yblock_mmx_mix("0", "0") 00843 00844 snow_inner_add_yblock_mmx_start("mm1", "mm5", "3", "8", "8") 00845 snow_inner_add_yblock_mmx_accum("2", "24", "8") 00846 snow_inner_add_yblock_mmx_accum("1", "520", "8") 00847 snow_inner_add_yblock_mmx_accum("0", "536", "8") 00848 snow_inner_add_yblock_mmx_mix("16", "8") 00849 snow_inner_add_yblock_mmx_end("32") 00850 } 00851 00852 static void ff_snow_inner_add_yblock_sse2(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, 00853 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){ 00854 00855 if (b_w == 16) 00856 inner_add_yblock_bw_16_obmc_32_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); 00857 else if (b_w == 8 && obmc_stride == 16) { 00858 if (!(b_h & 1)) 00859 inner_add_yblock_bw_8_obmc_16_bh_even_sse2(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); 00860 else 00861 inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); 00862 } else 00863 ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); 00864 } 00865 00866 static void ff_snow_inner_add_yblock_mmx(const uint8_t *obmc, const int obmc_stride, uint8_t * * block, int b_w, int b_h, 00867 int src_x, int src_y, int src_stride, slice_buffer * sb, int add, uint8_t * dst8){ 00868 if (b_w == 16) 00869 inner_add_yblock_bw_16_obmc_32_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); 00870 else if (b_w == 8 && obmc_stride == 16) 00871 inner_add_yblock_bw_8_obmc_16_mmx(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); 00872 else 00873 ff_snow_inner_add_yblock(obmc, obmc_stride, block, b_w, b_h, src_x,src_y, src_stride, sb, add, dst8); 00874 } 00875 00876 void ff_dwt_init_x86(DWTContext *c) 00877 { 00878 int mm_flags = av_get_cpu_flags(); 00879 00880 if (mm_flags & AV_CPU_FLAG_MMX) { 00881 if(mm_flags & AV_CPU_FLAG_SSE2 & 0){ 00882 c->horizontal_compose97i = ff_snow_horizontal_compose97i_sse2; 00883 #if HAVE_7REGS 00884 c->vertical_compose97i = ff_snow_vertical_compose97i_sse2; 00885 #endif 00886 c->inner_add_yblock = ff_snow_inner_add_yblock_sse2; 00887 } 00888 else{ 00889 if(mm_flags & AV_CPU_FLAG_MMX2){ 00890 c->horizontal_compose97i = ff_snow_horizontal_compose97i_mmx; 00891 #if HAVE_7REGS 00892 c->vertical_compose97i = ff_snow_vertical_compose97i_mmx; 00893 #endif 00894 } 00895 c->inner_add_yblock = ff_snow_inner_add_yblock_mmx; 00896 } 00897 } 00898 }