libavcodec/x86/dsputil_mmx.c

00001 /*
00002  * MMX optimized DSP utils
00003  * Copyright (c) 2000, 2001 Fabrice Bellard
00004  * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
00005  *
00006  * This file is part of FFmpeg.
00007  *
00008  * FFmpeg is free software; you can redistribute it and/or
00009  * modify it under the terms of the GNU Lesser General Public
00010  * License as published by the Free Software Foundation; either
00011  * version 2.1 of the License, or (at your option) any later version.
00012  *
00013  * FFmpeg is distributed in the hope that it will be useful,
00014  * but WITHOUT ANY WARRANTY; without even the implied warranty of
00015  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00016  * Lesser General Public License for more details.
00017  *
00018  * You should have received a copy of the GNU Lesser General Public
00019  * License along with FFmpeg; if not, write to the Free Software
00020  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
00021  *
00022  * MMX optimization by Nick Kurshev <nickols_k@mail.ru>
00023  */
00024 
00025 #include "libavutil/x86_cpu.h"
00026 #include "libavcodec/dsputil.h"
00027 #include "libavcodec/h264dsp.h"
00028 #include "libavcodec/mpegvideo.h"
00029 #include "libavcodec/simple_idct.h"
00030 #include "dsputil_mmx.h"
00031 #include "vp3dsp_mmx.h"
00032 #include "vp3dsp_sse2.h"
00033 #include "vp6dsp_mmx.h"
00034 #include "vp6dsp_sse2.h"
00035 #include "idct_xvid.h"
00036 
00037 //#undef NDEBUG
00038 //#include <assert.h>
00039 
00040 int mm_flags; /* multimedia extension flags */
00041 
00042 /* pixel operations */
00043 DECLARE_ALIGNED(8,  const uint64_t, ff_bone) = 0x0101010101010101ULL;
00044 DECLARE_ALIGNED(8,  const uint64_t, ff_wtwo) = 0x0002000200020002ULL;
00045 
00046 DECLARE_ALIGNED(16, const uint64_t, ff_pdw_80000000)[2] =
00047 {0x8000000080000000ULL, 0x8000000080000000ULL};
00048 
00049 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_3  ) = 0x0003000300030003ULL;
00050 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_4  ) = 0x0004000400040004ULL;
00051 DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_5  ) = {0x0005000500050005ULL, 0x0005000500050005ULL};
00052 DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_8  ) = {0x0008000800080008ULL, 0x0008000800080008ULL};
00053 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_15 ) = 0x000F000F000F000FULL;
00054 DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_16 ) = {0x0010001000100010ULL, 0x0010001000100010ULL};
00055 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_20 ) = 0x0014001400140014ULL;
00056 DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_28 ) = {0x001C001C001C001CULL, 0x001C001C001C001CULL};
00057 DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_32 ) = {0x0020002000200020ULL, 0x0020002000200020ULL};
00058 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_42 ) = 0x002A002A002A002AULL;
00059 DECLARE_ALIGNED(16, const xmm_reg,  ff_pw_64 ) = {0x0040004000400040ULL, 0x0040004000400040ULL};
00060 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_96 ) = 0x0060006000600060ULL;
00061 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_128) = 0x0080008000800080ULL;
00062 DECLARE_ALIGNED(8,  const uint64_t, ff_pw_255) = 0x00ff00ff00ff00ffULL;
00063 
00064 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1  ) = 0x0101010101010101ULL;
00065 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3  ) = 0x0303030303030303ULL;
00066 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_7  ) = 0x0707070707070707ULL;
00067 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_1F ) = 0x1F1F1F1F1F1F1F1FULL;
00068 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_3F ) = 0x3F3F3F3F3F3F3F3FULL;
00069 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_81 ) = 0x8181818181818181ULL;
00070 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_A1 ) = 0xA1A1A1A1A1A1A1A1ULL;
00071 DECLARE_ALIGNED(8,  const uint64_t, ff_pb_FC ) = 0xFCFCFCFCFCFCFCFCULL;
00072 
00073 DECLARE_ALIGNED(16, const double, ff_pd_1)[2] = { 1.0, 1.0 };
00074 DECLARE_ALIGNED(16, const double, ff_pd_2)[2] = { 2.0, 2.0 };
00075 
00076 #define JUMPALIGN() __asm__ volatile (ASMALIGN(3)::)
00077 #define MOVQ_ZERO(regd)  __asm__ volatile ("pxor %%" #regd ", %%" #regd ::)
00078 
00079 #define MOVQ_BFE(regd) \
00080     __asm__ volatile ( \
00081     "pcmpeqd %%" #regd ", %%" #regd " \n\t"\
00082     "paddb %%" #regd ", %%" #regd " \n\t" ::)
00083 
00084 #ifndef PIC
00085 #define MOVQ_BONE(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_bone))
00086 #define MOVQ_WTWO(regd)  __asm__ volatile ("movq %0, %%" #regd " \n\t" ::"m"(ff_wtwo))
00087 #else
00089 // For shared libraries (PIC) it is better to synthesize these constants in
00090 // registers than to load them from memory; pcmpeqd sets a register to -1 (all ones).
00090 #define MOVQ_BONE(regd) \
00091     __asm__ volatile ( \
00092     "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
00093     "psrlw $15, %%" #regd " \n\t" \
00094     "packuswb %%" #regd ", %%" #regd " \n\t" ::)
00095 
00096 #define MOVQ_WTWO(regd) \
00097     __asm__ volatile ( \
00098     "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
00099     "psrlw $15, %%" #regd " \n\t" \
00100     "psllw $1, %%" #regd " \n\t"::)
00101 
00102 #endif
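/*
 * Illustrative scalar sketch (disabled, not part of the original source): the
 * constants that the PIC variants of MOVQ_BONE/MOVQ_WTWO synthesize in-register.
 * pcmpeqd sets every bit, psrlw $15 leaves 0x0001 in each 16-bit word, packuswb
 * packs those words to bytes (ff_bone), psllw $1 doubles them (ff_wtwo).
 * The helper name below is hypothetical.
 */
#if 0
static int check_pic_constants(void)
{
    uint64_t w    = 0xFFFFFFFFFFFFFFFFULL;     /* pcmpeqd: all bits set              */
    uint64_t bone = 0x0101010101010101ULL;     /* packuswb of 0x0001 words: ff_bone  */
    uint64_t wtwo;

    w   &= 0x0001000100010001ULL;              /* psrlw $15 on each 16-bit word      */
    wtwo = w << 1;                             /* psllw $1: 0x0002 in each word      */
    return bone == ff_bone && wtwo == ff_wtwo;
}
#endif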
00103 
00104 // regr is used as a temporary and holds the output result;
00105 // the first argument is unmodified and the second is trashed.
00106 // regfe is expected to contain 0xfefefefefefefefe (see the scalar sketch below).
00107 #define PAVGB_MMX_NO_RND(rega, regb, regr, regfe) \
00108     "movq " #rega ", " #regr "  \n\t"\
00109     "pand " #regb ", " #regr "  \n\t"\
00110     "pxor " #rega ", " #regb "  \n\t"\
00111     "pand " #regfe "," #regb "  \n\t"\
00112     "psrlq $1, " #regb "        \n\t"\
00113     "paddb " #regb ", " #regr " \n\t"
00114 
00115 #define PAVGB_MMX(rega, regb, regr, regfe) \
00116     "movq " #rega ", " #regr "  \n\t"\
00117     "por  " #regb ", " #regr "  \n\t"\
00118     "pxor " #rega ", " #regb "  \n\t"\
00119     "pand " #regfe "," #regb "  \n\t"\
00120     "psrlq $1, " #regb "        \n\t"\
00121     "psubb " #regb ", " #regr " \n\t"
00122 
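/*
 * Illustrative scalar sketch (disabled, not part of the original source): the
 * per-byte averaging identities the PAVGB macros above rely on. The 0xfe mask
 * is needed in the SIMD code because psrlq shifts across byte boundaries, so
 * the low bit of every byte must be cleared before the shift. Helper names are
 * hypothetical.
 */
#if 0
static uint8_t avg_rnd_ref(uint8_t a, uint8_t b)    /* (a + b + 1) >> 1 */
{
    return (a | b) - (((a ^ b) & 0xFE) >> 1);
}

static uint8_t avg_no_rnd_ref(uint8_t a, uint8_t b) /* (a + b) >> 1 */
{
    return (a & b) + (((a ^ b) & 0xFE) >> 1);
}
#endif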
00123 // mm6 is supposed to contain 0xfefefefefefefefe
00124 #define PAVGBP_MMX_NO_RND(rega, regb, regr,  regc, regd, regp) \
00125     "movq " #rega ", " #regr "  \n\t"\
00126     "movq " #regc ", " #regp "  \n\t"\
00127     "pand " #regb ", " #regr "  \n\t"\
00128     "pand " #regd ", " #regp "  \n\t"\
00129     "pxor " #rega ", " #regb "  \n\t"\
00130     "pxor " #regc ", " #regd "  \n\t"\
00131     "pand %%mm6, " #regb "      \n\t"\
00132     "pand %%mm6, " #regd "      \n\t"\
00133     "psrlq $1, " #regb "        \n\t"\
00134     "psrlq $1, " #regd "        \n\t"\
00135     "paddb " #regb ", " #regr " \n\t"\
00136     "paddb " #regd ", " #regp " \n\t"
00137 
00138 #define PAVGBP_MMX(rega, regb, regr, regc, regd, regp) \
00139     "movq " #rega ", " #regr "  \n\t"\
00140     "movq " #regc ", " #regp "  \n\t"\
00141     "por  " #regb ", " #regr "  \n\t"\
00142     "por  " #regd ", " #regp "  \n\t"\
00143     "pxor " #rega ", " #regb "  \n\t"\
00144     "pxor " #regc ", " #regd "  \n\t"\
00145     "pand %%mm6, " #regb "      \n\t"\
00146     "pand %%mm6, " #regd "      \n\t"\
00147     "psrlq $1, " #regd "        \n\t"\
00148     "psrlq $1, " #regb "        \n\t"\
00149     "psubb " #regb ", " #regr " \n\t"\
00150     "psubb " #regd ", " #regp " \n\t"
00151 
00152 /***********************************/
00153 /* MMX no rounding */
00154 #define DEF(x, y) x ## _no_rnd_ ## y ##_mmx
00155 #define SET_RND  MOVQ_WONE
00156 #define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX_NO_RND(a, b, c, d, e, f)
00157 #define PAVGB(a, b, c, e)               PAVGB_MMX_NO_RND(a, b, c, e)
00158 #define OP_AVG(a, b, c, e)              PAVGB_MMX(a, b, c, e)
00159 
00160 #include "dsputil_mmx_rnd_template.c"
00161 
00162 #undef DEF
00163 #undef SET_RND
00164 #undef PAVGBP
00165 #undef PAVGB
00166 /***********************************/
00167 /* MMX rounding */
00168 
00169 #define DEF(x, y) x ## _ ## y ##_mmx
00170 #define SET_RND  MOVQ_WTWO
00171 #define PAVGBP(a, b, c, d, e, f)        PAVGBP_MMX(a, b, c, d, e, f)
00172 #define PAVGB(a, b, c, e)               PAVGB_MMX(a, b, c, e)
00173 
00174 #include "dsputil_mmx_rnd_template.c"
00175 
00176 #undef DEF
00177 #undef SET_RND
00178 #undef PAVGBP
00179 #undef PAVGB
00180 #undef OP_AVG
00181 
00182 /***********************************/
00183 /* 3Dnow specific */
00184 
00185 #define DEF(x) x ## _3dnow
00186 #define PAVGB "pavgusb"
00187 #define OP_AVG PAVGB
00188 
00189 #include "dsputil_mmx_avg_template.c"
00190 
00191 #undef DEF
00192 #undef PAVGB
00193 #undef OP_AVG
00194 
00195 /***********************************/
00196 /* MMX2 specific */
00197 
00198 #define DEF(x) x ## _mmx2
00199 
00200 /* pavgb was only introduced with the MMX2 instruction set */
00201 #define PAVGB "pavgb"
00202 #define OP_AVG PAVGB
00203 
00204 #include "dsputil_mmx_avg_template.c"
00205 
00206 #undef DEF
00207 #undef PAVGB
00208 #undef OP_AVG
00209 
00210 #define put_no_rnd_pixels16_mmx put_pixels16_mmx
00211 #define put_no_rnd_pixels8_mmx put_pixels8_mmx
00212 #define put_pixels16_mmx2 put_pixels16_mmx
00213 #define put_pixels8_mmx2 put_pixels8_mmx
00214 #define put_pixels4_mmx2 put_pixels4_mmx
00215 #define put_no_rnd_pixels16_mmx2 put_no_rnd_pixels16_mmx
00216 #define put_no_rnd_pixels8_mmx2 put_no_rnd_pixels8_mmx
00217 #define put_pixels16_3dnow put_pixels16_mmx
00218 #define put_pixels8_3dnow put_pixels8_mmx
00219 #define put_pixels4_3dnow put_pixels4_mmx
00220 #define put_no_rnd_pixels16_3dnow put_no_rnd_pixels16_mmx
00221 #define put_no_rnd_pixels8_3dnow put_no_rnd_pixels8_mmx
00222 
00223 /***********************************/
00224 /* standard MMX */
00225 
00226 void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
00227 {
00228     const DCTELEM *p;
00229     uint8_t *pix;
00230 
00231     /* read the pixels */
00232     p = block;
00233     pix = pixels;
00234     /* unrolled loop */
00235         __asm__ volatile(
00236                 "movq   %3, %%mm0               \n\t"
00237                 "movq   8%3, %%mm1              \n\t"
00238                 "movq   16%3, %%mm2             \n\t"
00239                 "movq   24%3, %%mm3             \n\t"
00240                 "movq   32%3, %%mm4             \n\t"
00241                 "movq   40%3, %%mm5             \n\t"
00242                 "movq   48%3, %%mm6             \n\t"
00243                 "movq   56%3, %%mm7             \n\t"
00244                 "packuswb %%mm1, %%mm0          \n\t"
00245                 "packuswb %%mm3, %%mm2          \n\t"
00246                 "packuswb %%mm5, %%mm4          \n\t"
00247                 "packuswb %%mm7, %%mm6          \n\t"
00248                 "movq   %%mm0, (%0)             \n\t"
00249                 "movq   %%mm2, (%0, %1)         \n\t"
00250                 "movq   %%mm4, (%0, %1, 2)      \n\t"
00251                 "movq   %%mm6, (%0, %2)         \n\t"
00252                 ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "m"(*p)
00253                 :"memory");
00254         pix += line_size*4;
00255         p += 32;
00256 
00257     // If this block were an exact copy of the code above, the compiler
00258     // would generate some very strange code, so the block pointer uses an
00259     // "r" constraint here instead of "m".
00260     __asm__ volatile(
00261             "movq       (%3), %%mm0             \n\t"
00262             "movq       8(%3), %%mm1            \n\t"
00263             "movq       16(%3), %%mm2           \n\t"
00264             "movq       24(%3), %%mm3           \n\t"
00265             "movq       32(%3), %%mm4           \n\t"
00266             "movq       40(%3), %%mm5           \n\t"
00267             "movq       48(%3), %%mm6           \n\t"
00268             "movq       56(%3), %%mm7           \n\t"
00269             "packuswb %%mm1, %%mm0              \n\t"
00270             "packuswb %%mm3, %%mm2              \n\t"
00271             "packuswb %%mm5, %%mm4              \n\t"
00272             "packuswb %%mm7, %%mm6              \n\t"
00273             "movq       %%mm0, (%0)             \n\t"
00274             "movq       %%mm2, (%0, %1)         \n\t"
00275             "movq       %%mm4, (%0, %1, 2)      \n\t"
00276             "movq       %%mm6, (%0, %2)         \n\t"
00277             ::"r" (pix), "r" ((x86_reg)line_size), "r" ((x86_reg)line_size*3), "r"(p)
00278             :"memory");
00279 }
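/*
 * Illustrative scalar sketch (disabled, not part of the original source):
 * what put_pixels_clamped_mmx() computes. packuswb saturates each 16-bit
 * coefficient to 0..255 before storing 8 bytes per row over 8 rows.
 * The helper name is hypothetical.
 */
#if 0
static void put_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int x, y;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int v = block[y * 8 + x];
            pixels[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
#endif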
00280 
00281 DECLARE_ASM_CONST(8, uint8_t, ff_vector128)[8] =
00282   { 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 };
00283 
00284 #define put_signed_pixels_clamped_mmx_half(off) \
00285             "movq    "#off"(%2), %%mm1          \n\t"\
00286             "movq 16+"#off"(%2), %%mm2          \n\t"\
00287             "movq 32+"#off"(%2), %%mm3          \n\t"\
00288             "movq 48+"#off"(%2), %%mm4          \n\t"\
00289             "packsswb  8+"#off"(%2), %%mm1      \n\t"\
00290             "packsswb 24+"#off"(%2), %%mm2      \n\t"\
00291             "packsswb 40+"#off"(%2), %%mm3      \n\t"\
00292             "packsswb 56+"#off"(%2), %%mm4      \n\t"\
00293             "paddb %%mm0, %%mm1                 \n\t"\
00294             "paddb %%mm0, %%mm2                 \n\t"\
00295             "paddb %%mm0, %%mm3                 \n\t"\
00296             "paddb %%mm0, %%mm4                 \n\t"\
00297             "movq %%mm1, (%0)                   \n\t"\
00298             "movq %%mm2, (%0, %3)               \n\t"\
00299             "movq %%mm3, (%0, %3, 2)            \n\t"\
00300             "movq %%mm4, (%0, %1)               \n\t"
00301 
00302 void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
00303 {
00304     x86_reg line_skip = line_size;
00305     x86_reg line_skip3;
00306 
00307     __asm__ volatile (
00308             "movq "MANGLE(ff_vector128)", %%mm0 \n\t"
00309             "lea (%3, %3, 2), %1                \n\t"
00310             put_signed_pixels_clamped_mmx_half(0)
00311             "lea (%0, %3, 4), %0                \n\t"
00312             put_signed_pixels_clamped_mmx_half(64)
00313             :"+&r" (pixels), "=&r" (line_skip3)
00314             :"r" (block), "r"(line_skip)
00315             :"memory");
00316 }
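/*
 * Illustrative scalar sketch (disabled, not part of the original source):
 * what put_signed_pixels_clamped_mmx() computes. packsswb saturates each
 * coefficient to -128..127 and the paddb with ff_vector128 re-biases it to
 * 0..255, i.e. block[i] + 128 clipped to the unsigned byte range.
 * The helper name is hypothetical.
 */
#if 0
static void put_signed_pixels_clamped_ref(const DCTELEM *block, uint8_t *pixels, int line_size)
{
    int x, y;
    for (y = 0; y < 8; y++) {
        for (x = 0; x < 8; x++) {
            int v = block[y * 8 + x] + 128;
            pixels[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        pixels += line_size;
    }
}
#endif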
00317 
00318 void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size)
00319 {
00320     const DCTELEM *p;
00321     uint8_t *pix;
00322     int i;
00323 
00324     /* read the pixels */
00325     p = block;
00326     pix = pixels;
00327     MOVQ_ZERO(mm7);
00328     i = 4;
00329     do {
00330         __asm__ volatile(
00331                 "movq   (%2), %%mm0     \n\t"
00332                 "movq   8(%2), %%mm1    \n\t"
00333                 "movq   16(%2), %%mm2   \n\t"
00334                 "movq   24(%2), %%mm3   \n\t"
00335                 "movq   %0, %%mm4       \n\t"
00336                 "movq   %1, %%mm6       \n\t"
00337                 "movq   %%mm4, %%mm5    \n\t"
00338                 "punpcklbw %%mm7, %%mm4 \n\t"
00339                 "punpckhbw %%mm7, %%mm5 \n\t"
00340                 "paddsw %%mm4, %%mm0    \n\t"
00341                 "paddsw %%mm5, %%mm1    \n\t"
00342                 "movq   %%mm6, %%mm5    \n\t"
00343                 "punpcklbw %%mm7, %%mm6 \n\t"
00344                 "punpckhbw %%mm7, %%mm5 \n\t"
00345                 "paddsw %%mm6, %%mm2    \n\t"
00346                 "paddsw %%mm5, %%mm3    \n\t"
00347                 "packuswb %%mm1, %%mm0  \n\t"
00348                 "packuswb %%mm3, %%mm2  \n\t"
00349                 "movq   %%mm0, %0       \n\t"
00350                 "movq   %%mm2, %1       \n\t"
00351                 :"+m"(*pix), "+m"(*(pix+line_size))
00352                 :"r"(p)
00353                 :"memory");
00354         pix += line_size*2;
00355         p += 16;
00356     } while (--i);
00357 }
00358 
00359 static void put_pixels4_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00360 {
00361     __asm__ volatile(
00362          "lea (%3, %3), %%"REG_a"       \n\t"
00363          ASMALIGN(3)
00364          "1:                            \n\t"
00365          "movd (%1), %%mm0              \n\t"
00366          "movd (%1, %3), %%mm1          \n\t"
00367          "movd %%mm0, (%2)              \n\t"
00368          "movd %%mm1, (%2, %3)          \n\t"
00369          "add %%"REG_a", %1             \n\t"
00370          "add %%"REG_a", %2             \n\t"
00371          "movd (%1), %%mm0              \n\t"
00372          "movd (%1, %3), %%mm1          \n\t"
00373          "movd %%mm0, (%2)              \n\t"
00374          "movd %%mm1, (%2, %3)          \n\t"
00375          "add %%"REG_a", %1             \n\t"
00376          "add %%"REG_a", %2             \n\t"
00377          "subl $4, %0                   \n\t"
00378          "jnz 1b                        \n\t"
00379          : "+g"(h), "+r" (pixels),  "+r" (block)
00380          : "r"((x86_reg)line_size)
00381          : "%"REG_a, "memory"
00382         );
00383 }
00384 
00385 static void put_pixels8_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00386 {
00387     __asm__ volatile(
00388          "lea (%3, %3), %%"REG_a"       \n\t"
00389          ASMALIGN(3)
00390          "1:                            \n\t"
00391          "movq (%1), %%mm0              \n\t"
00392          "movq (%1, %3), %%mm1          \n\t"
00393          "movq %%mm0, (%2)              \n\t"
00394          "movq %%mm1, (%2, %3)          \n\t"
00395          "add %%"REG_a", %1             \n\t"
00396          "add %%"REG_a", %2             \n\t"
00397          "movq (%1), %%mm0              \n\t"
00398          "movq (%1, %3), %%mm1          \n\t"
00399          "movq %%mm0, (%2)              \n\t"
00400          "movq %%mm1, (%2, %3)          \n\t"
00401          "add %%"REG_a", %1             \n\t"
00402          "add %%"REG_a", %2             \n\t"
00403          "subl $4, %0                   \n\t"
00404          "jnz 1b                        \n\t"
00405          : "+g"(h), "+r" (pixels),  "+r" (block)
00406          : "r"((x86_reg)line_size)
00407          : "%"REG_a, "memory"
00408         );
00409 }
00410 
00411 static void put_pixels16_mmx(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00412 {
00413     __asm__ volatile(
00414          "lea (%3, %3), %%"REG_a"       \n\t"
00415          ASMALIGN(3)
00416          "1:                            \n\t"
00417          "movq (%1), %%mm0              \n\t"
00418          "movq 8(%1), %%mm4             \n\t"
00419          "movq (%1, %3), %%mm1          \n\t"
00420          "movq 8(%1, %3), %%mm5         \n\t"
00421          "movq %%mm0, (%2)              \n\t"
00422          "movq %%mm4, 8(%2)             \n\t"
00423          "movq %%mm1, (%2, %3)          \n\t"
00424          "movq %%mm5, 8(%2, %3)         \n\t"
00425          "add %%"REG_a", %1             \n\t"
00426          "add %%"REG_a", %2             \n\t"
00427          "movq (%1), %%mm0              \n\t"
00428          "movq 8(%1), %%mm4             \n\t"
00429          "movq (%1, %3), %%mm1          \n\t"
00430          "movq 8(%1, %3), %%mm5         \n\t"
00431          "movq %%mm0, (%2)              \n\t"
00432          "movq %%mm4, 8(%2)             \n\t"
00433          "movq %%mm1, (%2, %3)          \n\t"
00434          "movq %%mm5, 8(%2, %3)         \n\t"
00435          "add %%"REG_a", %1             \n\t"
00436          "add %%"REG_a", %2             \n\t"
00437          "subl $4, %0                   \n\t"
00438          "jnz 1b                        \n\t"
00439          : "+g"(h), "+r" (pixels),  "+r" (block)
00440          : "r"((x86_reg)line_size)
00441          : "%"REG_a, "memory"
00442         );
00443 }
00444 
00445 static void put_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00446 {
00447     __asm__ volatile(
00448          "1:                            \n\t"
00449          "movdqu (%1), %%xmm0           \n\t"
00450          "movdqu (%1,%3), %%xmm1        \n\t"
00451          "movdqu (%1,%3,2), %%xmm2      \n\t"
00452          "movdqu (%1,%4), %%xmm3        \n\t"
00453          "movdqa %%xmm0, (%2)           \n\t"
00454          "movdqa %%xmm1, (%2,%3)        \n\t"
00455          "movdqa %%xmm2, (%2,%3,2)      \n\t"
00456          "movdqa %%xmm3, (%2,%4)        \n\t"
00457          "subl $4, %0                   \n\t"
00458          "lea (%1,%3,4), %1             \n\t"
00459          "lea (%2,%3,4), %2             \n\t"
00460          "jnz 1b                        \n\t"
00461          : "+g"(h), "+r" (pixels),  "+r" (block)
00462          : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
00463          : "memory"
00464         );
00465 }
00466 
00467 static void avg_pixels16_sse2(uint8_t *block, const uint8_t *pixels, int line_size, int h)
00468 {
00469     __asm__ volatile(
00470          "1:                            \n\t"
00471          "movdqu (%1), %%xmm0           \n\t"
00472          "movdqu (%1,%3), %%xmm1        \n\t"
00473          "movdqu (%1,%3,2), %%xmm2      \n\t"
00474          "movdqu (%1,%4), %%xmm3        \n\t"
00475          "pavgb  (%2), %%xmm0           \n\t"
00476          "pavgb  (%2,%3), %%xmm1        \n\t"
00477          "pavgb  (%2,%3,2), %%xmm2      \n\t"
00478          "pavgb  (%2,%4), %%xmm3        \n\t"
00479          "movdqa %%xmm0, (%2)           \n\t"
00480          "movdqa %%xmm1, (%2,%3)        \n\t"
00481          "movdqa %%xmm2, (%2,%3,2)      \n\t"
00482          "movdqa %%xmm3, (%2,%4)        \n\t"
00483          "subl $4, %0                   \n\t"
00484          "lea (%1,%3,4), %1             \n\t"
00485          "lea (%2,%3,4), %2             \n\t"
00486          "jnz 1b                        \n\t"
00487          : "+g"(h), "+r" (pixels),  "+r" (block)
00488          : "r"((x86_reg)line_size), "r"((x86_reg)3L*line_size)
00489          : "memory"
00490         );
00491 }
00492 
00493 #define CLEAR_BLOCKS(name,n) \
00494 static void name(DCTELEM *blocks)\
00495 {\
00496     __asm__ volatile(\
00497                 "pxor %%mm7, %%mm7              \n\t"\
00498                 "mov     %1, %%"REG_a"          \n\t"\
00499                 "1:                             \n\t"\
00500                 "movq %%mm7, (%0, %%"REG_a")    \n\t"\
00501                 "movq %%mm7, 8(%0, %%"REG_a")   \n\t"\
00502                 "movq %%mm7, 16(%0, %%"REG_a")  \n\t"\
00503                 "movq %%mm7, 24(%0, %%"REG_a")  \n\t"\
00504                 "add $32, %%"REG_a"             \n\t"\
00505                 " js 1b                         \n\t"\
00506                 : : "r" (((uint8_t *)blocks)+128*n),\
00507                     "i" (-128*n)\
00508                 : "%"REG_a\
00509         );\
00510 }
00511 CLEAR_BLOCKS(clear_blocks_mmx, 6)
00512 CLEAR_BLOCKS(clear_block_mmx, 1)
00513 
00514 static void clear_block_sse(DCTELEM *block)
00515 {
00516     __asm__ volatile(
00517         "xorps  %%xmm0, %%xmm0  \n"
00518         "movaps %%xmm0,    (%0) \n"
00519         "movaps %%xmm0,  16(%0) \n"
00520         "movaps %%xmm0,  32(%0) \n"
00521         "movaps %%xmm0,  48(%0) \n"
00522         "movaps %%xmm0,  64(%0) \n"
00523         "movaps %%xmm0,  80(%0) \n"
00524         "movaps %%xmm0,  96(%0) \n"
00525         "movaps %%xmm0, 112(%0) \n"
00526         :: "r"(block)
00527         : "memory"
00528     );
00529 }
00530 
00531 static void clear_blocks_sse(DCTELEM *blocks)
00532 {
00533     __asm__ volatile(
00534         "xorps  %%xmm0, %%xmm0  \n"
00535         "mov     %1, %%"REG_a"  \n"
00536         "1:                     \n"
00537         "movaps %%xmm0,    (%0, %%"REG_a") \n"
00538         "movaps %%xmm0,  16(%0, %%"REG_a") \n"
00539         "movaps %%xmm0,  32(%0, %%"REG_a") \n"
00540         "movaps %%xmm0,  48(%0, %%"REG_a") \n"
00541         "movaps %%xmm0,  64(%0, %%"REG_a") \n"
00542         "movaps %%xmm0,  80(%0, %%"REG_a") \n"
00543         "movaps %%xmm0,  96(%0, %%"REG_a") \n"
00544         "movaps %%xmm0, 112(%0, %%"REG_a") \n"
00545         "add $128, %%"REG_a"    \n"
00546         " js 1b                 \n"
00547         : : "r" (((uint8_t *)blocks)+128*6),
00548             "i" (-128*6)
00549         : "%"REG_a
00550     );
00551 }
00552 
00553 static void add_bytes_mmx(uint8_t *dst, uint8_t *src, int w){
00554     x86_reg i=0;
00555     __asm__ volatile(
00556         "jmp 2f                         \n\t"
00557         "1:                             \n\t"
00558         "movq  (%1, %0), %%mm0          \n\t"
00559         "movq  (%2, %0), %%mm1          \n\t"
00560         "paddb %%mm0, %%mm1             \n\t"
00561         "movq %%mm1, (%2, %0)           \n\t"
00562         "movq 8(%1, %0), %%mm0          \n\t"
00563         "movq 8(%2, %0), %%mm1          \n\t"
00564         "paddb %%mm0, %%mm1             \n\t"
00565         "movq %%mm1, 8(%2, %0)          \n\t"
00566         "add $16, %0                    \n\t"
00567         "2:                             \n\t"
00568         "cmp %3, %0                     \n\t"
00569         " js 1b                         \n\t"
00570         : "+r" (i)
00571         : "r"(src), "r"(dst), "r"((x86_reg)w-15)
00572     );
00573     for(; i<w; i++)
00574         dst[i+0] += src[i+0];
00575 }
00576 
00577 static void add_bytes_l2_mmx(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w){
00578     x86_reg i=0;
00579     __asm__ volatile(
00580         "jmp 2f                         \n\t"
00581         "1:                             \n\t"
00582         "movq   (%2, %0), %%mm0         \n\t"
00583         "movq  8(%2, %0), %%mm1         \n\t"
00584         "paddb  (%3, %0), %%mm0         \n\t"
00585         "paddb 8(%3, %0), %%mm1         \n\t"
00586         "movq %%mm0,  (%1, %0)          \n\t"
00587         "movq %%mm1, 8(%1, %0)          \n\t"
00588         "add $16, %0                    \n\t"
00589         "2:                             \n\t"
00590         "cmp %4, %0                     \n\t"
00591         " js 1b                         \n\t"
00592         : "+r" (i)
00593         : "r"(dst), "r"(src1), "r"(src2), "r"((x86_reg)w-15)
00594     );
00595     for(; i<w; i++)
00596         dst[i] = src1[i] + src2[i];
00597 }
00598 
00599 #if HAVE_7REGS && HAVE_TEN_OPERANDS
00600 static void add_hfyu_median_prediction_cmov(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top) {
00601     x86_reg w2 = -w;
00602     x86_reg x;
00603     int l = *left & 0xff;
00604     int tl = *left_top & 0xff;
00605     int t;
00606     __asm__ volatile(
00607         "mov    %7, %3 \n"
00608         "1: \n"
00609         "movzx (%3,%4), %2 \n"
00610         "mov    %2, %k3 \n"
00611         "sub   %b1, %b3 \n"
00612         "add   %b0, %b3 \n"
00613         "mov    %2, %1 \n"
00614         "cmp    %0, %2 \n"
00615         "cmovg  %0, %2 \n"
00616         "cmovg  %1, %0 \n"
00617         "cmp   %k3, %0 \n"
00618         "cmovg %k3, %0 \n"
00619         "mov    %7, %3 \n"
00620         "cmp    %2, %0 \n"
00621         "cmovl  %2, %0 \n"
00622         "add (%6,%4), %b0 \n"
00623         "mov   %b0, (%5,%4) \n"
00624         "inc    %4 \n"
00625         "jl 1b \n"
00626         :"+&q"(l), "+&q"(tl), "=&r"(t), "=&q"(x), "+&r"(w2)
00627         :"r"(dst+w), "r"(diff+w), "rm"(top+w)
00628     );
00629     *left = l;
00630     *left_top = tl;
00631 }
00632 #endif
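/*
 * Illustrative scalar sketch (disabled, not part of the original source): the
 * HuffYUV median prediction that the cmov version above implements. Each byte
 * is predicted as the median of left, top and left + top - top_left (byte
 * arithmetic, hence the & 0xFF), then the residual is added. Helper names are
 * hypothetical.
 */
#if 0
static int mid_pred_ref(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }
    if (b > c) b = c;
    return a > b ? a : b;                 /* median of the three inputs */
}

static void add_hfyu_median_prediction_ref(uint8_t *dst, const uint8_t *top,
                                           const uint8_t *diff, int w,
                                           int *left, int *left_top)
{
    int i;
    uint8_t l  = *left;
    uint8_t lt = *left_top;

    for (i = 0; i < w; i++) {
        l      = mid_pred_ref(l, top[i], (l + top[i] - lt) & 0xFF) + diff[i];
        lt     = top[i];
        dst[i] = l;
    }
    *left     = l;
    *left_top = lt;
}
#endif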
00633 
00634 #define H263_LOOP_FILTER \
00635         "pxor %%mm7, %%mm7              \n\t"\
00636         "movq  %0, %%mm0                \n\t"\
00637         "movq  %0, %%mm1                \n\t"\
00638         "movq  %3, %%mm2                \n\t"\
00639         "movq  %3, %%mm3                \n\t"\
00640         "punpcklbw %%mm7, %%mm0         \n\t"\
00641         "punpckhbw %%mm7, %%mm1         \n\t"\
00642         "punpcklbw %%mm7, %%mm2         \n\t"\
00643         "punpckhbw %%mm7, %%mm3         \n\t"\
00644         "psubw %%mm2, %%mm0             \n\t"\
00645         "psubw %%mm3, %%mm1             \n\t"\
00646         "movq  %1, %%mm2                \n\t"\
00647         "movq  %1, %%mm3                \n\t"\
00648         "movq  %2, %%mm4                \n\t"\
00649         "movq  %2, %%mm5                \n\t"\
00650         "punpcklbw %%mm7, %%mm2         \n\t"\
00651         "punpckhbw %%mm7, %%mm3         \n\t"\
00652         "punpcklbw %%mm7, %%mm4         \n\t"\
00653         "punpckhbw %%mm7, %%mm5         \n\t"\
00654         "psubw %%mm2, %%mm4             \n\t"\
00655         "psubw %%mm3, %%mm5             \n\t"\
00656         "psllw $2, %%mm4                \n\t"\
00657         "psllw $2, %%mm5                \n\t"\
00658         "paddw %%mm0, %%mm4             \n\t"\
00659         "paddw %%mm1, %%mm5             \n\t"\
00660         "pxor %%mm6, %%mm6              \n\t"\
00661         "pcmpgtw %%mm4, %%mm6           \n\t"\
00662         "pcmpgtw %%mm5, %%mm7           \n\t"\
00663         "pxor %%mm6, %%mm4              \n\t"\
00664         "pxor %%mm7, %%mm5              \n\t"\
00665         "psubw %%mm6, %%mm4             \n\t"\
00666         "psubw %%mm7, %%mm5             \n\t"\
00667         "psrlw $3, %%mm4                \n\t"\
00668         "psrlw $3, %%mm5                \n\t"\
00669         "packuswb %%mm5, %%mm4          \n\t"\
00670         "packsswb %%mm7, %%mm6          \n\t"\
00671         "pxor %%mm7, %%mm7              \n\t"\
00672         "movd %4, %%mm2                 \n\t"\
00673         "punpcklbw %%mm2, %%mm2         \n\t"\
00674         "punpcklbw %%mm2, %%mm2         \n\t"\
00675         "punpcklbw %%mm2, %%mm2         \n\t"\
00676         "psubusb %%mm4, %%mm2           \n\t"\
00677         "movq %%mm2, %%mm3              \n\t"\
00678         "psubusb %%mm4, %%mm3           \n\t"\
00679         "psubb %%mm3, %%mm2             \n\t"\
00680         "movq %1, %%mm3                 \n\t"\
00681         "movq %2, %%mm4                 \n\t"\
00682         "pxor %%mm6, %%mm3              \n\t"\
00683         "pxor %%mm6, %%mm4              \n\t"\
00684         "paddusb %%mm2, %%mm3           \n\t"\
00685         "psubusb %%mm2, %%mm4           \n\t"\
00686         "pxor %%mm6, %%mm3              \n\t"\
00687         "pxor %%mm6, %%mm4              \n\t"\
00688         "paddusb %%mm2, %%mm2           \n\t"\
00689         "packsswb %%mm1, %%mm0          \n\t"\
00690         "pcmpgtb %%mm0, %%mm7           \n\t"\
00691         "pxor %%mm7, %%mm0              \n\t"\
00692         "psubb %%mm7, %%mm0             \n\t"\
00693         "movq %%mm0, %%mm1              \n\t"\
00694         "psubusb %%mm2, %%mm0           \n\t"\
00695         "psubb %%mm0, %%mm1             \n\t"\
00696         "pand %5, %%mm1                 \n\t"\
00697         "psrlw $2, %%mm1                \n\t"\
00698         "pxor %%mm7, %%mm1              \n\t"\
00699         "psubb %%mm7, %%mm1             \n\t"\
00700         "movq %0, %%mm5                 \n\t"\
00701         "movq %3, %%mm6                 \n\t"\
00702         "psubb %%mm1, %%mm5             \n\t"\
00703         "paddb %%mm1, %%mm6             \n\t"
00704 
00705 static void h263_v_loop_filter_mmx(uint8_t *src, int stride, int qscale){
00706     if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
00707     const int strength= ff_h263_loop_filter_strength[qscale];
00708 
00709     __asm__ volatile(
00710 
00711         H263_LOOP_FILTER
00712 
00713         "movq %%mm3, %1                 \n\t"
00714         "movq %%mm4, %2                 \n\t"
00715         "movq %%mm5, %0                 \n\t"
00716         "movq %%mm6, %3                 \n\t"
00717         : "+m" (*(uint64_t*)(src - 2*stride)),
00718           "+m" (*(uint64_t*)(src - 1*stride)),
00719           "+m" (*(uint64_t*)(src + 0*stride)),
00720           "+m" (*(uint64_t*)(src + 1*stride))
00721         : "g" (2*strength), "m"(ff_pb_FC)
00722     );
00723     }
00724 }
00725 
00726 static inline void transpose4x4(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride){
00727     __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
00728         "movd  %4, %%mm0                \n\t"
00729         "movd  %5, %%mm1                \n\t"
00730         "movd  %6, %%mm2                \n\t"
00731         "movd  %7, %%mm3                \n\t"
00732         "punpcklbw %%mm1, %%mm0         \n\t"
00733         "punpcklbw %%mm3, %%mm2         \n\t"
00734         "movq %%mm0, %%mm1              \n\t"
00735         "punpcklwd %%mm2, %%mm0         \n\t"
00736         "punpckhwd %%mm2, %%mm1         \n\t"
00737         "movd  %%mm0, %0                \n\t"
00738         "punpckhdq %%mm0, %%mm0         \n\t"
00739         "movd  %%mm0, %1                \n\t"
00740         "movd  %%mm1, %2                \n\t"
00741         "punpckhdq %%mm1, %%mm1         \n\t"
00742         "movd  %%mm1, %3                \n\t"
00743 
00744         : "=m" (*(uint32_t*)(dst + 0*dst_stride)),
00745           "=m" (*(uint32_t*)(dst + 1*dst_stride)),
00746           "=m" (*(uint32_t*)(dst + 2*dst_stride)),
00747           "=m" (*(uint32_t*)(dst + 3*dst_stride))
00748         :  "m" (*(uint32_t*)(src + 0*src_stride)),
00749            "m" (*(uint32_t*)(src + 1*src_stride)),
00750            "m" (*(uint32_t*)(src + 2*src_stride)),
00751            "m" (*(uint32_t*)(src + 3*src_stride))
00752     );
00753 }
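/*
 * Illustrative scalar sketch (disabled, not part of the original source):
 * what transpose4x4() computes; output row y receives column y of the 4x4
 * input block. The helper name is hypothetical.
 */
#if 0
static void transpose4x4_ref(uint8_t *dst, uint8_t *src, int dst_stride, int src_stride)
{
    int x, y;
    for (y = 0; y < 4; y++)
        for (x = 0; x < 4; x++)
            dst[y * dst_stride + x] = src[x * src_stride + y];
}
#endif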
00754 
00755 static void h263_h_loop_filter_mmx(uint8_t *src, int stride, int qscale){
00756     if(CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
00757     const int strength= ff_h263_loop_filter_strength[qscale];
00758     DECLARE_ALIGNED(8, uint64_t, temp)[4];
00759     uint8_t *btemp= (uint8_t*)temp;
00760 
00761     src -= 2;
00762 
00763     transpose4x4(btemp  , src           , 8, stride);
00764     transpose4x4(btemp+4, src + 4*stride, 8, stride);
00765     __asm__ volatile(
00766         H263_LOOP_FILTER // 5 3 4 6
00767 
00768         : "+m" (temp[0]),
00769           "+m" (temp[1]),
00770           "+m" (temp[2]),
00771           "+m" (temp[3])
00772         : "g" (2*strength), "m"(ff_pb_FC)
00773     );
00774 
00775     __asm__ volatile(
00776         "movq %%mm5, %%mm1              \n\t"
00777         "movq %%mm4, %%mm0              \n\t"
00778         "punpcklbw %%mm3, %%mm5         \n\t"
00779         "punpcklbw %%mm6, %%mm4         \n\t"
00780         "punpckhbw %%mm3, %%mm1         \n\t"
00781         "punpckhbw %%mm6, %%mm0         \n\t"
00782         "movq %%mm5, %%mm3              \n\t"
00783         "movq %%mm1, %%mm6              \n\t"
00784         "punpcklwd %%mm4, %%mm5         \n\t"
00785         "punpcklwd %%mm0, %%mm1         \n\t"
00786         "punpckhwd %%mm4, %%mm3         \n\t"
00787         "punpckhwd %%mm0, %%mm6         \n\t"
00788         "movd %%mm5, (%0)               \n\t"
00789         "punpckhdq %%mm5, %%mm5         \n\t"
00790         "movd %%mm5, (%0,%2)            \n\t"
00791         "movd %%mm3, (%0,%2,2)          \n\t"
00792         "punpckhdq %%mm3, %%mm3         \n\t"
00793         "movd %%mm3, (%0,%3)            \n\t"
00794         "movd %%mm1, (%1)               \n\t"
00795         "punpckhdq %%mm1, %%mm1         \n\t"
00796         "movd %%mm1, (%1,%2)            \n\t"
00797         "movd %%mm6, (%1,%2,2)          \n\t"
00798         "punpckhdq %%mm6, %%mm6         \n\t"
00799         "movd %%mm6, (%1,%3)            \n\t"
00800         :: "r" (src),
00801            "r" (src + 4*stride),
00802            "r" ((x86_reg)   stride ),
00803            "r" ((x86_reg)(3*stride))
00804     );
00805     }
00806 }
00807 
00808 /* Draw the edges of width 'w' of an image of size width x height.
00809    This MMX version can only handle w==8 or w==16; a scalar sketch follows the function. */
00810 static void draw_edges_mmx(uint8_t *buf, int wrap, int width, int height, int w)
00811 {
00812     uint8_t *ptr, *last_line;
00813     int i;
00814 
00815     last_line = buf + (height - 1) * wrap;
00816     /* left and right */
00817     ptr = buf;
00818     if(w==8)
00819     {
00820         __asm__ volatile(
00821                 "1:                             \n\t"
00822                 "movd (%0), %%mm0               \n\t"
00823                 "punpcklbw %%mm0, %%mm0         \n\t"
00824                 "punpcklwd %%mm0, %%mm0         \n\t"
00825                 "punpckldq %%mm0, %%mm0         \n\t"
00826                 "movq %%mm0, -8(%0)             \n\t"
00827                 "movq -8(%0, %2), %%mm1         \n\t"
00828                 "punpckhbw %%mm1, %%mm1         \n\t"
00829                 "punpckhwd %%mm1, %%mm1         \n\t"
00830                 "punpckhdq %%mm1, %%mm1         \n\t"
00831                 "movq %%mm1, (%0, %2)           \n\t"
00832                 "add %1, %0                     \n\t"
00833                 "cmp %3, %0                     \n\t"
00834                 " jb 1b                         \n\t"
00835                 : "+r" (ptr)
00836                 : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
00837         );
00838     }
00839     else
00840     {
00841         __asm__ volatile(
00842                 "1:                             \n\t"
00843                 "movd (%0), %%mm0               \n\t"
00844                 "punpcklbw %%mm0, %%mm0         \n\t"
00845                 "punpcklwd %%mm0, %%mm0         \n\t"
00846                 "punpckldq %%mm0, %%mm0         \n\t"
00847                 "movq %%mm0, -8(%0)             \n\t"
00848                 "movq %%mm0, -16(%0)            \n\t"
00849                 "movq -8(%0, %2), %%mm1         \n\t"
00850                 "punpckhbw %%mm1, %%mm1         \n\t"
00851                 "punpckhwd %%mm1, %%mm1         \n\t"
00852                 "punpckhdq %%mm1, %%mm1         \n\t"
00853                 "movq %%mm1, (%0, %2)           \n\t"
00854                 "movq %%mm1, 8(%0, %2)          \n\t"
00855                 "add %1, %0                     \n\t"
00856                 "cmp %3, %0                     \n\t"
00857                 " jb 1b                         \n\t"
00858                 : "+r" (ptr)
00859                 : "r" ((x86_reg)wrap), "r" ((x86_reg)width), "r" (ptr + wrap*height)
00860         );
00861     }
00862 
00863     for(i=0;i<w;i+=4) {
00864         /* top and bottom (and hopefully also the corners) */
00865         ptr= buf - (i + 1) * wrap - w;
00866         __asm__ volatile(
00867                 "1:                             \n\t"
00868                 "movq (%1, %0), %%mm0           \n\t"
00869                 "movq %%mm0, (%0)               \n\t"
00870                 "movq %%mm0, (%0, %2)           \n\t"
00871                 "movq %%mm0, (%0, %2, 2)        \n\t"
00872                 "movq %%mm0, (%0, %3)           \n\t"
00873                 "add $8, %0                     \n\t"
00874                 "cmp %4, %0                     \n\t"
00875                 " jb 1b                         \n\t"
00876                 : "+r" (ptr)
00877                 : "r" ((x86_reg)buf - (x86_reg)ptr - w), "r" ((x86_reg)-wrap), "r" ((x86_reg)-wrap*3), "r" (ptr+width+2*w)
00878         );
00879         ptr= last_line + (i + 1) * wrap - w;
00880         __asm__ volatile(
00881                 "1:                             \n\t"
00882                 "movq (%1, %0), %%mm0           \n\t"
00883                 "movq %%mm0, (%0)               \n\t"
00884                 "movq %%mm0, (%0, %2)           \n\t"
00885                 "movq %%mm0, (%0, %2, 2)        \n\t"
00886                 "movq %%mm0, (%0, %3)           \n\t"
00887                 "add $8, %0                     \n\t"
00888                 "cmp %4, %0                     \n\t"
00889                 " jb 1b                         \n\t"
00890                 : "+r" (ptr)
00891                 : "r" ((x86_reg)last_line - (x86_reg)ptr - w), "r" ((x86_reg)wrap), "r" ((x86_reg)wrap*3), "r" (ptr+width+2*w)
00892         );
00893     }
00894 }
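/*
 * Illustrative scalar sketch (disabled, not part of the original source): the
 * edge replication performed by draw_edges_mmx() above. Every row is first
 * extended left and right by replicating its first/last pixel, then the
 * already padded top and bottom rows are copied outwards 'w' times, which also
 * fills the corners. Assumes <string.h>; the helper name is hypothetical.
 */
#if 0
static void draw_edges_ref(uint8_t *buf, int wrap, int width, int height, int w)
{
    uint8_t *ptr = buf, *last_line = buf + (height - 1) * wrap;
    int i;

    for (i = 0; i < height; i++) {                 /* left and right edges      */
        memset(ptr - w,     ptr[0],         w);
        memset(ptr + width, ptr[width - 1], w);
        ptr += wrap;
    }
    for (i = 0; i < w; i++) {                      /* top and bottom (+corners) */
        memcpy(buf       - (i + 1) * wrap - w, buf       - w, width + 2 * w);
        memcpy(last_line + (i + 1) * wrap - w, last_line - w, width + 2 * w);
    }
}
#endif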
00895 
00896 #define PAETH(cpu, abs3)\
00897 static void add_png_paeth_prediction_##cpu(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)\
00898 {\
00899     x86_reg i = -bpp;\
00900     x86_reg end = w-3;\
00901     __asm__ volatile(\
00902         "pxor      %%mm7, %%mm7 \n"\
00903         "movd    (%1,%0), %%mm0 \n"\
00904         "movd    (%2,%0), %%mm1 \n"\
00905         "punpcklbw %%mm7, %%mm0 \n"\
00906         "punpcklbw %%mm7, %%mm1 \n"\
00907         "add       %4, %0 \n"\
00908         "1: \n"\
00909         "movq      %%mm1, %%mm2 \n"\
00910         "movd    (%2,%0), %%mm1 \n"\
00911         "movq      %%mm2, %%mm3 \n"\
00912         "punpcklbw %%mm7, %%mm1 \n"\
00913         "movq      %%mm2, %%mm4 \n"\
00914         "psubw     %%mm1, %%mm3 \n"\
00915         "psubw     %%mm0, %%mm4 \n"\
00916         "movq      %%mm3, %%mm5 \n"\
00917         "paddw     %%mm4, %%mm5 \n"\
00918         abs3\
00919         "movq      %%mm4, %%mm6 \n"\
00920         "pminsw    %%mm5, %%mm6 \n"\
00921         "pcmpgtw   %%mm6, %%mm3 \n"\
00922         "pcmpgtw   %%mm5, %%mm4 \n"\
00923         "movq      %%mm4, %%mm6 \n"\
00924         "pand      %%mm3, %%mm4 \n"\
00925         "pandn     %%mm3, %%mm6 \n"\
00926         "pandn     %%mm0, %%mm3 \n"\
00927         "movd    (%3,%0), %%mm0 \n"\
00928         "pand      %%mm1, %%mm6 \n"\
00929         "pand      %%mm4, %%mm2 \n"\
00930         "punpcklbw %%mm7, %%mm0 \n"\
00931         "movq      %6,    %%mm5 \n"\
00932         "paddw     %%mm6, %%mm0 \n"\
00933         "paddw     %%mm2, %%mm3 \n"\
00934         "paddw     %%mm3, %%mm0 \n"\
00935         "pand      %%mm5, %%mm0 \n"\
00936         "movq      %%mm0, %%mm3 \n"\
00937         "packuswb  %%mm3, %%mm3 \n"\
00938         "movd      %%mm3, (%1,%0) \n"\
00939         "add       %4, %0 \n"\
00940         "cmp       %5, %0 \n"\
00941         "jle 1b \n"\
00942         :"+r"(i)\
00943         :"r"(dst), "r"(top), "r"(src), "r"((x86_reg)bpp), "g"(end),\
00944          "m"(ff_pw_255)\
00945         :"memory"\
00946     );\
00947 }
00948 
00949 #define ABS3_MMX2\
00950         "psubw     %%mm5, %%mm7 \n"\
00951         "pmaxsw    %%mm7, %%mm5 \n"\
00952         "pxor      %%mm6, %%mm6 \n"\
00953         "pxor      %%mm7, %%mm7 \n"\
00954         "psubw     %%mm3, %%mm6 \n"\
00955         "psubw     %%mm4, %%mm7 \n"\
00956         "pmaxsw    %%mm6, %%mm3 \n"\
00957         "pmaxsw    %%mm7, %%mm4 \n"\
00958         "pxor      %%mm7, %%mm7 \n"
00959 
00960 #define ABS3_SSSE3\
00961         "pabsw     %%mm3, %%mm3 \n"\
00962         "pabsw     %%mm4, %%mm4 \n"\
00963         "pabsw     %%mm5, %%mm5 \n"
00964 
00965 PAETH(mmx2, ABS3_MMX2)
00966 #if HAVE_SSSE3
00967 PAETH(ssse3, ABS3_SSSE3)
00968 #endif
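/*
 * Illustrative scalar sketch (disabled, not part of the original source): the
 * per-byte PNG Paeth predictor that the PAETH() macro above vectorizes. Like
 * the SIMD code, it assumes dst and top have 'bpp' valid bytes before index 0.
 * Assumes <stdlib.h> for abs(); the helper name is hypothetical.
 */
#if 0
static void add_png_paeth_prediction_ref(uint8_t *dst, uint8_t *src, uint8_t *top, int w, int bpp)
{
    int i;
    for (i = 0; i < w; i++) {
        int a  = dst[i - bpp];                     /* left      */
        int b  = top[i];                           /* top       */
        int c  = top[i - bpp];                     /* top-left  */
        int p  = a + b - c;                        /* estimate  */
        int pa = abs(p - a);
        int pb = abs(p - b);
        int pc = abs(p - c);
        int pred = (pa <= pb && pa <= pc) ? a : (pb <= pc) ? b : c;

        dst[i] = pred + src[i];                    /* stored modulo 256 */
    }
}
#endif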
00969 
00970 #define QPEL_V_LOW(m3,m4,m5,m6, pw_20, pw_3, rnd, in0, in1, in2, in7, out, OP)\
00971         "paddw " #m4 ", " #m3 "           \n\t" /* x1 */\
00972         "movq "MANGLE(ff_pw_20)", %%mm4   \n\t" /* 20 */\
00973         "pmullw " #m3 ", %%mm4            \n\t" /* 20x1 */\
00974         "movq "#in7", " #m3 "             \n\t" /* d */\
00975         "movq "#in0", %%mm5               \n\t" /* D */\
00976         "paddw " #m3 ", %%mm5             \n\t" /* x4 */\
00977         "psubw %%mm5, %%mm4               \n\t" /* 20x1 - x4 */\
00978         "movq "#in1", %%mm5               \n\t" /* C */\
00979         "movq "#in2", %%mm6               \n\t" /* B */\
00980         "paddw " #m6 ", %%mm5             \n\t" /* x3 */\
00981         "paddw " #m5 ", %%mm6             \n\t" /* x2 */\
00982         "paddw %%mm6, %%mm6               \n\t" /* 2x2 */\
00983         "psubw %%mm6, %%mm5               \n\t" /* -2x2 + x3 */\
00984         "pmullw "MANGLE(ff_pw_3)", %%mm5  \n\t" /* -6x2 + 3x3 */\
00985         "paddw " #rnd ", %%mm4            \n\t" /* x2 */\
00986         "paddw %%mm4, %%mm5               \n\t" /* 20x1 - 6x2 + 3x3 - x4 */\
00987         "psraw $5, %%mm5                  \n\t"\
00988         "packuswb %%mm5, %%mm5            \n\t"\
00989         OP(%%mm5, out, %%mm7, d)
00990 
00991 #define QPEL_BASE(OPNAME, ROUNDER, RND, OP_MMX2, OP_3DNOW)\
00992 static void OPNAME ## mpeg4_qpel16_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
00993     uint64_t temp;\
00994 \
00995     __asm__ volatile(\
00996         "pxor %%mm7, %%mm7                \n\t"\
00997         "1:                               \n\t"\
00998         "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
00999         "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
01000         "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
01001         "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
01002         "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
01003         "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
01004         "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
01005         "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
01006         "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
01007         "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
01008         "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
01009         "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
01010         "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
01011         "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
01012         "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
01013         "paddw %%mm3, %%mm5               \n\t" /* b */\
01014         "paddw %%mm2, %%mm6               \n\t" /* c */\
01015         "paddw %%mm5, %%mm5               \n\t" /* 2b */\
01016         "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
01017         "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
01018         "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
01019         "paddw %%mm4, %%mm0               \n\t" /* a */\
01020         "paddw %%mm1, %%mm5               \n\t" /* d */\
01021         "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
01022         "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
01023         "paddw %6, %%mm6                  \n\t"\
01024         "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
01025         "psraw $5, %%mm0                  \n\t"\
01026         "movq %%mm0, %5                   \n\t"\
01027         /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
01028         \
01029         "movq 5(%0), %%mm0                \n\t" /* FGHIJKLM */\
01030         "movq %%mm0, %%mm5                \n\t" /* FGHIJKLM */\
01031         "movq %%mm0, %%mm6                \n\t" /* FGHIJKLM */\
01032         "psrlq $8, %%mm0                  \n\t" /* GHIJKLM0 */\
01033         "psrlq $16, %%mm5                 \n\t" /* HIJKLM00 */\
01034         "punpcklbw %%mm7, %%mm0           \n\t" /* 0G0H0I0J */\
01035         "punpcklbw %%mm7, %%mm5           \n\t" /* 0H0I0J0K */\
01036         "paddw %%mm0, %%mm2               \n\t" /* b */\
01037         "paddw %%mm5, %%mm3               \n\t" /* c */\
01038         "paddw %%mm2, %%mm2               \n\t" /* 2b */\
01039         "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
01040         "movq %%mm6, %%mm2                \n\t" /* FGHIJKLM */\
01041         "psrlq $24, %%mm6                 \n\t" /* IJKLM000 */\
01042         "punpcklbw %%mm7, %%mm2           \n\t" /* 0F0G0H0I */\
01043         "punpcklbw %%mm7, %%mm6           \n\t" /* 0I0J0K0L */\
01044         "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
01045         "paddw %%mm2, %%mm1               \n\t" /* a */\
01046         "paddw %%mm6, %%mm4               \n\t" /* d */\
01047         "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
01048         "psubw %%mm4, %%mm3               \n\t" /* - 6b +3c - d */\
01049         "paddw %6, %%mm1                  \n\t"\
01050         "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b +3c - d */\
01051         "psraw $5, %%mm3                  \n\t"\
01052         "movq %5, %%mm1                   \n\t"\
01053         "packuswb %%mm3, %%mm1            \n\t"\
01054         OP_MMX2(%%mm1, (%1),%%mm4, q)\
01055         /* mm0= GHIJ, mm2=FGHI, mm5=HIJK, mm6=IJKL, mm7=0 */\
01056         \
01057         "movq 9(%0), %%mm1                \n\t" /* JKLMNOPQ */\
01058         "movq %%mm1, %%mm4                \n\t" /* JKLMNOPQ */\
01059         "movq %%mm1, %%mm3                \n\t" /* JKLMNOPQ */\
01060         "psrlq $8, %%mm1                  \n\t" /* KLMNOPQ0 */\
01061         "psrlq $16, %%mm4                 \n\t" /* LMNOPQ00 */\
01062         "punpcklbw %%mm7, %%mm1           \n\t" /* 0K0L0M0N */\
01063         "punpcklbw %%mm7, %%mm4           \n\t" /* 0L0M0N0O */\
01064         "paddw %%mm1, %%mm5               \n\t" /* b */\
01065         "paddw %%mm4, %%mm0               \n\t" /* c */\
01066         "paddw %%mm5, %%mm5               \n\t" /* 2b */\
01067         "psubw %%mm5, %%mm0               \n\t" /* c - 2b */\
01068         "movq %%mm3, %%mm5                \n\t" /* JKLMNOPQ */\
01069         "psrlq $24, %%mm3                 \n\t" /* MNOPQ000 */\
01070         "pmullw "MANGLE(ff_pw_3)", %%mm0  \n\t" /* 3c - 6b */\
01071         "punpcklbw %%mm7, %%mm3           \n\t" /* 0M0N0O0P */\
01072         "paddw %%mm3, %%mm2               \n\t" /* d */\
01073         "psubw %%mm2, %%mm0               \n\t" /* -6b + 3c - d */\
01074         "movq %%mm5, %%mm2                \n\t" /* JKLMNOPQ */\
01075         "punpcklbw %%mm7, %%mm2           \n\t" /* 0J0K0L0M */\
01076         "punpckhbw %%mm7, %%mm5           \n\t" /* 0N0O0P0Q */\
01077         "paddw %%mm2, %%mm6               \n\t" /* a */\
01078         "pmullw "MANGLE(ff_pw_20)", %%mm6 \n\t" /* 20a */\
01079         "paddw %6, %%mm0                  \n\t"\
01080         "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
01081         "psraw $5, %%mm0                  \n\t"\
01082         /* mm1=KLMN, mm2=JKLM, mm3=MNOP, mm4=LMNO, mm5=NOPQ mm7=0 */\
01083         \
01084         "paddw %%mm5, %%mm3               \n\t" /* a */\
01085         "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0O0P0Q0Q */\
01086         "paddw %%mm4, %%mm6               \n\t" /* b */\
01087         "pshufw $0xBE, %%mm5, %%mm4       \n\t" /* 0P0Q0Q0P */\
01088         "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0Q0Q0P0O */\
01089         "paddw %%mm1, %%mm4               \n\t" /* c */\
01090         "paddw %%mm2, %%mm5               \n\t" /* d */\
01091         "paddw %%mm6, %%mm6               \n\t" /* 2b */\
01092         "psubw %%mm6, %%mm4               \n\t" /* c - 2b */\
01093         "pmullw "MANGLE(ff_pw_20)", %%mm3 \n\t" /* 20a */\
01094         "pmullw "MANGLE(ff_pw_3)", %%mm4  \n\t" /* 3c - 6b */\
01095         "psubw %%mm5, %%mm3               \n\t" /* -6b + 3c - d */\
01096         "paddw %6, %%mm4                  \n\t"\
01097         "paddw %%mm3, %%mm4               \n\t" /* 20a - 6b + 3c - d */\
01098         "psraw $5, %%mm4                  \n\t"\
01099         "packuswb %%mm4, %%mm0            \n\t"\
01100         OP_MMX2(%%mm0, 8(%1), %%mm4, q)\
01101         \
01102         "add %3, %0                       \n\t"\
01103         "add %4, %1                       \n\t"\
01104         "decl %2                          \n\t"\
01105         " jnz 1b                          \n\t"\
01106         : "+a"(src), "+c"(dst), "+D"(h)\
01107         : "d"((x86_reg)srcStride), "S"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(temp), "m"(ROUNDER)\
01108         : "memory"\
01109     );\
01110 }\
01111 \
01112 static void OPNAME ## mpeg4_qpel16_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
01113     int i;\
01114     int16_t temp[16];\
01115     /* quick HACK, XXX FIXME MUST be optimized */\
01116     for(i=0; i<h; i++)\
01117     {\
01118         temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
01119         temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
01120         temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
01121         temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
01122         temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
01123         temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 9]);\
01124         temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 9])*3 - (src[ 3]+src[10]);\
01125         temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 9])*6 + (src[ 5]+src[10])*3 - (src[ 4]+src[11]);\
01126         temp[ 8]= (src[ 8]+src[ 9])*20 - (src[ 7]+src[10])*6 + (src[ 6]+src[11])*3 - (src[ 5]+src[12]);\
01127         temp[ 9]= (src[ 9]+src[10])*20 - (src[ 8]+src[11])*6 + (src[ 7]+src[12])*3 - (src[ 6]+src[13]);\
01128         temp[10]= (src[10]+src[11])*20 - (src[ 9]+src[12])*6 + (src[ 8]+src[13])*3 - (src[ 7]+src[14]);\
01129         temp[11]= (src[11]+src[12])*20 - (src[10]+src[13])*6 + (src[ 9]+src[14])*3 - (src[ 8]+src[15]);\
01130         temp[12]= (src[12]+src[13])*20 - (src[11]+src[14])*6 + (src[10]+src[15])*3 - (src[ 9]+src[16]);\
01131         temp[13]= (src[13]+src[14])*20 - (src[12]+src[15])*6 + (src[11]+src[16])*3 - (src[10]+src[16]);\
01132         temp[14]= (src[14]+src[15])*20 - (src[13]+src[16])*6 + (src[12]+src[16])*3 - (src[11]+src[15]);\
01133         temp[15]= (src[15]+src[16])*20 - (src[14]+src[16])*6 + (src[13]+src[15])*3 - (src[12]+src[14]);\
01134         __asm__ volatile(\
01135             "movq (%0), %%mm0               \n\t"\
01136             "movq 8(%0), %%mm1              \n\t"\
01137             "paddw %2, %%mm0                \n\t"\
01138             "paddw %2, %%mm1                \n\t"\
01139             "psraw $5, %%mm0                \n\t"\
01140             "psraw $5, %%mm1                \n\t"\
01141             "packuswb %%mm1, %%mm0          \n\t"\
01142             OP_3DNOW(%%mm0, (%1), %%mm1, q)\
01143             "movq 16(%0), %%mm0             \n\t"\
01144             "movq 24(%0), %%mm1             \n\t"\
01145             "paddw %2, %%mm0                \n\t"\
01146             "paddw %2, %%mm1                \n\t"\
01147             "psraw $5, %%mm0                \n\t"\
01148             "psraw $5, %%mm1                \n\t"\
01149             "packuswb %%mm1, %%mm0          \n\t"\
01150             OP_3DNOW(%%mm0, 8(%1), %%mm1, q)\
01151             :: "r"(temp), "r"(dst), "m"(ROUNDER)\
01152             : "memory"\
01153         );\
01154         dst+=dstStride;\
01155         src+=srcStride;\
01156     }\
01157 }\
01158 \
01159 static void OPNAME ## mpeg4_qpel8_h_lowpass_mmx2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
01160     __asm__ volatile(\
01161         "pxor %%mm7, %%mm7                \n\t"\
01162         "1:                               \n\t"\
01163         "movq  (%0), %%mm0                \n\t" /* ABCDEFGH */\
01164         "movq %%mm0, %%mm1                \n\t" /* ABCDEFGH */\
01165         "movq %%mm0, %%mm2                \n\t" /* ABCDEFGH */\
01166         "punpcklbw %%mm7, %%mm0           \n\t" /* 0A0B0C0D */\
01167         "punpckhbw %%mm7, %%mm1           \n\t" /* 0E0F0G0H */\
01168         "pshufw $0x90, %%mm0, %%mm5       \n\t" /* 0A0A0B0C */\
01169         "pshufw $0x41, %%mm0, %%mm6       \n\t" /* 0B0A0A0B */\
01170         "movq %%mm2, %%mm3                \n\t" /* ABCDEFGH */\
01171         "movq %%mm2, %%mm4                \n\t" /* ABCDEFGH */\
01172         "psllq $8, %%mm2                  \n\t" /* 0ABCDEFG */\
01173         "psllq $16, %%mm3                 \n\t" /* 00ABCDEF */\
01174         "psllq $24, %%mm4                 \n\t" /* 000ABCDE */\
01175         "punpckhbw %%mm7, %%mm2           \n\t" /* 0D0E0F0G */\
01176         "punpckhbw %%mm7, %%mm3           \n\t" /* 0C0D0E0F */\
01177         "punpckhbw %%mm7, %%mm4           \n\t" /* 0B0C0D0E */\
01178         "paddw %%mm3, %%mm5               \n\t" /* b */\
01179         "paddw %%mm2, %%mm6               \n\t" /* c */\
01180         "paddw %%mm5, %%mm5               \n\t" /* 2b */\
01181         "psubw %%mm5, %%mm6               \n\t" /* c - 2b */\
01182         "pshufw $0x06, %%mm0, %%mm5       \n\t" /* 0C0B0A0A */\
01183         "pmullw "MANGLE(ff_pw_3)", %%mm6  \n\t" /* 3c - 6b */\
01184         "paddw %%mm4, %%mm0               \n\t" /* a */\
01185         "paddw %%mm1, %%mm5               \n\t" /* d */\
01186         "pmullw "MANGLE(ff_pw_20)", %%mm0 \n\t" /* 20a */\
01187         "psubw %%mm5, %%mm0               \n\t" /* 20a - d */\
01188         "paddw %5, %%mm6                  \n\t"\
01189         "paddw %%mm6, %%mm0               \n\t" /* 20a - 6b + 3c - d */\
01190         "psraw $5, %%mm0                  \n\t"\
01191         /* mm1=EFGH, mm2=DEFG, mm3=CDEF, mm4=BCDE, mm7=0 */\
01192         \
01193         "movd 5(%0), %%mm5                \n\t" /* FGHI */\
01194         "punpcklbw %%mm7, %%mm5           \n\t" /* 0F0G0H0I */\
01195         "pshufw $0xF9, %%mm5, %%mm6       \n\t" /* 0G0H0I0I */\
01196         "paddw %%mm5, %%mm1               \n\t" /* a */\
01197         "paddw %%mm6, %%mm2               \n\t" /* b */\
01198         "pshufw $0xBE, %%mm5, %%mm6       \n\t" /* 0H0I0I0H */\
01199         "pshufw $0x6F, %%mm5, %%mm5       \n\t" /* 0I0I0H0G */\
01200         "paddw %%mm6, %%mm3               \n\t" /* c */\
01201         "paddw %%mm5, %%mm4               \n\t" /* d */\
01202         "paddw %%mm2, %%mm2               \n\t" /* 2b */\
01203         "psubw %%mm2, %%mm3               \n\t" /* c - 2b */\
01204         "pmullw "MANGLE(ff_pw_20)", %%mm1 \n\t" /* 20a */\
01205         "pmullw "MANGLE(ff_pw_3)", %%mm3  \n\t" /* 3c - 6b */\
01206         "psubw %%mm4, %%mm3               \n\t" /* -6b + 3c - d */\
01207         "paddw %5, %%mm1                  \n\t"\
01208         "paddw %%mm1, %%mm3               \n\t" /* 20a - 6b + 3c - d */\
01209         "psraw $5, %%mm3                  \n\t"\
01210         "packuswb %%mm3, %%mm0            \n\t"\
01211         OP_MMX2(%%mm0, (%1), %%mm4, q)\
01212         \
01213         "add %3, %0                       \n\t"\
01214         "add %4, %1                       \n\t"\
01215         "decl %2                          \n\t"\
01216         " jnz 1b                          \n\t"\
01217         : "+a"(src), "+c"(dst), "+d"(h)\
01218         : "S"((x86_reg)srcStride), "D"((x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER)\
01219         : "memory"\
01220     );\
01221 }\
01222 \
01223 static void OPNAME ## mpeg4_qpel8_h_lowpass_3dnow(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h){\
01224     int i;\
01225     int16_t temp[8];\
01226     /* quick HACK, XXX FIXME MUST be optimized */\
01227     for(i=0; i<h; i++)\
01228     {\
01229         temp[ 0]= (src[ 0]+src[ 1])*20 - (src[ 0]+src[ 2])*6 + (src[ 1]+src[ 3])*3 - (src[ 2]+src[ 4]);\
01230         temp[ 1]= (src[ 1]+src[ 2])*20 - (src[ 0]+src[ 3])*6 + (src[ 0]+src[ 4])*3 - (src[ 1]+src[ 5]);\
01231         temp[ 2]= (src[ 2]+src[ 3])*20 - (src[ 1]+src[ 4])*6 + (src[ 0]+src[ 5])*3 - (src[ 0]+src[ 6]);\
01232         temp[ 3]= (src[ 3]+src[ 4])*20 - (src[ 2]+src[ 5])*6 + (src[ 1]+src[ 6])*3 - (src[ 0]+src[ 7]);\
01233         temp[ 4]= (src[ 4]+src[ 5])*20 - (src[ 3]+src[ 6])*6 + (src[ 2]+src[ 7])*3 - (src[ 1]+src[ 8]);\
01234         temp[ 5]= (src[ 5]+src[ 6])*20 - (src[ 4]+src[ 7])*6 + (src[ 3]+src[ 8])*3 - (src[ 2]+src[ 8]);\
01235         temp[ 6]= (src[ 6]+src[ 7])*20 - (src[ 5]+src[ 8])*6 + (src[ 4]+src[ 8])*3 - (src[ 3]+src[ 7]);\
01236         temp[ 7]= (src[ 7]+src[ 8])*20 - (src[ 6]+src[ 8])*6 + (src[ 5]+src[ 7])*3 - (src[ 4]+src[ 6]);\
01237         __asm__ volatile(\
01238             "movq (%0), %%mm0           \n\t"\
01239             "movq 8(%0), %%mm1          \n\t"\
01240             "paddw %2, %%mm0            \n\t"\
01241             "paddw %2, %%mm1            \n\t"\
01242             "psraw $5, %%mm0            \n\t"\
01243             "psraw $5, %%mm1            \n\t"\
01244             "packuswb %%mm1, %%mm0      \n\t"\
01245             OP_3DNOW(%%mm0, (%1), %%mm1, q)\
01246             :: "r"(temp), "r"(dst), "m"(ROUNDER)\
01247             :"memory"\
01248         );\
01249         dst+=dstStride;\
01250         src+=srcStride;\
01251     }\
01252 }
01253 
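/*
 * Illustrative scalar sketch (not part of the original file) of the MPEG-4
 * quarter-pel lowpass filter that the temp[] rows and the "20a - 6b + 3c - d"
 * comments above implement: symmetric taps (-1, 3, -6, 20, 20, -6, 3, -1),
 * edge samples mirrored as in the temp[] setup, then rounded with ROUNDER
 * (16, or 15 for the no_rnd variants), shifted right by 5 and clamped to
 * 0..255 (packuswb does the clamping in the asm).
 */
#if 0
static inline int mirror9(int i)
{
    if (i < 0) return -1 - i;   /* -1 -> 0, -2 -> 1, -3 -> 2 */
    if (i > 8) return 17 - i;   /*  9 -> 8, 10 -> 7, 11 -> 6 */
    return i;
}

static void mpeg4_qpel8_h_lowpass_ref(uint8_t *dst, const uint8_t *src,
                                      int dstStride, int srcStride,
                                      int h, int rounder)
{
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < 8; x++) {
            int v = (src[mirror9(x    )] + src[mirror9(x + 1)]) * 20
                  - (src[mirror9(x - 1)] + src[mirror9(x + 2)]) * 6
                  + (src[mirror9(x - 2)] + src[mirror9(x + 3)]) * 3
                  - (src[mirror9(x - 3)] + src[mirror9(x + 4)]);
            v = (v + rounder) >> 5;
            dst[x] = v < 0 ? 0 : v > 255 ? 255 : v;
        }
        dst += dstStride;
        src += srcStride;
    }
}
#endif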
01254 #define QPEL_OP(OPNAME, ROUNDER, RND, OP, MMX)\
01255 \
01256 static void OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01257     uint64_t temp[17*4];\
01258     uint64_t *temp_ptr= temp;\
01259     int count= 17;\
01260 \
01261     /*FIXME unroll */\
01262     __asm__ volatile(\
01263         "pxor %%mm7, %%mm7              \n\t"\
01264         "1:                             \n\t"\
01265         "movq (%0), %%mm0               \n\t"\
01266         "movq (%0), %%mm1               \n\t"\
01267         "movq 8(%0), %%mm2              \n\t"\
01268         "movq 8(%0), %%mm3              \n\t"\
01269         "punpcklbw %%mm7, %%mm0         \n\t"\
01270         "punpckhbw %%mm7, %%mm1         \n\t"\
01271         "punpcklbw %%mm7, %%mm2         \n\t"\
01272         "punpckhbw %%mm7, %%mm3         \n\t"\
01273         "movq %%mm0, (%1)               \n\t"\
01274         "movq %%mm1, 17*8(%1)           \n\t"\
01275         "movq %%mm2, 2*17*8(%1)         \n\t"\
01276         "movq %%mm3, 3*17*8(%1)         \n\t"\
01277         "add $8, %1                     \n\t"\
01278         "add %3, %0                     \n\t"\
01279         "decl %2                        \n\t"\
01280         " jnz 1b                        \n\t"\
01281         : "+r" (src), "+r" (temp_ptr), "+r"(count)\
01282         : "r" ((x86_reg)srcStride)\
01283         : "memory"\
01284     );\
01285     \
01286     temp_ptr= temp;\
01287     count=4;\
01288     \
01289 /*FIXME reorder for speed */\
01290     __asm__ volatile(\
01291         /*"pxor %%mm7, %%mm7              \n\t"*/\
01292         "1:                             \n\t"\
01293         "movq (%0), %%mm0               \n\t"\
01294         "movq 8(%0), %%mm1              \n\t"\
01295         "movq 16(%0), %%mm2             \n\t"\
01296         "movq 24(%0), %%mm3             \n\t"\
01297         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
01298         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
01299         "add %4, %1                     \n\t"\
01300         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
01301         \
01302         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
01303         "add %4, %1                     \n\t"\
01304         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
01305         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 72(%0), (%1, %3), OP)\
01306         "add %4, %1                     \n\t"\
01307         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 80(%0), (%1), OP)\
01308         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 88(%0), (%1, %3), OP)\
01309         "add %4, %1                     \n\t"\
01310         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 40(%0), 48(%0), 56(%0), 96(%0), (%1), OP)\
01311         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 48(%0), 56(%0), 64(%0),104(%0), (%1, %3), OP)\
01312         "add %4, %1                     \n\t"\
01313         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 56(%0), 64(%0), 72(%0),112(%0), (%1), OP)\
01314         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 64(%0), 72(%0), 80(%0),120(%0), (%1, %3), OP)\
01315         "add %4, %1                     \n\t"\
01316         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 72(%0), 80(%0), 88(%0),128(%0), (%1), OP)\
01317         \
01318         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 80(%0), 88(%0), 96(%0),128(%0), (%1, %3), OP)\
01319         "add %4, %1                     \n\t"  \
01320         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 88(%0), 96(%0),104(%0),120(%0), (%1), OP)\
01321         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 96(%0),104(%0),112(%0),112(%0), (%1, %3), OP)\
01322         \
01323         "add $136, %0                   \n\t"\
01324         "add %6, %1                     \n\t"\
01325         "decl %2                        \n\t"\
01326         " jnz 1b                        \n\t"\
01327         \
01328         : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
01329         : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-14*(x86_reg)dstStride)\
01330         :"memory"\
01331     );\
01332 }\
01333 \
01334 static void OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(uint8_t *dst, uint8_t *src, int dstStride, int srcStride){\
01335     uint64_t temp[9*2];\
01336     uint64_t *temp_ptr= temp;\
01337     int count= 9;\
01338 \
01339     /*FIXME unroll */\
01340     __asm__ volatile(\
01341         "pxor %%mm7, %%mm7              \n\t"\
01342         "1:                             \n\t"\
01343         "movq (%0), %%mm0               \n\t"\
01344         "movq (%0), %%mm1               \n\t"\
01345         "punpcklbw %%mm7, %%mm0         \n\t"\
01346         "punpckhbw %%mm7, %%mm1         \n\t"\
01347         "movq %%mm0, (%1)               \n\t"\
01348         "movq %%mm1, 9*8(%1)            \n\t"\
01349         "add $8, %1                     \n\t"\
01350         "add %3, %0                     \n\t"\
01351         "decl %2                        \n\t"\
01352         " jnz 1b                        \n\t"\
01353         : "+r" (src), "+r" (temp_ptr), "+r"(count)\
01354         : "r" ((x86_reg)srcStride)\
01355         : "memory"\
01356     );\
01357     \
01358     temp_ptr= temp;\
01359     count=2;\
01360     \
01361 /*FIXME reorder for speed */\
01362     __asm__ volatile(\
01363         /*"pxor %%mm7, %%mm7              \n\t"*/\
01364         "1:                             \n\t"\
01365         "movq (%0), %%mm0               \n\t"\
01366         "movq 8(%0), %%mm1              \n\t"\
01367         "movq 16(%0), %%mm2             \n\t"\
01368         "movq 24(%0), %%mm3             \n\t"\
01369         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5, 16(%0),  8(%0),   (%0), 32(%0), (%1), OP)\
01370         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5,  8(%0),   (%0),   (%0), 40(%0), (%1, %3), OP)\
01371         "add %4, %1                     \n\t"\
01372         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5,   (%0),   (%0),  8(%0), 48(%0), (%1), OP)\
01373         \
01374         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5,   (%0),  8(%0), 16(%0), 56(%0), (%1, %3), OP)\
01375         "add %4, %1                     \n\t"\
01376         QPEL_V_LOW(%%mm0, %%mm1, %%mm2, %%mm3, %5, %6, %5,  8(%0), 16(%0), 24(%0), 64(%0), (%1), OP)\
01377         \
01378         QPEL_V_LOW(%%mm1, %%mm2, %%mm3, %%mm0, %5, %6, %5, 16(%0), 24(%0), 32(%0), 64(%0), (%1, %3), OP)\
01379         "add %4, %1                     \n\t"\
01380         QPEL_V_LOW(%%mm2, %%mm3, %%mm0, %%mm1, %5, %6, %5, 24(%0), 32(%0), 40(%0), 56(%0), (%1), OP)\
01381         QPEL_V_LOW(%%mm3, %%mm0, %%mm1, %%mm2, %5, %6, %5, 32(%0), 40(%0), 48(%0), 48(%0), (%1, %3), OP)\
01382                 \
01383         "add $72, %0                    \n\t"\
01384         "add %6, %1                     \n\t"\
01385         "decl %2                        \n\t"\
01386         " jnz 1b                        \n\t"\
01387          \
01388         : "+r"(temp_ptr), "+r"(dst), "+g"(count)\
01389         : "r"((x86_reg)dstStride), "r"(2*(x86_reg)dstStride), /*"m"(ff_pw_20), "m"(ff_pw_3),*/ "m"(ROUNDER), "g"(4-6*(x86_reg)dstStride)\
01390         : "memory"\
01391     );\
01392 }\
01393 \
01394 static void OPNAME ## qpel8_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
01395     OPNAME ## pixels8_ ## MMX(dst, src, stride, 8);\
01396 }\
01397 \
01398 static void OPNAME ## qpel8_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01399     uint64_t temp[8];\
01400     uint8_t * const half= (uint8_t*)temp;\
01401     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
01402     OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
01403 }\
01404 \
01405 static void OPNAME ## qpel8_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01406     OPNAME ## mpeg4_qpel8_h_lowpass_ ## MMX(dst, src, stride, stride, 8);\
01407 }\
01408 \
01409 static void OPNAME ## qpel8_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01410     uint64_t temp[8];\
01411     uint8_t * const half= (uint8_t*)temp;\
01412     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(half, src, 8, stride, 8);\
01413     OPNAME ## pixels8_l2_ ## MMX(dst, src+1, half, stride, stride, 8);\
01414 }\
01415 \
01416 static void OPNAME ## qpel8_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01417     uint64_t temp[8];\
01418     uint8_t * const half= (uint8_t*)temp;\
01419     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
01420     OPNAME ## pixels8_l2_ ## MMX(dst, src, half, stride, stride, 8);\
01421 }\
01422 \
01423 static void OPNAME ## qpel8_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01424     OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, src, stride, stride);\
01425 }\
01426 \
01427 static void OPNAME ## qpel8_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01428     uint64_t temp[8];\
01429     uint8_t * const half= (uint8_t*)temp;\
01430     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(half, src, 8, stride);\
01431     OPNAME ## pixels8_l2_ ## MMX(dst, src+stride, half, stride, stride, 8);\
01432 }\
01433 static void OPNAME ## qpel8_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01434     uint64_t half[8 + 9];\
01435     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01436     uint8_t * const halfHV= ((uint8_t*)half);\
01437     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01438     put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01439     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01440     OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01441 }\
01442 static void OPNAME ## qpel8_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01443     uint64_t half[8 + 9];\
01444     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01445     uint8_t * const halfHV= ((uint8_t*)half);\
01446     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01447     put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01448     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01449     OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01450 }\
01451 static void OPNAME ## qpel8_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01452     uint64_t half[8 + 9];\
01453     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01454     uint8_t * const halfHV= ((uint8_t*)half);\
01455     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01456     put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01457     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01458     OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01459 }\
01460 static void OPNAME ## qpel8_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01461     uint64_t half[8 + 9];\
01462     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01463     uint8_t * const halfHV= ((uint8_t*)half);\
01464     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01465     put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01466     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01467     OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01468 }\
01469 static void OPNAME ## qpel8_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01470     uint64_t half[8 + 9];\
01471     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01472     uint8_t * const halfHV= ((uint8_t*)half);\
01473     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01474     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01475     OPNAME ## pixels8_l2_ ## MMX(dst, halfH, halfHV, stride, 8, 8);\
01476 }\
01477 static void OPNAME ## qpel8_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01478     uint64_t half[8 + 9];\
01479     uint8_t * const halfH= ((uint8_t*)half) + 64;\
01480     uint8_t * const halfHV= ((uint8_t*)half);\
01481     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01482     put ## RND ## mpeg4_qpel8_v_lowpass_ ## MMX(halfHV, halfH, 8, 8);\
01483     OPNAME ## pixels8_l2_ ## MMX(dst, halfH+8, halfHV, stride, 8, 8);\
01484 }\
01485 static void OPNAME ## qpel8_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01486     uint64_t half[8 + 9];\
01487     uint8_t * const halfH= ((uint8_t*)half);\
01488     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01489     put ## RND ## pixels8_l2_ ## MMX(halfH, src, halfH, 8, stride, 9);\
01490     OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01491 }\
01492 static void OPNAME ## qpel8_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01493     uint64_t half[8 + 9];\
01494     uint8_t * const halfH= ((uint8_t*)half);\
01495     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01496     put ## RND ## pixels8_l2_ ## MMX(halfH, src+1, halfH, 8, stride, 9);\
01497     OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01498 }\
01499 static void OPNAME ## qpel8_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01500     uint64_t half[9];\
01501     uint8_t * const halfH= ((uint8_t*)half);\
01502     put ## RND ## mpeg4_qpel8_h_lowpass_ ## MMX(halfH, src, 8, stride, 9);\
01503     OPNAME ## mpeg4_qpel8_v_lowpass_ ## MMX(dst, halfH, stride, 8);\
01504 }\
01505 static void OPNAME ## qpel16_mc00_ ## MMX (uint8_t *dst, uint8_t *src, int stride){\
01506     OPNAME ## pixels16_ ## MMX(dst, src, stride, 16);\
01507 }\
01508 \
01509 static void OPNAME ## qpel16_mc10_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01510     uint64_t temp[32];\
01511     uint8_t * const half= (uint8_t*)temp;\
01512     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
01513     OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
01514 }\
01515 \
01516 static void OPNAME ## qpel16_mc20_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01517     OPNAME ## mpeg4_qpel16_h_lowpass_ ## MMX(dst, src, stride, stride, 16);\
01518 }\
01519 \
01520 static void OPNAME ## qpel16_mc30_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01521     uint64_t temp[32];\
01522     uint8_t * const half= (uint8_t*)temp;\
01523     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(half, src, 16, stride, 16);\
01524     OPNAME ## pixels16_l2_ ## MMX(dst, src+1, half, stride, stride, 16);\
01525 }\
01526 \
01527 static void OPNAME ## qpel16_mc01_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01528     uint64_t temp[32];\
01529     uint8_t * const half= (uint8_t*)temp;\
01530     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
01531     OPNAME ## pixels16_l2_ ## MMX(dst, src, half, stride, stride, 16);\
01532 }\
01533 \
01534 static void OPNAME ## qpel16_mc02_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01535     OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, src, stride, stride);\
01536 }\
01537 \
01538 static void OPNAME ## qpel16_mc03_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01539     uint64_t temp[32];\
01540     uint8_t * const half= (uint8_t*)temp;\
01541     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(half, src, 16, stride);\
01542     OPNAME ## pixels16_l2_ ## MMX(dst, src+stride, half, stride, stride, 16);\
01543 }\
01544 static void OPNAME ## qpel16_mc11_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01545     uint64_t half[16*2 + 17*2];\
01546     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01547     uint8_t * const halfHV= ((uint8_t*)half);\
01548     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01549     put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01550     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01551     OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01552 }\
01553 static void OPNAME ## qpel16_mc31_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01554     uint64_t half[16*2 + 17*2];\
01555     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01556     uint8_t * const halfHV= ((uint8_t*)half);\
01557     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01558     put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01559     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01560     OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01561 }\
01562 static void OPNAME ## qpel16_mc13_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01563     uint64_t half[16*2 + 17*2];\
01564     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01565     uint8_t * const halfHV= ((uint8_t*)half);\
01566     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01567     put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01568     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01569     OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01570 }\
01571 static void OPNAME ## qpel16_mc33_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01572     uint64_t half[16*2 + 17*2];\
01573     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01574     uint8_t * const halfHV= ((uint8_t*)half);\
01575     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01576     put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01577     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01578     OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01579 }\
01580 static void OPNAME ## qpel16_mc21_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01581     uint64_t half[16*2 + 17*2];\
01582     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01583     uint8_t * const halfHV= ((uint8_t*)half);\
01584     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01585     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01586     OPNAME ## pixels16_l2_ ## MMX(dst, halfH, halfHV, stride, 16, 16);\
01587 }\
01588 static void OPNAME ## qpel16_mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01589     uint64_t half[16*2 + 17*2];\
01590     uint8_t * const halfH= ((uint8_t*)half) + 256;\
01591     uint8_t * const halfHV= ((uint8_t*)half);\
01592     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01593     put ## RND ## mpeg4_qpel16_v_lowpass_ ## MMX(halfHV, halfH, 16, 16);\
01594     OPNAME ## pixels16_l2_ ## MMX(dst, halfH+16, halfHV, stride, 16, 16);\
01595 }\
01596 static void OPNAME ## qpel16_mc12_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01597     uint64_t half[17*2];\
01598     uint8_t * const halfH= ((uint8_t*)half);\
01599     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01600     put ## RND ## pixels16_l2_ ## MMX(halfH, src, halfH, 16, stride, 17);\
01601     OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01602 }\
01603 static void OPNAME ## qpel16_mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01604     uint64_t half[17*2];\
01605     uint8_t * const halfH= ((uint8_t*)half);\
01606     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01607     put ## RND ## pixels16_l2_ ## MMX(halfH, src+1, halfH, 16, stride, 17);\
01608     OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01609 }\
01610 static void OPNAME ## qpel16_mc22_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01611     uint64_t half[17*2];\
01612     uint8_t * const halfH= ((uint8_t*)half);\
01613     put ## RND ## mpeg4_qpel16_h_lowpass_ ## MMX(halfH, src, 16, stride, 17);\
01614     OPNAME ## mpeg4_qpel16_v_lowpass_ ## MMX(dst, halfH, stride, 16);\
01615 }
01616 
01617 #define PUT_OP(a,b,temp, size) "mov" #size " " #a ", " #b "        \n\t"
01618 #define AVG_3DNOW_OP(a,b,temp, size) \
01619 "mov" #size " " #b ", " #temp "   \n\t"\
01620 "pavgusb " #temp ", " #a "        \n\t"\
01621 "mov" #size " " #a ", " #b "      \n\t"
01622 #define AVG_MMX2_OP(a,b,temp, size) \
01623 "mov" #size " " #b ", " #temp "   \n\t"\
01624 "pavgb " #temp ", " #a "          \n\t"\
01625 "mov" #size " " #a ", " #b "      \n\t"
01626 
01627 QPEL_BASE(put_       , ff_pw_16, _       , PUT_OP, PUT_OP)
01628 QPEL_BASE(avg_       , ff_pw_16, _       , AVG_MMX2_OP, AVG_3DNOW_OP)
01629 QPEL_BASE(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, PUT_OP)
01630 QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, 3dnow)
01631 QPEL_OP(avg_       , ff_pw_16, _       , AVG_3DNOW_OP, 3dnow)
01632 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, 3dnow)
01633 QPEL_OP(put_       , ff_pw_16, _       , PUT_OP, mmx2)
01634 QPEL_OP(avg_       , ff_pw_16, _       , AVG_MMX2_OP, mmx2)
01635 QPEL_OP(put_no_rnd_, ff_pw_15, _no_rnd_, PUT_OP, mmx2)
01636 
01637 /***********************************/
01638 /* bilinear qpel: not compliant with any spec, only for -lavdopts fast */
01639 
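/*
 * Illustrative scalar sketch (not part of the original file) of what the
 * _mc20 ("half-pel x") mapping below reduces to per pixel: a rounded average
 * of the two horizontal neighbours, as computed by the pixels*_x2 helpers the
 * macros dispatch to.  The remaining positions are built analogously from the
 * _y2/_xy2 and *_l3 helpers.
 */
#if 0
static void put_2tap_qpel_h_ref(uint8_t *dst, const uint8_t *src,
                                int stride, int size)
{
    int x, y;
    for (y = 0; y < size; y++) {
        for (x = 0; x < size; x++)
            dst[x] = (src[x] + src[x + 1] + 1) >> 1;
        dst += stride;
        src += stride;
    }
}
#endif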
01640 #define QPEL_2TAP_XY(OPNAME, SIZE, MMX, XY, HPEL)\
01641 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01642     OPNAME ## pixels ## SIZE ## HPEL(dst, src, stride, SIZE);\
01643 }
01644 #define QPEL_2TAP_L3(OPNAME, SIZE, MMX, XY, S0, S1, S2)\
01645 static void OPNAME ## 2tap_qpel ## SIZE ## _mc ## XY ## _ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01646     OPNAME ## 2tap_qpel ## SIZE ## _l3_ ## MMX(dst, src+S0, stride, SIZE, S1, S2);\
01647 }
01648 
01649 #define QPEL_2TAP(OPNAME, SIZE, MMX)\
01650 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 20, _x2_ ## MMX)\
01651 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 02, _y2_ ## MMX)\
01652 QPEL_2TAP_XY(OPNAME, SIZE, MMX, 22, _xy2_mmx)\
01653 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc00_ ## MMX =\
01654                           OPNAME ## qpel ## SIZE ## _mc00_ ## MMX;\
01655 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc21_ ## MMX =\
01656                           OPNAME ## 2tap_qpel ## SIZE ## _mc20_ ## MMX;\
01657 static const qpel_mc_func OPNAME ## 2tap_qpel ## SIZE ## _mc12_ ## MMX =\
01658                           OPNAME ## 2tap_qpel ## SIZE ## _mc02_ ## MMX;\
01659 static void OPNAME ## 2tap_qpel ## SIZE ## _mc32_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01660     OPNAME ## pixels ## SIZE ## _y2_ ## MMX(dst, src+1, stride, SIZE);\
01661 }\
01662 static void OPNAME ## 2tap_qpel ## SIZE ## _mc23_ ## MMX(uint8_t *dst, uint8_t *src, int stride){\
01663     OPNAME ## pixels ## SIZE ## _x2_ ## MMX(dst, src+stride, stride, SIZE);\
01664 }\
01665 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 10, 0,         1,       0)\
01666 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 30, 1,        -1,       0)\
01667 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 01, 0,         stride,  0)\
01668 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 03, stride,   -stride,  0)\
01669 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 11, 0,         stride,  1)\
01670 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 31, 1,         stride, -1)\
01671 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 13, stride,   -stride,  1)\
01672 QPEL_2TAP_L3(OPNAME, SIZE, MMX, 33, stride+1, -stride, -1)\
01673 
01674 QPEL_2TAP(put_, 16, mmx2)
01675 QPEL_2TAP(avg_, 16, mmx2)
01676 QPEL_2TAP(put_,  8, mmx2)
01677 QPEL_2TAP(avg_,  8, mmx2)
01678 QPEL_2TAP(put_, 16, 3dnow)
01679 QPEL_2TAP(avg_, 16, 3dnow)
01680 QPEL_2TAP(put_,  8, 3dnow)
01681 QPEL_2TAP(avg_,  8, 3dnow)
01682 
01683 
01684 #if 0
01685 static void just_return(void) { return; }
01686 #endif
01687 
01688 static void gmc_mmx(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
01689                     int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height){
01690     const int w = 8;
01691     const int ix = ox>>(16+shift);
01692     const int iy = oy>>(16+shift);
01693     const int oxs = ox>>4;
01694     const int oys = oy>>4;
01695     const int dxxs = dxx>>4;
01696     const int dxys = dxy>>4;
01697     const int dyxs = dyx>>4;
01698     const int dyys = dyy>>4;
01699     const uint16_t r4[4] = {r,r,r,r};
01700     const uint16_t dxy4[4] = {dxys,dxys,dxys,dxys};
01701     const uint16_t dyy4[4] = {dyys,dyys,dyys,dyys};
01702     const uint64_t shift2 = 2*shift;
01703     uint8_t edge_buf[(h+1)*stride];
01704     int x, y;
01705 
01706     const int dxw = (dxx-(1<<(16+shift)))*(w-1);
01707     const int dyh = (dyy-(1<<(16+shift)))*(h-1);
01708     const int dxh = dxy*(h-1);
01709     const int dyw = dyx*(w-1);
01710     if( // non-constant fullpel offset (3% of blocks)
01711         ((ox^(ox+dxw)) | (ox^(ox+dxh)) | (ox^(ox+dxw+dxh)) |
01712          (oy^(oy+dyw)) | (oy^(oy+dyh)) | (oy^(oy+dyw+dyh))) >> (16+shift)
01713         // uses more than 16 bits of subpel mv (only at huge resolution)
01714         || (dxx|dxy|dyx|dyy)&15 )
01715     {
01716         //FIXME could still use mmx for some of the rows
01717         ff_gmc_c(dst, src, stride, h, ox, oy, dxx, dxy, dyx, dyy, shift, r, width, height);
01718         return;
01719     }
01720 
01721     src += ix + iy*stride;
01722     if( (unsigned)ix >= width-w ||
01723         (unsigned)iy >= height-h )
01724     {
01725         ff_emulated_edge_mc(edge_buf, src, stride, w+1, h+1, ix, iy, width, height);
01726         src = edge_buf;
01727     }
01728 
01729     __asm__ volatile(
01730         "movd         %0, %%mm6 \n\t"
01731         "pxor      %%mm7, %%mm7 \n\t"
01732         "punpcklwd %%mm6, %%mm6 \n\t"
01733         "punpcklwd %%mm6, %%mm6 \n\t"
01734         :: "r"(1<<shift)
01735     );
01736 
01737     for(x=0; x<w; x+=4){
01738         uint16_t dx4[4] = { oxs - dxys + dxxs*(x+0),
01739                             oxs - dxys + dxxs*(x+1),
01740                             oxs - dxys + dxxs*(x+2),
01741                             oxs - dxys + dxxs*(x+3) };
01742         uint16_t dy4[4] = { oys - dyys + dyxs*(x+0),
01743                             oys - dyys + dyxs*(x+1),
01744                             oys - dyys + dyxs*(x+2),
01745                             oys - dyys + dyxs*(x+3) };
01746 
01747         for(y=0; y<h; y++){
01748             __asm__ volatile(
01749                 "movq   %0,  %%mm4 \n\t"
01750                 "movq   %1,  %%mm5 \n\t"
01751                 "paddw  %2,  %%mm4 \n\t"
01752                 "paddw  %3,  %%mm5 \n\t"
01753                 "movq   %%mm4, %0  \n\t"
01754                 "movq   %%mm5, %1  \n\t"
01755                 "psrlw  $12, %%mm4 \n\t"
01756                 "psrlw  $12, %%mm5 \n\t"
01757                 : "+m"(*dx4), "+m"(*dy4)
01758                 : "m"(*dxy4), "m"(*dyy4)
01759             );
01760 
01761             __asm__ volatile(
01762                 "movq   %%mm6, %%mm2 \n\t"
01763                 "movq   %%mm6, %%mm1 \n\t"
01764                 "psubw  %%mm4, %%mm2 \n\t"
01765                 "psubw  %%mm5, %%mm1 \n\t"
01766                 "movq   %%mm2, %%mm0 \n\t"
01767                 "movq   %%mm4, %%mm3 \n\t"
01768                 "pmullw %%mm1, %%mm0 \n\t" // (s-dx)*(s-dy)
01769                 "pmullw %%mm5, %%mm3 \n\t" // dx*dy
01770                 "pmullw %%mm5, %%mm2 \n\t" // (s-dx)*dy
01771                 "pmullw %%mm4, %%mm1 \n\t" // dx*(s-dy)
01772 
01773                 "movd   %4,    %%mm5 \n\t"
01774                 "movd   %3,    %%mm4 \n\t"
01775                 "punpcklbw %%mm7, %%mm5 \n\t"
01776                 "punpcklbw %%mm7, %%mm4 \n\t"
01777                 "pmullw %%mm5, %%mm3 \n\t" // src[1,1] * dx*dy
01778                 "pmullw %%mm4, %%mm2 \n\t" // src[0,1] * (s-dx)*dy
01779 
01780                 "movd   %2,    %%mm5 \n\t"
01781                 "movd   %1,    %%mm4 \n\t"
01782                 "punpcklbw %%mm7, %%mm5 \n\t"
01783                 "punpcklbw %%mm7, %%mm4 \n\t"
01784                 "pmullw %%mm5, %%mm1 \n\t" // src[1,0] * dx*(s-dy)
01785                 "pmullw %%mm4, %%mm0 \n\t" // src[0,0] * (s-dx)*(s-dy)
01786                 "paddw  %5,    %%mm1 \n\t"
01787                 "paddw  %%mm3, %%mm2 \n\t"
01788                 "paddw  %%mm1, %%mm0 \n\t"
01789                 "paddw  %%mm2, %%mm0 \n\t"
01790 
01791                 "psrlw    %6,    %%mm0 \n\t"
01792                 "packuswb %%mm0, %%mm0 \n\t"
01793                 "movd     %%mm0, %0    \n\t"
01794 
01795                 : "=m"(dst[x+y*stride])
01796                 : "m"(src[0]), "m"(src[1]),
01797                   "m"(src[stride]), "m"(src[stride+1]),
01798                   "m"(*r4), "m"(shift2)
01799             );
01800             src += stride;
01801         }
01802         src += 4-h*stride;
01803     }
01804 }
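/*
 * Illustrative scalar sketch (not part of the original file) of the per-pixel
 * operation performed by the inner loop above, following its asm comments:
 * bilinear interpolation between the four neighbouring source pixels with
 * weights built from the fractional positions dx/dy and s = 1<<shift, rounded
 * with r and scaled down by 2*shift (the asm works on 16-bit words and
 * saturates the result with packuswb).
 */
#if 0
static inline uint8_t gmc_bilinear_ref(const uint8_t *src, int stride,
                                       int dx, int dy, int s, int r, int shift)
{
    int v = src[0]          * (s - dx) * (s - dy)
          + src[1]          *  dx      * (s - dy)
          + src[stride]     * (s - dx) *  dy
          + src[stride + 1] *  dx      *  dy
          + r;
    v >>= 2 * shift;
    return v > 255 ? 255 : v;
}
#endif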
01805 
01806 #define PREFETCH(name, op) \
01807 static void name(void *mem, int stride, int h){\
01808     const uint8_t *p= mem;\
01809     do{\
01810         __asm__ volatile(#op" %0" :: "m"(*p));\
01811         p+= stride;\
01812     }while(--h);\
01813 }
01814 PREFETCH(prefetch_mmx2,  prefetcht0)
01815 PREFETCH(prefetch_3dnow, prefetch)
01816 #undef PREFETCH
01817 
01818 #include "h264dsp_mmx.c"
01819 #include "rv40dsp_mmx.c"
01820 
01821 /* CAVS specific */
01822 void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01823     put_pixels8_mmx(dst, src, stride, 8);
01824 }
01825 void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01826     avg_pixels8_mmx(dst, src, stride, 8);
01827 }
01828 void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01829     put_pixels16_mmx(dst, src, stride, 16);
01830 }
01831 void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride) {
01832     avg_pixels16_mmx(dst, src, stride, 16);
01833 }
01834 
01835 /* VC1 specific */
01836 void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
01837     put_pixels8_mmx(dst, src, stride, 8);
01838 }
01839 void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd) {
01840     avg_pixels8_mmx2(dst, src, stride, 8);
01841 }
01842 
01843 /* XXX: these functions should be removed as soon as all IDCTs have been
01844    converted */
01845 #if CONFIG_GPL
01846 static void ff_libmpeg2mmx_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01847 {
01848     ff_mmx_idct (block);
01849     put_pixels_clamped_mmx(block, dest, line_size);
01850 }
01851 static void ff_libmpeg2mmx_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01852 {
01853     ff_mmx_idct (block);
01854     add_pixels_clamped_mmx(block, dest, line_size);
01855 }
01856 static void ff_libmpeg2mmx2_idct_put(uint8_t *dest, int line_size, DCTELEM *block)
01857 {
01858     ff_mmxext_idct (block);
01859     put_pixels_clamped_mmx(block, dest, line_size);
01860 }
01861 static void ff_libmpeg2mmx2_idct_add(uint8_t *dest, int line_size, DCTELEM *block)
01862 {
01863     ff_mmxext_idct (block);
01864     add_pixels_clamped_mmx(block, dest, line_size);
01865 }
01866 #endif
01867 static void ff_idct_xvid_mmx_put(uint8_t *dest, int line_size, DCTELEM *block)
01868 {
01869     ff_idct_xvid_mmx (block);
01870     put_pixels_clamped_mmx(block, dest, line_size);
01871 }
01872 static void ff_idct_xvid_mmx_add(uint8_t *dest, int line_size, DCTELEM *block)
01873 {
01874     ff_idct_xvid_mmx (block);
01875     add_pixels_clamped_mmx(block, dest, line_size);
01876 }
01877 static void ff_idct_xvid_mmx2_put(uint8_t *dest, int line_size, DCTELEM *block)
01878 {
01879     ff_idct_xvid_mmx2 (block);
01880     put_pixels_clamped_mmx(block, dest, line_size);
01881 }
01882 static void ff_idct_xvid_mmx2_add(uint8_t *dest, int line_size, DCTELEM *block)
01883 {
01884     ff_idct_xvid_mmx2 (block);
01885     add_pixels_clamped_mmx(block, dest, line_size);
01886 }
01887 
01888 static void vorbis_inverse_coupling_3dnow(float *mag, float *ang, int blocksize)
01889 {
01890     int i;
01891     __asm__ volatile("pxor %%mm7, %%mm7":);
01892     for(i=0; i<blocksize; i+=2) {
01893         __asm__ volatile(
01894             "movq    %0,    %%mm0 \n\t"
01895             "movq    %1,    %%mm1 \n\t"
01896             "movq    %%mm0, %%mm2 \n\t"
01897             "movq    %%mm1, %%mm3 \n\t"
01898             "pfcmpge %%mm7, %%mm2 \n\t" // m <= 0.0
01899             "pfcmpge %%mm7, %%mm3 \n\t" // a <= 0.0
01900             "pslld   $31,   %%mm2 \n\t" // keep only the sign bit
01901             "pxor    %%mm2, %%mm1 \n\t"
01902             "movq    %%mm3, %%mm4 \n\t"
01903             "pand    %%mm1, %%mm3 \n\t"
01904             "pandn   %%mm1, %%mm4 \n\t"
01905             "pfadd   %%mm0, %%mm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
01906             "pfsub   %%mm4, %%mm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
01907             "movq    %%mm3, %1    \n\t"
01908             "movq    %%mm0, %0    \n\t"
01909             :"+m"(mag[i]), "+m"(ang[i])
01910             ::"memory"
01911         );
01912     }
01913     __asm__ volatile("femms");
01914 }
01915 static void vorbis_inverse_coupling_sse(float *mag, float *ang, int blocksize)
01916 {
01917     int i;
01918 
01919     __asm__ volatile(
01920             "movaps  %0,     %%xmm5 \n\t"
01921         ::"m"(ff_pdw_80000000[0])
01922     );
01923     for(i=0; i<blocksize; i+=4) {
01924         __asm__ volatile(
01925             "movaps  %0,     %%xmm0 \n\t"
01926             "movaps  %1,     %%xmm1 \n\t"
01927             "xorps   %%xmm2, %%xmm2 \n\t"
01928             "xorps   %%xmm3, %%xmm3 \n\t"
01929             "cmpleps %%xmm0, %%xmm2 \n\t" // m <= 0.0
01930             "cmpleps %%xmm1, %%xmm3 \n\t" // a <= 0.0
01931             "andps   %%xmm5, %%xmm2 \n\t" // keep only the sign bit
01932             "xorps   %%xmm2, %%xmm1 \n\t"
01933             "movaps  %%xmm3, %%xmm4 \n\t"
01934             "andps   %%xmm1, %%xmm3 \n\t"
01935             "andnps  %%xmm1, %%xmm4 \n\t"
01936             "addps   %%xmm0, %%xmm3 \n\t" // a = m + ((a<0) & (a ^ sign(m)))
01937             "subps   %%xmm4, %%xmm0 \n\t" // m = m + ((a>0) & (a ^ sign(m)))
01938             "movaps  %%xmm3, %1     \n\t"
01939             "movaps  %%xmm0, %0     \n\t"
01940             :"+m"(mag[i]), "+m"(ang[i])
01941             ::"memory"
01942         );
01943     }
01944 }
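/*
 * Illustrative scalar sketch (not part of the original file) of the
 * magnitude/angle recoupling that both versions above compute branchlessly.
 */
#if 0
static void vorbis_inverse_coupling_ref(float *mag, float *ang, int blocksize)
{
    int i;
    for (i = 0; i < blocksize; i++) {
        if (mag[i] > 0.0) {
            if (ang[i] > 0.0) {
                ang[i] = mag[i] - ang[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] += t;
            }
        } else {
            if (ang[i] > 0.0) {
                ang[i] += mag[i];
            } else {
                float t = ang[i];
                ang[i]  = mag[i];
                mag[i] -= t;
            }
        }
    }
}
#endif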
01945 
01946 #define IF1(x) x
01947 #define IF0(x)
01948 
01949 #define MIX5(mono,stereo)\
01950     __asm__ volatile(\
01951         "movss          0(%2), %%xmm5 \n"\
01952         "movss          8(%2), %%xmm6 \n"\
01953         "movss         24(%2), %%xmm7 \n"\
01954         "shufps    $0, %%xmm5, %%xmm5 \n"\
01955         "shufps    $0, %%xmm6, %%xmm6 \n"\
01956         "shufps    $0, %%xmm7, %%xmm7 \n"\
01957         "1: \n"\
01958         "movaps       (%0,%1), %%xmm0 \n"\
01959         "movaps  0x400(%0,%1), %%xmm1 \n"\
01960         "movaps  0x800(%0,%1), %%xmm2 \n"\
01961         "movaps  0xc00(%0,%1), %%xmm3 \n"\
01962         "movaps 0x1000(%0,%1), %%xmm4 \n"\
01963         "mulps         %%xmm5, %%xmm0 \n"\
01964         "mulps         %%xmm6, %%xmm1 \n"\
01965         "mulps         %%xmm5, %%xmm2 \n"\
01966         "mulps         %%xmm7, %%xmm3 \n"\
01967         "mulps         %%xmm7, %%xmm4 \n"\
01968  stereo("addps         %%xmm1, %%xmm0 \n")\
01969         "addps         %%xmm1, %%xmm2 \n"\
01970         "addps         %%xmm3, %%xmm0 \n"\
01971         "addps         %%xmm4, %%xmm2 \n"\
01972    mono("addps         %%xmm2, %%xmm0 \n")\
01973         "movaps  %%xmm0,      (%0,%1) \n"\
01974  stereo("movaps  %%xmm2, 0x400(%0,%1) \n")\
01975         "add $16, %0 \n"\
01976         "jl 1b \n"\
01977         :"+&r"(i)\
01978         :"r"(samples[0]+len), "r"(matrix)\
01979         :"memory"\
01980     );
01981 
01982 #define MIX_MISC(stereo)\
01983     __asm__ volatile(\
01984         "1: \n"\
01985         "movaps  (%3,%0), %%xmm0 \n"\
01986  stereo("movaps   %%xmm0, %%xmm1 \n")\
01987         "mulps    %%xmm6, %%xmm0 \n"\
01988  stereo("mulps    %%xmm7, %%xmm1 \n")\
01989         "lea 1024(%3,%0), %1 \n"\
01990         "mov %5, %2 \n"\
01991         "2: \n"\
01992         "movaps   (%1),   %%xmm2 \n"\
01993  stereo("movaps   %%xmm2, %%xmm3 \n")\
01994         "mulps   (%4,%2), %%xmm2 \n"\
01995  stereo("mulps 16(%4,%2), %%xmm3 \n")\
01996         "addps    %%xmm2, %%xmm0 \n"\
01997  stereo("addps    %%xmm3, %%xmm1 \n")\
01998         "add $1024, %1 \n"\
01999         "add $32, %2 \n"\
02000         "jl 2b \n"\
02001         "movaps   %%xmm0,     (%3,%0) \n"\
02002  stereo("movaps   %%xmm1, 1024(%3,%0) \n")\
02003         "add $16, %0 \n"\
02004         "jl 1b \n"\
02005         :"+&r"(i), "=&r"(j), "=&r"(k)\
02006         :"r"(samples[0]+len), "r"(matrix_simd+in_ch), "g"((intptr_t)-32*(in_ch-1))\
02007         :"memory"\
02008     );
02009 
02010 static void ac3_downmix_sse(float (*samples)[256], float (*matrix)[2], int out_ch, int in_ch, int len)
02011 {
02012     int (*matrix_cmp)[2] = (int(*)[2])matrix;
02013     intptr_t i,j,k;
02014 
02015     i = -len*sizeof(float);
02016     if(in_ch == 5 && out_ch == 2 && !(matrix_cmp[0][1]|matrix_cmp[2][0]|matrix_cmp[3][1]|matrix_cmp[4][0]|(matrix_cmp[1][0]^matrix_cmp[1][1])|(matrix_cmp[0][0]^matrix_cmp[2][1]))) {
02017         MIX5(IF0,IF1);
02018     } else if(in_ch == 5 && out_ch == 1 && matrix_cmp[0][0]==matrix_cmp[2][0] && matrix_cmp[3][0]==matrix_cmp[4][0]) {
02019         MIX5(IF1,IF0);
02020     } else {
02021         DECLARE_ALIGNED(16, float, matrix_simd)[in_ch][2][4];
02022         j = 2*in_ch*sizeof(float);
02023         __asm__ volatile(
02024             "1: \n"
02025             "sub $8, %0 \n"
02026             "movss     (%2,%0), %%xmm6 \n"
02027             "movss    4(%2,%0), %%xmm7 \n"
02028             "shufps $0, %%xmm6, %%xmm6 \n"
02029             "shufps $0, %%xmm7, %%xmm7 \n"
02030             "movaps %%xmm6,   (%1,%0,4) \n"
02031             "movaps %%xmm7, 16(%1,%0,4) \n"
02032             "jg 1b \n"
02033             :"+&r"(j)
02034             :"r"(matrix_simd), "r"(matrix)
02035             :"memory"
02036         );
02037         if(out_ch == 2) {
02038             MIX_MISC(IF1);
02039         } else {
02040             MIX_MISC(IF0);
02041         }
02042     }
02043 }
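/*
 * Illustrative scalar sketch (not part of the original file) of the downmix
 * the SSE paths above implement: each output channel is a per-sample dot
 * product of the input channels with its column of the mixing matrix,
 * written back in place over samples[0] (and samples[1] for stereo).
 * MIX5 is this loop specialized for the 5.0-to-stereo/mono coefficient
 * patterns tested via matrix_cmp.
 */
#if 0
static void ac3_downmix_ref(float (*samples)[256], float (*matrix)[2],
                            int out_ch, int in_ch, int len)
{
    int i, j;
    for (i = 0; i < len; i++) {
        float v0 = 0, v1 = 0;
        for (j = 0; j < in_ch; j++) {
            v0 += samples[j][i] * matrix[j][0];
            v1 += samples[j][i] * matrix[j][1];
        }
        samples[0][i] = v0;
        if (out_ch == 2)
            samples[1][i] = v1;
    }
}
#endif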
02044 
02045 static void vector_fmul_3dnow(float *dst, const float *src, int len){
02046     x86_reg i = (len-4)*4;
02047     __asm__ volatile(
02048         "1: \n\t"
02049         "movq    (%1,%0), %%mm0 \n\t"
02050         "movq   8(%1,%0), %%mm1 \n\t"
02051         "pfmul   (%2,%0), %%mm0 \n\t"
02052         "pfmul  8(%2,%0), %%mm1 \n\t"
02053         "movq   %%mm0,  (%1,%0) \n\t"
02054         "movq   %%mm1, 8(%1,%0) \n\t"
02055         "sub  $16, %0 \n\t"
02056         "jge 1b \n\t"
02057         "femms  \n\t"
02058         :"+r"(i)
02059         :"r"(dst), "r"(src)
02060         :"memory"
02061     );
02062 }
02063 static void vector_fmul_sse(float *dst, const float *src, int len){
02064     x86_reg i = (len-8)*4;
02065     __asm__ volatile(
02066         "1: \n\t"
02067         "movaps    (%1,%0), %%xmm0 \n\t"
02068         "movaps  16(%1,%0), %%xmm1 \n\t"
02069         "mulps     (%2,%0), %%xmm0 \n\t"
02070         "mulps   16(%2,%0), %%xmm1 \n\t"
02071         "movaps  %%xmm0,   (%1,%0) \n\t"
02072         "movaps  %%xmm1, 16(%1,%0) \n\t"
02073         "sub  $32, %0 \n\t"
02074         "jge 1b \n\t"
02075         :"+r"(i)
02076         :"r"(dst), "r"(src)
02077         :"memory"
02078     );
02079 }
02080 
02081 static void vector_fmul_reverse_3dnow2(float *dst, const float *src0, const float *src1, int len){
02082     x86_reg i = len*4-16;
02083     __asm__ volatile(
02084         "1: \n\t"
02085         "pswapd   8(%1), %%mm0 \n\t"
02086         "pswapd    (%1), %%mm1 \n\t"
02087         "pfmul  (%3,%0), %%mm0 \n\t"
02088         "pfmul 8(%3,%0), %%mm1 \n\t"
02089         "movq  %%mm0,  (%2,%0) \n\t"
02090         "movq  %%mm1, 8(%2,%0) \n\t"
02091         "add   $16, %1 \n\t"
02092         "sub   $16, %0 \n\t"
02093         "jge   1b \n\t"
02094         :"+r"(i), "+r"(src1)
02095         :"r"(dst), "r"(src0)
02096     );
02097     __asm__ volatile("femms");
02098 }
02099 static void vector_fmul_reverse_sse(float *dst, const float *src0, const float *src1, int len){
02100     x86_reg i = len*4-32;
02101     __asm__ volatile(
02102         "1: \n\t"
02103         "movaps        16(%1), %%xmm0 \n\t"
02104         "movaps          (%1), %%xmm1 \n\t"
02105         "shufps $0x1b, %%xmm0, %%xmm0 \n\t"
02106         "shufps $0x1b, %%xmm1, %%xmm1 \n\t"
02107         "mulps        (%3,%0), %%xmm0 \n\t"
02108         "mulps      16(%3,%0), %%xmm1 \n\t"
02109         "movaps     %%xmm0,   (%2,%0) \n\t"
02110         "movaps     %%xmm1, 16(%2,%0) \n\t"
02111         "add    $32, %1 \n\t"
02112         "sub    $32, %0 \n\t"
02113         "jge    1b \n\t"
02114         :"+r"(i), "+r"(src1)
02115         :"r"(dst), "r"(src0)
02116     );
02117 }
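/*
 * Illustrative scalar sketch (not part of the original file): both "reverse"
 * versions above multiply src0 with src1 read backwards, i.e.
 * dst[i] = src0[i] * src1[len-1-i] (pswapd / shufps $0x1b do the
 * per-register reversal).
 */
#if 0
static void vector_fmul_reverse_ref(float *dst, const float *src0,
                                    const float *src1, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src0[i] * src1[len - 1 - i];
}
#endif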
02118 
02119 static void vector_fmul_add_3dnow(float *dst, const float *src0, const float *src1,
02120                                   const float *src2, int len){
02121     x86_reg i = (len-4)*4;
02122     __asm__ volatile(
02123         "1: \n\t"
02124         "movq    (%2,%0), %%mm0 \n\t"
02125         "movq   8(%2,%0), %%mm1 \n\t"
02126         "pfmul   (%3,%0), %%mm0 \n\t"
02127         "pfmul  8(%3,%0), %%mm1 \n\t"
02128         "pfadd   (%4,%0), %%mm0 \n\t"
02129         "pfadd  8(%4,%0), %%mm1 \n\t"
02130         "movq  %%mm0,   (%1,%0) \n\t"
02131         "movq  %%mm1,  8(%1,%0) \n\t"
02132         "sub  $16, %0 \n\t"
02133         "jge  1b \n\t"
02134         :"+r"(i)
02135         :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02136         :"memory"
02137     );
02138     __asm__ volatile("femms");
02139 }
02140 static void vector_fmul_add_sse(float *dst, const float *src0, const float *src1,
02141                                 const float *src2, int len){
02142     x86_reg i = (len-8)*4;
02143     __asm__ volatile(
02144         "1: \n\t"
02145         "movaps   (%2,%0), %%xmm0 \n\t"
02146         "movaps 16(%2,%0), %%xmm1 \n\t"
02147         "mulps    (%3,%0), %%xmm0 \n\t"
02148         "mulps  16(%3,%0), %%xmm1 \n\t"
02149         "addps    (%4,%0), %%xmm0 \n\t"
02150         "addps  16(%4,%0), %%xmm1 \n\t"
02151         "movaps %%xmm0,   (%1,%0) \n\t"
02152         "movaps %%xmm1, 16(%1,%0) \n\t"
02153         "sub  $32, %0 \n\t"
02154         "jge  1b \n\t"
02155         :"+r"(i)
02156         :"r"(dst), "r"(src0), "r"(src1), "r"(src2)
02157         :"memory"
02158     );
02159 }
02160 
02161 static void vector_fmul_window_3dnow2(float *dst, const float *src0, const float *src1,
02162                                       const float *win, float add_bias, int len){
02163 #if HAVE_6REGS
02164     if(add_bias == 0){
02165         x86_reg i = -len*4;
02166         x86_reg j = len*4-8;
02167         __asm__ volatile(
02168             "1: \n"
02169             "pswapd  (%5,%1), %%mm1 \n"
02170             "movq    (%5,%0), %%mm0 \n"
02171             "pswapd  (%4,%1), %%mm5 \n"
02172             "movq    (%3,%0), %%mm4 \n"
02173             "movq      %%mm0, %%mm2 \n"
02174             "movq      %%mm1, %%mm3 \n"
02175             "pfmul     %%mm4, %%mm2 \n" // src0[len+i]*win[len+i]
02176             "pfmul     %%mm5, %%mm3 \n" // src1[    j]*win[len+j]
02177             "pfmul     %%mm4, %%mm1 \n" // src0[len+i]*win[len+j]
02178             "pfmul     %%mm5, %%mm0 \n" // src1[    j]*win[len+i]
02179             "pfadd     %%mm3, %%mm2 \n"
02180             "pfsub     %%mm0, %%mm1 \n"
02181             "pswapd    %%mm2, %%mm2 \n"
02182             "movq      %%mm1, (%2,%0) \n"
02183             "movq      %%mm2, (%2,%1) \n"
02184             "sub $8, %1 \n"
02185             "add $8, %0 \n"
02186             "jl 1b \n"
02187             "femms \n"
02188             :"+r"(i), "+r"(j)
02189             :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
02190         );
02191     }else
02192 #endif
02193         ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
02194 }
02195 
02196 static void vector_fmul_window_sse(float *dst, const float *src0, const float *src1,
02197                                    const float *win, float add_bias, int len){
02198 #if HAVE_6REGS
02199     if(add_bias == 0){
02200         x86_reg i = -len*4;
02201         x86_reg j = len*4-16;
02202         __asm__ volatile(
02203             "1: \n"
02204             "movaps       (%5,%1), %%xmm1 \n"
02205             "movaps       (%5,%0), %%xmm0 \n"
02206             "movaps       (%4,%1), %%xmm5 \n"
02207             "movaps       (%3,%0), %%xmm4 \n"
02208             "shufps $0x1b, %%xmm1, %%xmm1 \n"
02209             "shufps $0x1b, %%xmm5, %%xmm5 \n"
02210             "movaps        %%xmm0, %%xmm2 \n"
02211             "movaps        %%xmm1, %%xmm3 \n"
02212             "mulps         %%xmm4, %%xmm2 \n" // src0[len+i]*win[len+i]
02213             "mulps         %%xmm5, %%xmm3 \n" // src1[    j]*win[len+j]
02214             "mulps         %%xmm4, %%xmm1 \n" // src0[len+i]*win[len+j]
02215             "mulps         %%xmm5, %%xmm0 \n" // src1[    j]*win[len+i]
02216             "addps         %%xmm3, %%xmm2 \n"
02217             "subps         %%xmm0, %%xmm1 \n"
02218             "shufps $0x1b, %%xmm2, %%xmm2 \n"
02219             "movaps        %%xmm1, (%2,%0) \n"
02220             "movaps        %%xmm2, (%2,%1) \n"
02221             "sub $16, %1 \n"
02222             "add $16, %0 \n"
02223             "jl 1b \n"
02224             :"+r"(i), "+r"(j)
02225             :"r"(dst+len), "r"(src0+len), "r"(src1), "r"(win+len)
02226         );
02227     }else
02228 #endif
02229         ff_vector_fmul_window_c(dst, src0, src1, win, add_bias, len);
02230 }
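/*
 * Illustrative scalar sketch (not part of the original file) of the windowed
 * overlap both versions above perform for add_bias == 0, following the asm
 * comments; the general case falls through to ff_vector_fmul_window_c.
 */
#if 0
static void vector_fmul_window_ref(float *dst, const float *src0,
                                   const float *src1, const float *win, int len)
{
    int i, j;
    dst  += len;
    win  += len;
    src0 += len;
    for (i = -len, j = len - 1; i < 0; i++, j--) {
        float s0 = src0[i];
        float s1 = src1[j];
        dst[i] = s0 * win[j] - s1 * win[i];
        dst[j] = s0 * win[i] + s1 * win[j];
    }
}
#endif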
02231 
02232 static void int32_to_float_fmul_scalar_sse(float *dst, const int *src, float mul, int len)
02233 {
02234     x86_reg i = -4*len;
02235     __asm__ volatile(
02236         "movss  %3, %%xmm4 \n"
02237         "shufps $0, %%xmm4, %%xmm4 \n"
02238         "1: \n"
02239         "cvtpi2ps   (%2,%0), %%xmm0 \n"
02240         "cvtpi2ps  8(%2,%0), %%xmm1 \n"
02241         "cvtpi2ps 16(%2,%0), %%xmm2 \n"
02242         "cvtpi2ps 24(%2,%0), %%xmm3 \n"
02243         "movlhps  %%xmm1,    %%xmm0 \n"
02244         "movlhps  %%xmm3,    %%xmm2 \n"
02245         "mulps    %%xmm4,    %%xmm0 \n"
02246         "mulps    %%xmm4,    %%xmm2 \n"
02247         "movaps   %%xmm0,   (%1,%0) \n"
02248         "movaps   %%xmm2, 16(%1,%0) \n"
02249         "add $32, %0 \n"
02250         "jl 1b \n"
02251         :"+r"(i)
02252         :"r"(dst+len), "r"(src+len), "m"(mul)
02253     );
02254 }
02255 
02256 static void int32_to_float_fmul_scalar_sse2(float *dst, const int *src, float mul, int len)
02257 {
02258     x86_reg i = -4*len;
02259     __asm__ volatile(
02260         "movss  %3, %%xmm4 \n"
02261         "shufps $0, %%xmm4, %%xmm4 \n"
02262         "1: \n"
02263         "cvtdq2ps   (%2,%0), %%xmm0 \n"
02264         "cvtdq2ps 16(%2,%0), %%xmm1 \n"
02265         "mulps    %%xmm4,    %%xmm0 \n"
02266         "mulps    %%xmm4,    %%xmm1 \n"
02267         "movaps   %%xmm0,   (%1,%0) \n"
02268         "movaps   %%xmm1, 16(%1,%0) \n"
02269         "add $32, %0 \n"
02270         "jl 1b \n"
02271         :"+r"(i)
02272         :"r"(dst+len), "r"(src+len), "m"(mul)
02273     );
02274 }
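/*
 * Illustrative scalar sketch (not part of the original file) of both versions
 * above: convert each 32-bit integer to float and scale it by mul.
 */
#if 0
static void int32_to_float_fmul_scalar_ref(float *dst, const int *src,
                                           float mul, int len)
{
    int i;
    for (i = 0; i < len; i++)
        dst[i] = src[i] * mul;
}
#endif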
02275 
02276 static void vector_clipf_sse(float *dst, const float *src, float min, float max,
02277                              int len)
02278 {
02279     x86_reg i = (len-16)*4;
02280     __asm__ volatile(
02281         "movss  %3, %%xmm4 \n"
02282         "movss  %4, %%xmm5 \n"
02283         "shufps $0, %%xmm4, %%xmm4 \n"
02284         "shufps $0, %%xmm5, %%xmm5 \n"
02285         "1: \n\t"
02286         "movaps    (%2,%0), %%xmm0 \n\t" // 3/1 on intel
02287         "movaps  16(%2,%0), %%xmm1 \n\t"
02288         "movaps  32(%2,%0), %%xmm2 \n\t"
02289         "movaps  48(%2,%0), %%xmm3 \n\t"
02290         "maxps      %%xmm4, %%xmm0 \n\t"
02291         "maxps      %%xmm4, %%xmm1 \n\t"
02292         "maxps      %%xmm4, %%xmm2 \n\t"
02293         "maxps      %%xmm4, %%xmm3 \n\t"
02294         "minps      %%xmm5, %%xmm0 \n\t"
02295         "minps      %%xmm5, %%xmm1 \n\t"
02296         "minps      %%xmm5, %%xmm2 \n\t"
02297         "minps      %%xmm5, %%xmm3 \n\t"
02298         "movaps  %%xmm0,   (%1,%0) \n\t"
02299         "movaps  %%xmm1, 16(%1,%0) \n\t"
02300         "movaps  %%xmm2, 32(%1,%0) \n\t"
02301         "movaps  %%xmm3, 48(%1,%0) \n\t"
02302         "sub  $64, %0 \n\t"
02303         "jge 1b \n\t"
02304         :"+&r"(i)
02305         :"r"(dst), "r"(src), "m"(min), "m"(max)
02306         :"memory"
02307     );
02308 }
02309 
02310 static void float_to_int16_3dnow(int16_t *dst, const float *src, long len){
02311     x86_reg reglen = len;
02312     // not bit-exact: pf2id uses different rounding than C and SSE
02313     __asm__ volatile(
02314         "add        %0          , %0        \n\t"
02315         "lea         (%2,%0,2)  , %2        \n\t"
02316         "add        %0          , %1        \n\t"
02317         "neg        %0                      \n\t"
02318         "1:                                 \n\t"
02319         "pf2id       (%2,%0,2)  , %%mm0     \n\t"
02320         "pf2id      8(%2,%0,2)  , %%mm1     \n\t"
02321         "pf2id     16(%2,%0,2)  , %%mm2     \n\t"
02322         "pf2id     24(%2,%0,2)  , %%mm3     \n\t"
02323         "packssdw   %%mm1       , %%mm0     \n\t"
02324         "packssdw   %%mm3       , %%mm2     \n\t"
02325         "movq       %%mm0       ,  (%1,%0)  \n\t"
02326         "movq       %%mm2       , 8(%1,%0)  \n\t"
02327         "add        $16         , %0        \n\t"
02328         " js 1b                             \n\t"
02329         "femms                              \n\t"
02330         :"+r"(reglen), "+r"(dst), "+r"(src)
02331     );
02332 }
02333 static void float_to_int16_sse(int16_t *dst, const float *src, long len){
02334     x86_reg reglen = len;
02335     __asm__ volatile(
02336         "add        %0          , %0        \n\t"
02337         "lea         (%2,%0,2)  , %2        \n\t"
02338         "add        %0          , %1        \n\t"
02339         "neg        %0                      \n\t"
02340         "1:                                 \n\t"
02341         "cvtps2pi    (%2,%0,2)  , %%mm0     \n\t"
02342         "cvtps2pi   8(%2,%0,2)  , %%mm1     \n\t"
02343         "cvtps2pi  16(%2,%0,2)  , %%mm2     \n\t"
02344         "cvtps2pi  24(%2,%0,2)  , %%mm3     \n\t"
02345         "packssdw   %%mm1       , %%mm0     \n\t"
02346         "packssdw   %%mm3       , %%mm2     \n\t"
02347         "movq       %%mm0       ,  (%1,%0)  \n\t"
02348         "movq       %%mm2       , 8(%1,%0)  \n\t"
02349         "add        $16         , %0        \n\t"
02350         " js 1b                             \n\t"
02351         "emms                               \n\t"
02352         :"+r"(reglen), "+r"(dst), "+r"(src)
02353     );
02354 }
02355 
02356 static void float_to_int16_sse2(int16_t *dst, const float *src, long len){
02357     x86_reg reglen = len;
02358     __asm__ volatile(
02359         "add        %0          , %0        \n\t"
02360         "lea         (%2,%0,2)  , %2        \n\t"
02361         "add        %0          , %1        \n\t"
02362         "neg        %0                      \n\t"
02363         "1:                                 \n\t"
02364         "cvtps2dq    (%2,%0,2)  , %%xmm0    \n\t"
02365         "cvtps2dq  16(%2,%0,2)  , %%xmm1    \n\t"
02366         "packssdw   %%xmm1      , %%xmm0    \n\t"
02367         "movdqa     %%xmm0      ,  (%1,%0)  \n\t"
02368         "add        $16         , %0        \n\t"
02369         " js 1b                             \n\t"
02370         :"+r"(reglen), "+r"(dst), "+r"(src)
02371     );
02372 }
02373 
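/*
 * Illustrative scalar sketch (not part of the original file) of the
 * conversion the three versions above perform: round each sample to an
 * integer and saturate it to the int16_t range (packssdw provides the
 * saturation in the asm).  The 3DNow! variant is flagged as not bit-exact
 * above because pf2id truncates rather than rounding to nearest.
 * lrintf() needs <math.h>.
 */
#if 0
static void float_to_int16_ref(int16_t *dst, const float *src, long len)
{
    long i;
    for (i = 0; i < len; i++) {
        int v = lrintf(src[i]);
        dst[i] = v < -32768 ? -32768 : v > 32767 ? 32767 : v;
    }
}
#endif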
02374 void ff_float_to_int16_interleave6_sse(int16_t *dst, const float **src, int len);
02375 void ff_float_to_int16_interleave6_3dnow(int16_t *dst, const float **src, int len);
02376 void ff_float_to_int16_interleave6_3dn2(int16_t *dst, const float **src, int len);
02377 int32_t ff_scalarproduct_int16_mmx2(int16_t *v1, int16_t *v2, int order, int shift);
02378 int32_t ff_scalarproduct_int16_sse2(int16_t *v1, int16_t *v2, int order, int shift);
02379 int32_t ff_scalarproduct_and_madd_int16_mmx2(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul);
02380 int32_t ff_scalarproduct_and_madd_int16_sse2(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul);
02381 int32_t ff_scalarproduct_and_madd_int16_ssse3(int16_t *v1, int16_t *v2, int16_t *v3, int order, int mul);
02382 void ff_add_hfyu_median_prediction_mmx2(uint8_t *dst, const uint8_t *top, const uint8_t *diff, int w, int *left, int *left_top);
02383 int  ff_add_hfyu_left_prediction_ssse3(uint8_t *dst, const uint8_t *src, int w, int left);
02384 int  ff_add_hfyu_left_prediction_sse4(uint8_t *dst, const uint8_t *src, int w, int left);
02385 void ff_x264_deblock_v_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
02386 void ff_x264_deblock_h_luma_sse2(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
02387 void ff_x264_deblock_h_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
02388 void ff_x264_deblock_v_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
02389 void ff_x264_deblock_h_luma_intra_sse2(uint8_t *pix, int stride, int alpha, int beta);
02390 
02391 #if HAVE_YASM && ARCH_X86_32
02392 void ff_x264_deblock_v8_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta);
02393 static void ff_x264_deblock_v_luma_intra_mmxext(uint8_t *pix, int stride, int alpha, int beta)
02394 {
02395     ff_x264_deblock_v8_luma_intra_mmxext(pix+0, stride, alpha, beta);
02396     ff_x264_deblock_v8_luma_intra_mmxext(pix+8, stride, alpha, beta);
02397 }
02398 #elif !HAVE_YASM
02399 #define ff_float_to_int16_interleave6_sse(a,b,c)   float_to_int16_interleave_misc_sse(a,b,c,6)
02400 #define ff_float_to_int16_interleave6_3dnow(a,b,c) float_to_int16_interleave_misc_3dnow(a,b,c,6)
02401 #define ff_float_to_int16_interleave6_3dn2(a,b,c)  float_to_int16_interleave_misc_3dnow(a,b,c,6)
02402 #endif
02403 #define ff_float_to_int16_interleave6_sse2 ff_float_to_int16_interleave6_sse
02404 
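/* FLOAT_TO_INT16_INTERLEAVE(cpu, body) expands to two functions: a generic
 * convert-then-reorder fallback for arbitrary channel counts, and a dispatcher
 * that uses the plain converter for mono, the inline-asm `body' for stereo and
 * the external interleave6 routines for 6 channels. */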
02405 #define FLOAT_TO_INT16_INTERLEAVE(cpu, body) \
02406 /* gcc pessimizes register allocation if this is in the same function as float_to_int16_interleave_sse2*/\
02407 static av_noinline void float_to_int16_interleave_misc_##cpu(int16_t *dst, const float **src, long len, int channels){\
02408     DECLARE_ALIGNED(16, int16_t, tmp)[len];\
02409     int i,j,c;\
02410     for(c=0; c<channels; c++){\
02411         float_to_int16_##cpu(tmp, src[c], len);\
02412         for(i=0, j=c; i<len; i++, j+=channels)\
02413             dst[j] = tmp[i];\
02414     }\
02415 }\
02416 \
02417 static void float_to_int16_interleave_##cpu(int16_t *dst, const float **src, long len, int channels){\
02418     if(channels==1)\
02419         float_to_int16_##cpu(dst, src[0], len);\
02420     else if(channels==2){\
02421         x86_reg reglen = len; \
02422         const float *src0 = src[0];\
02423         const float *src1 = src[1];\
02424         __asm__ volatile(\
02425             "shl $2, %0 \n"\
02426             "add %0, %1 \n"\
02427             "add %0, %2 \n"\
02428             "add %0, %3 \n"\
02429             "neg %0 \n"\
02430             body\
02431             :"+r"(reglen), "+r"(dst), "+r"(src0), "+r"(src1)\
02432         );\
02433     }else if(channels==6){\
02434         ff_float_to_int16_interleave6_##cpu(dst, src, len);\
02435     }else\
02436         float_to_int16_interleave_misc_##cpu(dst, src, len, channels);\
02437 }
02438 
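/* Stereo bodies for the macro above: the 3DNow!/SSE versions convert four
 * samples per channel with pf2id/cvtps2pi and interleave the channels with
 * punpcklwd/punpckhwd; the SSE2 body does the same in xmm registers, using
 * movhlps + punpcklwd to restore sample order after packssdw. */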
02439 FLOAT_TO_INT16_INTERLEAVE(3dnow,
02440     "1:                         \n"
02441     "pf2id     (%2,%0), %%mm0   \n"
02442     "pf2id    8(%2,%0), %%mm1   \n"
02443     "pf2id     (%3,%0), %%mm2   \n"
02444     "pf2id    8(%3,%0), %%mm3   \n"
02445     "packssdw    %%mm1, %%mm0   \n"
02446     "packssdw    %%mm3, %%mm2   \n"
02447     "movq        %%mm0, %%mm1   \n"
02448     "punpcklwd   %%mm2, %%mm0   \n"
02449     "punpckhwd   %%mm2, %%mm1   \n"
02450     "movq        %%mm0,  (%1,%0)\n"
02451     "movq        %%mm1, 8(%1,%0)\n"
02452     "add $16, %0                \n"
02453     "js 1b                      \n"
02454     "femms                      \n"
02455 )
02456 
02457 FLOAT_TO_INT16_INTERLEAVE(sse,
02458     "1:                         \n"
02459     "cvtps2pi  (%2,%0), %%mm0   \n"
02460     "cvtps2pi 8(%2,%0), %%mm1   \n"
02461     "cvtps2pi  (%3,%0), %%mm2   \n"
02462     "cvtps2pi 8(%3,%0), %%mm3   \n"
02463     "packssdw    %%mm1, %%mm0   \n"
02464     "packssdw    %%mm3, %%mm2   \n"
02465     "movq        %%mm0, %%mm1   \n"
02466     "punpcklwd   %%mm2, %%mm0   \n"
02467     "punpckhwd   %%mm2, %%mm1   \n"
02468     "movq        %%mm0,  (%1,%0)\n"
02469     "movq        %%mm1, 8(%1,%0)\n"
02470     "add $16, %0                \n"
02471     "js 1b                      \n"
02472     "emms                       \n"
02473 )
02474 
02475 FLOAT_TO_INT16_INTERLEAVE(sse2,
02476     "1:                         \n"
02477     "cvtps2dq  (%2,%0), %%xmm0  \n"
02478     "cvtps2dq  (%3,%0), %%xmm1  \n"
02479     "packssdw   %%xmm1, %%xmm0  \n"
02480     "movhlps    %%xmm0, %%xmm1  \n"
02481     "punpcklwd  %%xmm1, %%xmm0  \n"
02482     "movdqa     %%xmm0, (%1,%0) \n"
02483     "add $16, %0                \n"
02484     "js 1b                      \n"
02485 )
02486 
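/* The 3DNow!ext flavour only differs for the 6-channel case; everything else
 * falls through to the plain 3DNow! dispatcher. */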
02487 static void float_to_int16_interleave_3dn2(int16_t *dst, const float **src, long len, int channels){
02488     if(channels==6)
02489         ff_float_to_int16_interleave6_3dn2(dst, src, len);
02490     else
02491         float_to_int16_interleave_3dnow(dst, src, len, channels);
02492 }
02493 
02494 float ff_scalarproduct_float_sse(const float *v1, const float *v2, int order);
02495 
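/* Entry point: query the CPU with mm_support(), apply the avctx->dsp_mask
 * override, then replace DSPContext function pointers with MMX/MMX2/3DNow!/
 * SSE/SSE2/SSSE3 implementations as the detected flags allow. */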
02496 void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx)
02497 {
02498     mm_flags = mm_support();
02499 
02500     if (avctx->dsp_mask) {
02501         if (avctx->dsp_mask & FF_MM_FORCE)
02502             mm_flags |= (avctx->dsp_mask & 0xffff);
02503         else
02504             mm_flags &= ~(avctx->dsp_mask & 0xffff);
02505     }
02506 
02507 #if 0
02508     av_log(avctx, AV_LOG_INFO, "libavcodec: CPU flags:");
02509     if (mm_flags & FF_MM_MMX)
02510         av_log(avctx, AV_LOG_INFO, " mmx");
02511     if (mm_flags & FF_MM_MMX2)
02512         av_log(avctx, AV_LOG_INFO, " mmx2");
02513     if (mm_flags & FF_MM_3DNOW)
02514         av_log(avctx, AV_LOG_INFO, " 3dnow");
02515     if (mm_flags & FF_MM_SSE)
02516         av_log(avctx, AV_LOG_INFO, " sse");
02517     if (mm_flags & FF_MM_SSE2)
02518         av_log(avctx, AV_LOG_INFO, " sse2");
02519     av_log(avctx, AV_LOG_INFO, "\n");
02520 #endif
02521 
02522     if (mm_flags & FF_MM_MMX) {
02523         const int idct_algo= avctx->idct_algo;
02524 
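        /* IDCT selection: simple-MMX, libmpeg2, VP3, CAVS or Xvid depending on
           idct_algo; with lowres != 0 the default C IDCT is kept. */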
02525         if(avctx->lowres==0){
02526             if(idct_algo==FF_IDCT_AUTO || idct_algo==FF_IDCT_SIMPLEMMX){
02527                 c->idct_put= ff_simple_idct_put_mmx;
02528                 c->idct_add= ff_simple_idct_add_mmx;
02529                 c->idct    = ff_simple_idct_mmx;
02530                 c->idct_permutation_type= FF_SIMPLE_IDCT_PERM;
02531 #if CONFIG_GPL
02532             }else if(idct_algo==FF_IDCT_LIBMPEG2MMX){
02533                 if(mm_flags & FF_MM_MMX2){
02534                     c->idct_put= ff_libmpeg2mmx2_idct_put;
02535                     c->idct_add= ff_libmpeg2mmx2_idct_add;
02536                     c->idct    = ff_mmxext_idct;
02537                 }else{
02538                     c->idct_put= ff_libmpeg2mmx_idct_put;
02539                     c->idct_add= ff_libmpeg2mmx_idct_add;
02540                     c->idct    = ff_mmx_idct;
02541                 }
02542                 c->idct_permutation_type= FF_LIBMPEG2_IDCT_PERM;
02543 #endif
02544             }else if((CONFIG_VP3_DECODER || CONFIG_VP5_DECODER || CONFIG_VP6_DECODER) &&
02545                      idct_algo==FF_IDCT_VP3){
02546                 if(mm_flags & FF_MM_SSE2){
02547                     c->idct_put= ff_vp3_idct_put_sse2;
02548                     c->idct_add= ff_vp3_idct_add_sse2;
02549                     c->idct    = ff_vp3_idct_sse2;
02550                     c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02551                 }else{
02552                     c->idct_put= ff_vp3_idct_put_mmx;
02553                     c->idct_add= ff_vp3_idct_add_mmx;
02554                     c->idct    = ff_vp3_idct_mmx;
02555                     c->idct_permutation_type= FF_PARTTRANS_IDCT_PERM;
02556                 }
02557             }else if(idct_algo==FF_IDCT_CAVS){
02558                 c->idct_permutation_type= FF_TRANSPOSE_IDCT_PERM;
02559             }else if(idct_algo==FF_IDCT_XVIDMMX){
02560                 if(mm_flags & FF_MM_SSE2){
02561                     c->idct_put= ff_idct_xvid_sse2_put;
02562                     c->idct_add= ff_idct_xvid_sse2_add;
02563                     c->idct    = ff_idct_xvid_sse2;
02564                     c->idct_permutation_type= FF_SSE2_IDCT_PERM;
02565                 }else if(mm_flags & FF_MM_MMX2){
02566                     c->idct_put= ff_idct_xvid_mmx2_put;
02567                     c->idct_add= ff_idct_xvid_mmx2_add;
02568                     c->idct    = ff_idct_xvid_mmx2;
02569                 }else{
02570                     c->idct_put= ff_idct_xvid_mmx_put;
02571                     c->idct_add= ff_idct_xvid_mmx_add;
02572                     c->idct    = ff_idct_xvid_mmx;
02573                 }
02574             }
02575         }
02576 
02577         c->put_pixels_clamped = put_pixels_clamped_mmx;
02578         c->put_signed_pixels_clamped = put_signed_pixels_clamped_mmx;
02579         c->add_pixels_clamped = add_pixels_clamped_mmx;
02580         c->clear_block  = clear_block_mmx;
02581         c->clear_blocks = clear_blocks_mmx;
02582         if ((mm_flags & FF_MM_SSE) &&
02583             !(CONFIG_MPEG_XVMC_DECODER && avctx->xvmc_acceleration > 1)){
02584             /* XvMCCreateBlocks() may not allocate 16-byte aligned blocks */
02585             c->clear_block  = clear_block_sse;
02586             c->clear_blocks = clear_blocks_sse;
02587         }
02588 
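/* SET_HPEL_FUNCS fills one row of a half-pel table: the plain copy plus the
   x2, y2 and xy2 interpolations for the given block size and CPU suffix. */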
02589 #define SET_HPEL_FUNCS(PFX, IDX, SIZE, CPU) \
02590         c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## SIZE ## _ ## CPU; \
02591         c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## SIZE ## _x2_ ## CPU; \
02592         c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## SIZE ## _y2_ ## CPU; \
02593         c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## SIZE ## _xy2_ ## CPU
02594 
02595         SET_HPEL_FUNCS(put, 0, 16, mmx);
02596         SET_HPEL_FUNCS(put_no_rnd, 0, 16, mmx);
02597         SET_HPEL_FUNCS(avg, 0, 16, mmx);
02598         SET_HPEL_FUNCS(avg_no_rnd, 0, 16, mmx);
02599         SET_HPEL_FUNCS(put, 1, 8, mmx);
02600         SET_HPEL_FUNCS(put_no_rnd, 1, 8, mmx);
02601         SET_HPEL_FUNCS(avg, 1, 8, mmx);
02602         SET_HPEL_FUNCS(avg_no_rnd, 1, 8, mmx);
02603 
02604         c->gmc= gmc_mmx;
02605 
02606         c->add_bytes= add_bytes_mmx;
02607         c->add_bytes_l2= add_bytes_l2_mmx;
02608 
02609         c->draw_edges = draw_edges_mmx;
02610 
02611         if (CONFIG_H263_DECODER || CONFIG_H263_ENCODER) {
02612             c->h263_v_loop_filter= h263_v_loop_filter_mmx;
02613             c->h263_h_loop_filter= h263_h_loop_filter_mmx;
02614         }
02615         c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_mmx_rnd;
02616         c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_mmx;
02617         c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_mmx_nornd;
02618 
02619         c->put_rv40_chroma_pixels_tab[0]= put_rv40_chroma_mc8_mmx;
02620         c->put_rv40_chroma_pixels_tab[1]= put_rv40_chroma_mc4_mmx;
02621 
02622         if (CONFIG_VP6_DECODER) {
02623             c->vp6_filter_diag4 = ff_vp6_filter_diag4_mmx;
02624         }
02625 
02626         if (mm_flags & FF_MM_MMX2) {
02627             c->prefetch = prefetch_mmx2;
02628 
02629             c->put_pixels_tab[0][1] = put_pixels16_x2_mmx2;
02630             c->put_pixels_tab[0][2] = put_pixels16_y2_mmx2;
02631 
02632             c->avg_pixels_tab[0][0] = avg_pixels16_mmx2;
02633             c->avg_pixels_tab[0][1] = avg_pixels16_x2_mmx2;
02634             c->avg_pixels_tab[0][2] = avg_pixels16_y2_mmx2;
02635 
02636             c->put_pixels_tab[1][1] = put_pixels8_x2_mmx2;
02637             c->put_pixels_tab[1][2] = put_pixels8_y2_mmx2;
02638 
02639             c->avg_pixels_tab[1][0] = avg_pixels8_mmx2;
02640             c->avg_pixels_tab[1][1] = avg_pixels8_x2_mmx2;
02641             c->avg_pixels_tab[1][2] = avg_pixels8_y2_mmx2;
02642 
02643             if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02644                 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_mmx2;
02645                 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_mmx2;
02646                 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_mmx2;
02647                 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_mmx2;
02648                 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_mmx2;
02649                 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_mmx2;
02650 
02651                 if (CONFIG_VP3_DECODER) {
02652                     c->vp3_v_loop_filter= ff_vp3_v_loop_filter_mmx2;
02653                     c->vp3_h_loop_filter= ff_vp3_h_loop_filter_mmx2;
02654                 }
02655             }
02656             if (CONFIG_VP3_DECODER) {
02657                 c->vp3_idct_dc_add = ff_vp3_idct_dc_add_mmx2;
02658             }
02659 
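/* SET_QPEL_FUNCS fills all 16 quarter-pel positions (mc00..mc33) of one table
   row for the given prefix, block size and CPU suffix. */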
02660 #define SET_QPEL_FUNCS(PFX, IDX, SIZE, CPU) \
02661             c->PFX ## _pixels_tab[IDX][ 0] = PFX ## SIZE ## _mc00_ ## CPU; \
02662             c->PFX ## _pixels_tab[IDX][ 1] = PFX ## SIZE ## _mc10_ ## CPU; \
02663             c->PFX ## _pixels_tab[IDX][ 2] = PFX ## SIZE ## _mc20_ ## CPU; \
02664             c->PFX ## _pixels_tab[IDX][ 3] = PFX ## SIZE ## _mc30_ ## CPU; \
02665             c->PFX ## _pixels_tab[IDX][ 4] = PFX ## SIZE ## _mc01_ ## CPU; \
02666             c->PFX ## _pixels_tab[IDX][ 5] = PFX ## SIZE ## _mc11_ ## CPU; \
02667             c->PFX ## _pixels_tab[IDX][ 6] = PFX ## SIZE ## _mc21_ ## CPU; \
02668             c->PFX ## _pixels_tab[IDX][ 7] = PFX ## SIZE ## _mc31_ ## CPU; \
02669             c->PFX ## _pixels_tab[IDX][ 8] = PFX ## SIZE ## _mc02_ ## CPU; \
02670             c->PFX ## _pixels_tab[IDX][ 9] = PFX ## SIZE ## _mc12_ ## CPU; \
02671             c->PFX ## _pixels_tab[IDX][10] = PFX ## SIZE ## _mc22_ ## CPU; \
02672             c->PFX ## _pixels_tab[IDX][11] = PFX ## SIZE ## _mc32_ ## CPU; \
02673             c->PFX ## _pixels_tab[IDX][12] = PFX ## SIZE ## _mc03_ ## CPU; \
02674             c->PFX ## _pixels_tab[IDX][13] = PFX ## SIZE ## _mc13_ ## CPU; \
02675             c->PFX ## _pixels_tab[IDX][14] = PFX ## SIZE ## _mc23_ ## CPU; \
02676             c->PFX ## _pixels_tab[IDX][15] = PFX ## SIZE ## _mc33_ ## CPU
02677 
02678             SET_QPEL_FUNCS(put_qpel, 0, 16, mmx2);
02679             SET_QPEL_FUNCS(put_qpel, 1, 8, mmx2);
02680             SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, mmx2);
02681             SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, mmx2);
02682             SET_QPEL_FUNCS(avg_qpel, 0, 16, mmx2);
02683             SET_QPEL_FUNCS(avg_qpel, 1, 8, mmx2);
02684 
02685             SET_QPEL_FUNCS(put_h264_qpel, 0, 16, mmx2);
02686             SET_QPEL_FUNCS(put_h264_qpel, 1, 8, mmx2);
02687             SET_QPEL_FUNCS(put_h264_qpel, 2, 4, mmx2);
02688             SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, mmx2);
02689             SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, mmx2);
02690             SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, mmx2);
02691 
02692             SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, mmx2);
02693             SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, mmx2);
02694             SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, mmx2);
02695             SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, mmx2);
02696 
02697             c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_mmx2;
02698             c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_mmx2;
02699 
02700             c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_mmx2_nornd;
02701 
02702             c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_mmx2_rnd;
02703             c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_mmx2;
02704             c->avg_h264_chroma_pixels_tab[2]= avg_h264_chroma_mc2_mmx2;
02705             c->put_h264_chroma_pixels_tab[2]= put_h264_chroma_mc2_mmx2;
02706 
02707 #if HAVE_YASM
02708             c->add_hfyu_median_prediction = ff_add_hfyu_median_prediction_mmx2;
02709 #endif
02710 #if HAVE_7REGS && HAVE_TEN_OPERANDS
02711             if( mm_flags&FF_MM_3DNOW )
02712                 c->add_hfyu_median_prediction = add_hfyu_median_prediction_cmov;
02713 #endif
02714 
02715             if (CONFIG_CAVS_DECODER)
02716                 ff_cavsdsp_init_mmx2(c, avctx);
02717 
02718             if (CONFIG_VC1_DECODER)
02719                 ff_vc1dsp_init_mmx(c, avctx);
02720 
02721             c->add_png_paeth_prediction= add_png_paeth_prediction_mmx2;
02722         } else if (mm_flags & FF_MM_3DNOW) {
02723             c->prefetch = prefetch_3dnow;
02724 
02725             c->put_pixels_tab[0][1] = put_pixels16_x2_3dnow;
02726             c->put_pixels_tab[0][2] = put_pixels16_y2_3dnow;
02727 
02728             c->avg_pixels_tab[0][0] = avg_pixels16_3dnow;
02729             c->avg_pixels_tab[0][1] = avg_pixels16_x2_3dnow;
02730             c->avg_pixels_tab[0][2] = avg_pixels16_y2_3dnow;
02731 
02732             c->put_pixels_tab[1][1] = put_pixels8_x2_3dnow;
02733             c->put_pixels_tab[1][2] = put_pixels8_y2_3dnow;
02734 
02735             c->avg_pixels_tab[1][0] = avg_pixels8_3dnow;
02736             c->avg_pixels_tab[1][1] = avg_pixels8_x2_3dnow;
02737             c->avg_pixels_tab[1][2] = avg_pixels8_y2_3dnow;
02738 
02739             if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02740                 c->put_no_rnd_pixels_tab[0][1] = put_no_rnd_pixels16_x2_3dnow;
02741                 c->put_no_rnd_pixels_tab[0][2] = put_no_rnd_pixels16_y2_3dnow;
02742                 c->put_no_rnd_pixels_tab[1][1] = put_no_rnd_pixels8_x2_3dnow;
02743                 c->put_no_rnd_pixels_tab[1][2] = put_no_rnd_pixels8_y2_3dnow;
02744                 c->avg_pixels_tab[0][3] = avg_pixels16_xy2_3dnow;
02745                 c->avg_pixels_tab[1][3] = avg_pixels8_xy2_3dnow;
02746             }
02747 
02748             SET_QPEL_FUNCS(put_qpel, 0, 16, 3dnow);
02749             SET_QPEL_FUNCS(put_qpel, 1, 8, 3dnow);
02750             SET_QPEL_FUNCS(put_no_rnd_qpel, 0, 16, 3dnow);
02751             SET_QPEL_FUNCS(put_no_rnd_qpel, 1, 8, 3dnow);
02752             SET_QPEL_FUNCS(avg_qpel, 0, 16, 3dnow);
02753             SET_QPEL_FUNCS(avg_qpel, 1, 8, 3dnow);
02754 
02755             SET_QPEL_FUNCS(put_h264_qpel, 0, 16, 3dnow);
02756             SET_QPEL_FUNCS(put_h264_qpel, 1, 8, 3dnow);
02757             SET_QPEL_FUNCS(put_h264_qpel, 2, 4, 3dnow);
02758             SET_QPEL_FUNCS(avg_h264_qpel, 0, 16, 3dnow);
02759             SET_QPEL_FUNCS(avg_h264_qpel, 1, 8, 3dnow);
02760             SET_QPEL_FUNCS(avg_h264_qpel, 2, 4, 3dnow);
02761 
02762             SET_QPEL_FUNCS(put_2tap_qpel, 0, 16, 3dnow);
02763             SET_QPEL_FUNCS(put_2tap_qpel, 1, 8, 3dnow);
02764             SET_QPEL_FUNCS(avg_2tap_qpel, 0, 16, 3dnow);
02765             SET_QPEL_FUNCS(avg_2tap_qpel, 1, 8, 3dnow);
02766 
02767             c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_3dnow_rnd;
02768             c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_3dnow;
02769 
02770             c->avg_rv40_chroma_pixels_tab[0]= avg_rv40_chroma_mc8_3dnow;
02771             c->avg_rv40_chroma_pixels_tab[1]= avg_rv40_chroma_mc4_3dnow;
02772 
02773             if (CONFIG_CAVS_DECODER)
02774                 ff_cavsdsp_init_3dnow(c, avctx);
02775         }
02776 
02777 
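/* H264_QPEL_FUNCS(x, y, CPU) overrides the (x,y) quarter-pel entry in the
   16x16 and 8x8 put/avg H.264 tables with the given CPU's implementation. */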
02778 #define H264_QPEL_FUNCS(x, y, CPU)\
02779             c->put_h264_qpel_pixels_tab[0][x+y*4] = put_h264_qpel16_mc##x##y##_##CPU;\
02780             c->put_h264_qpel_pixels_tab[1][x+y*4] = put_h264_qpel8_mc##x##y##_##CPU;\
02781             c->avg_h264_qpel_pixels_tab[0][x+y*4] = avg_h264_qpel16_mc##x##y##_##CPU;\
02782             c->avg_h264_qpel_pixels_tab[1][x+y*4] = avg_h264_qpel8_mc##x##y##_##CPU;
02783         if((mm_flags & FF_MM_SSE2) && !(mm_flags & FF_MM_3DNOW)){
02784             // these functions are slower than mmx on AMD, but faster on Intel
02785             c->put_pixels_tab[0][0] = put_pixels16_sse2;
02786             c->avg_pixels_tab[0][0] = avg_pixels16_sse2;
02787             H264_QPEL_FUNCS(0, 0, sse2);
02788         }
02789         if(mm_flags & FF_MM_SSE2){
02790             H264_QPEL_FUNCS(0, 1, sse2);
02791             H264_QPEL_FUNCS(0, 2, sse2);
02792             H264_QPEL_FUNCS(0, 3, sse2);
02793             H264_QPEL_FUNCS(1, 1, sse2);
02794             H264_QPEL_FUNCS(1, 2, sse2);
02795             H264_QPEL_FUNCS(1, 3, sse2);
02796             H264_QPEL_FUNCS(2, 1, sse2);
02797             H264_QPEL_FUNCS(2, 2, sse2);
02798             H264_QPEL_FUNCS(2, 3, sse2);
02799             H264_QPEL_FUNCS(3, 1, sse2);
02800             H264_QPEL_FUNCS(3, 2, sse2);
02801             H264_QPEL_FUNCS(3, 3, sse2);
02802 
02803             if (CONFIG_VP6_DECODER) {
02804                 c->vp6_filter_diag4 = ff_vp6_filter_diag4_sse2;
02805             }
02806         }
02807 #if HAVE_SSSE3
02808         if(mm_flags & FF_MM_SSSE3){
02809             H264_QPEL_FUNCS(1, 0, ssse3);
02810             H264_QPEL_FUNCS(1, 1, ssse3);
02811             H264_QPEL_FUNCS(1, 2, ssse3);
02812             H264_QPEL_FUNCS(1, 3, ssse3);
02813             H264_QPEL_FUNCS(2, 0, ssse3);
02814             H264_QPEL_FUNCS(2, 1, ssse3);
02815             H264_QPEL_FUNCS(2, 2, ssse3);
02816             H264_QPEL_FUNCS(2, 3, ssse3);
02817             H264_QPEL_FUNCS(3, 0, ssse3);
02818             H264_QPEL_FUNCS(3, 1, ssse3);
02819             H264_QPEL_FUNCS(3, 2, ssse3);
02820             H264_QPEL_FUNCS(3, 3, ssse3);
02821             c->put_no_rnd_vc1_chroma_pixels_tab[0]= put_vc1_chroma_mc8_ssse3_nornd;
02822             c->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_vc1_chroma_mc8_ssse3_nornd;
02823             c->put_h264_chroma_pixels_tab[0]= put_h264_chroma_mc8_ssse3_rnd;
02824             c->avg_h264_chroma_pixels_tab[0]= avg_h264_chroma_mc8_ssse3_rnd;
02825             c->put_h264_chroma_pixels_tab[1]= put_h264_chroma_mc4_ssse3;
02826             c->avg_h264_chroma_pixels_tab[1]= avg_h264_chroma_mc4_ssse3;
02827             c->add_png_paeth_prediction= add_png_paeth_prediction_ssse3;
02828 #if HAVE_YASM
02829             c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_ssse3;
02830             if (mm_flags & FF_MM_SSE4) // not really SSE4 code; it is just slow on Conroe, so the SSE4 flag is used to exclude it there
02831                 c->add_hfyu_left_prediction = ff_add_hfyu_left_prediction_sse4;
02832 #endif
02833         }
02834 #endif
02835 
02836         if(mm_flags & FF_MM_3DNOW){
02837             c->vorbis_inverse_coupling = vorbis_inverse_coupling_3dnow;
02838             c->vector_fmul = vector_fmul_3dnow;
02839             if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02840                 c->float_to_int16 = float_to_int16_3dnow;
02841                 c->float_to_int16_interleave = float_to_int16_interleave_3dnow;
02842             }
02843         }
02844         if(mm_flags & FF_MM_3DNOWEXT){
02845             c->vector_fmul_reverse = vector_fmul_reverse_3dnow2;
02846             c->vector_fmul_window = vector_fmul_window_3dnow2;
02847             if(!(avctx->flags & CODEC_FLAG_BITEXACT)){
02848                 c->float_to_int16_interleave = float_to_int16_interleave_3dn2;
02849             }
02850         }
02851         if(mm_flags & FF_MM_MMX2){
02852 #if HAVE_YASM
02853             c->scalarproduct_int16 = ff_scalarproduct_int16_mmx2;
02854             c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_mmx2;
02855 #endif
02856         }
02857         if(mm_flags & FF_MM_SSE){
02858             c->vorbis_inverse_coupling = vorbis_inverse_coupling_sse;
02859             c->ac3_downmix = ac3_downmix_sse;
02860             c->vector_fmul = vector_fmul_sse;
02861             c->vector_fmul_reverse = vector_fmul_reverse_sse;
02862             c->vector_fmul_add = vector_fmul_add_sse;
02863             c->vector_fmul_window = vector_fmul_window_sse;
02864             c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse;
02865             c->vector_clipf = vector_clipf_sse;
02866             c->float_to_int16 = float_to_int16_sse;
02867             c->float_to_int16_interleave = float_to_int16_interleave_sse;
02868 #if HAVE_YASM
02869             c->scalarproduct_float = ff_scalarproduct_float_sse;
02870 #endif
02871         }
02872         if(mm_flags & FF_MM_3DNOW)
02873             c->vector_fmul_add = vector_fmul_add_3dnow; // faster than sse
02874         if(mm_flags & FF_MM_SSE2){
02875             c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_sse2;
02876             c->float_to_int16 = float_to_int16_sse2;
02877             c->float_to_int16_interleave = float_to_int16_interleave_sse2;
02878 #if HAVE_YASM
02879             c->scalarproduct_int16 = ff_scalarproduct_int16_sse2;
02880             c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_sse2;
02881 #endif
02882         }
02883         if((mm_flags & FF_MM_SSSE3) && !(mm_flags & (FF_MM_SSE42|FF_MM_3DNOW)) && HAVE_YASM) // cachesplit: only pays off where cache-split (unaligned) loads are slow
02884             c->scalarproduct_and_madd_int16 = ff_scalarproduct_and_madd_int16_ssse3;
02885     }
02886 
02887     if (CONFIG_ENCODERS)
02888         dsputilenc_init_mmx(c, avctx);
02889 
02890 #if 0
02891     // for speed testing
02892     get_pixels = just_return;
02893     put_pixels_clamped = just_return;
02894     add_pixels_clamped = just_return;
02895 
02896     pix_abs16x16 = just_return;
02897     pix_abs16x16_x2 = just_return;
02898     pix_abs16x16_y2 = just_return;
02899     pix_abs16x16_xy2 = just_return;
02900 
02901     put_pixels_tab[0] = just_return;
02902     put_pixels_tab[1] = just_return;
02903     put_pixels_tab[2] = just_return;
02904     put_pixels_tab[3] = just_return;
02905 
02906     put_no_rnd_pixels_tab[0] = just_return;
02907     put_no_rnd_pixels_tab[1] = just_return;
02908     put_no_rnd_pixels_tab[2] = just_return;
02909     put_no_rnd_pixels_tab[3] = just_return;
02910 
02911     avg_pixels_tab[0] = just_return;
02912     avg_pixels_tab[1] = just_return;
02913     avg_pixels_tab[2] = just_return;
02914     avg_pixels_tab[3] = just_return;
02915 
02916     avg_no_rnd_pixels_tab[0] = just_return;
02917     avg_no_rnd_pixels_tab[1] = just_return;
02918     avg_no_rnd_pixels_tab[2] = just_return;
02919     avg_no_rnd_pixels_tab[3] = just_return;
02920 
02921     //av_fdct = just_return;
02922     //ff_idct = just_return;
02923 #endif
02924 }
02925 
02926 #if CONFIG_H264DSP
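/* Same pattern for the H.264-specific DSP context: IDCT, loop-filter and
 * (bi)weight functions are upgraded from MMX to MMX2 and SSE2 variants as the
 * CPU flags permit. */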
02927 void ff_h264dsp_init_x86(H264DSPContext *c)
02928 {
02929     mm_flags = mm_support();
02930 
02931     if (mm_flags & FF_MM_MMX) {
02932         c->h264_idct_dc_add=
02933         c->h264_idct_add= ff_h264_idct_add_mmx;
02934         c->h264_idct8_dc_add=
02935         c->h264_idct8_add= ff_h264_idct8_add_mmx;
02936 
02937         c->h264_idct_add16     = ff_h264_idct_add16_mmx;
02938         c->h264_idct8_add4     = ff_h264_idct8_add4_mmx;
02939         c->h264_idct_add8      = ff_h264_idct_add8_mmx;
02940         c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx;
02941 
02942         if (mm_flags & FF_MM_MMX2) {
02943             c->h264_idct_dc_add= ff_h264_idct_dc_add_mmx2;
02944             c->h264_idct8_dc_add= ff_h264_idct8_dc_add_mmx2;
02945             c->h264_idct_add16     = ff_h264_idct_add16_mmx2;
02946             c->h264_idct8_add4     = ff_h264_idct8_add4_mmx2;
02947             c->h264_idct_add8      = ff_h264_idct_add8_mmx2;
02948             c->h264_idct_add16intra= ff_h264_idct_add16intra_mmx2;
02949 
02950             c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_mmx2;
02951             c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_mmx2;
02952             c->h264_v_loop_filter_chroma= h264_v_loop_filter_chroma_mmx2;
02953             c->h264_h_loop_filter_chroma= h264_h_loop_filter_chroma_mmx2;
02954             c->h264_v_loop_filter_chroma_intra= h264_v_loop_filter_chroma_intra_mmx2;
02955             c->h264_h_loop_filter_chroma_intra= h264_h_loop_filter_chroma_intra_mmx2;
02956             c->h264_loop_filter_strength= h264_loop_filter_strength_mmx2;
02957 
02958             c->weight_h264_pixels_tab[0]= ff_h264_weight_16x16_mmx2;
02959             c->weight_h264_pixels_tab[1]= ff_h264_weight_16x8_mmx2;
02960             c->weight_h264_pixels_tab[2]= ff_h264_weight_8x16_mmx2;
02961             c->weight_h264_pixels_tab[3]= ff_h264_weight_8x8_mmx2;
02962             c->weight_h264_pixels_tab[4]= ff_h264_weight_8x4_mmx2;
02963             c->weight_h264_pixels_tab[5]= ff_h264_weight_4x8_mmx2;
02964             c->weight_h264_pixels_tab[6]= ff_h264_weight_4x4_mmx2;
02965             c->weight_h264_pixels_tab[7]= ff_h264_weight_4x2_mmx2;
02966 
02967             c->biweight_h264_pixels_tab[0]= ff_h264_biweight_16x16_mmx2;
02968             c->biweight_h264_pixels_tab[1]= ff_h264_biweight_16x8_mmx2;
02969             c->biweight_h264_pixels_tab[2]= ff_h264_biweight_8x16_mmx2;
02970             c->biweight_h264_pixels_tab[3]= ff_h264_biweight_8x8_mmx2;
02971             c->biweight_h264_pixels_tab[4]= ff_h264_biweight_8x4_mmx2;
02972             c->biweight_h264_pixels_tab[5]= ff_h264_biweight_4x8_mmx2;
02973             c->biweight_h264_pixels_tab[6]= ff_h264_biweight_4x4_mmx2;
02974             c->biweight_h264_pixels_tab[7]= ff_h264_biweight_4x2_mmx2;
02975         }
02976         if(mm_flags & FF_MM_SSE2){
02977             c->h264_idct8_add = ff_h264_idct8_add_sse2;
02978             c->h264_idct8_add4= ff_h264_idct8_add4_sse2;
02979         }
02980 
02981 #if CONFIG_GPL && HAVE_YASM
02982         if (mm_flags & FF_MM_MMX2){
02983 #if ARCH_X86_32
02984             c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_mmxext;
02985             c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_mmxext;
02986 #endif
02987             if( mm_flags&FF_MM_SSE2 ){
02988 #if ARCH_X86_64 || !defined(__ICC) || __ICC > 1110
02989                 c->h264_v_loop_filter_luma = ff_x264_deblock_v_luma_sse2;
02990                 c->h264_h_loop_filter_luma = ff_x264_deblock_h_luma_sse2;
02991                 c->h264_v_loop_filter_luma_intra = ff_x264_deblock_v_luma_intra_sse2;
02992                 c->h264_h_loop_filter_luma_intra = ff_x264_deblock_h_luma_intra_sse2;
02993 #endif
02994                 c->h264_idct_add16 = ff_h264_idct_add16_sse2;
02995                 c->h264_idct_add8  = ff_h264_idct_add8_sse2;
02996                 c->h264_idct_add16intra = ff_h264_idct_add16intra_sse2;
02997             }
02998         }
02999 #endif
03000     }
03001 }
03002 #endif /* CONFIG_H264DSP */
