Libav 0.7.1
|
/*
 * MMX optimized DSP utils
 * Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_DSPUTIL_MMX_H
#define AVCODEC_X86_DSPUTIL_MMX_H

#include <stdint.h>
#include "libavcodec/dsputil.h"
#include "libavutil/x86_cpu.h"

/* Two 64-bit halves forming one 128-bit value; used so a single symbol can
 * serve as a full XMM-width constant operand in inline asm. */
typedef struct { uint64_t a, b; } xmm_reg;

/*
 * Shared SIMD constants, defined in the corresponding .c file.
 * Naming convention (ffmpeg/Libav style): ff_pw_N = value N replicated in
 * each packed 16-bit word, ff_pb_N = N replicated in each packed byte,
 * ff_pd_N = packed doubles. uint64_t entries are MMX-width (64-bit),
 * xmm_reg entries are SSE-width (128-bit).
 */
extern const uint64_t ff_bone;
extern const uint64_t ff_wtwo;

extern const uint64_t ff_pdw_80000000[2];

extern const xmm_reg  ff_pw_3;
extern const xmm_reg  ff_pw_4;
extern const xmm_reg  ff_pw_5;
extern const xmm_reg  ff_pw_8;
extern const uint64_t ff_pw_15;
extern const xmm_reg  ff_pw_16;
extern const xmm_reg  ff_pw_18;
extern const uint64_t ff_pw_20;
extern const xmm_reg  ff_pw_27;
extern const xmm_reg  ff_pw_28;
extern const xmm_reg  ff_pw_32;
extern const uint64_t ff_pw_42;
extern const uint64_t ff_pw_53;
extern const xmm_reg  ff_pw_63;
extern const xmm_reg  ff_pw_64;
extern const uint64_t ff_pw_96;
extern const uint64_t ff_pw_128;
extern const uint64_t ff_pw_255;

extern const xmm_reg  ff_pb_1;
extern const xmm_reg  ff_pb_3;
extern const uint64_t ff_pb_7;
extern const uint64_t ff_pb_1F;
extern const uint64_t ff_pb_3F;
extern const uint64_t ff_pb_81;
extern const xmm_reg  ff_pb_A1;
extern const xmm_reg  ff_pb_F8;
extern const uint64_t ff_pb_FC;
extern const xmm_reg  ff_pb_FE;

extern const double ff_pd_1[2];
extern const double ff_pd_2[2];

/* Emit asm that loads 4 consecutive quadwords from memory operand "in"
 * at intervals of "stride" bytes into MMX registers a..d. All arguments
 * are stringized into the asm template, so they must be literal operand
 * names/expressions, not C values. */
#define LOAD4(stride,in,a,b,c,d)\
    "movq 0*"#stride"+"#in", "#a"\n\t"\
    "movq 1*"#stride"+"#in", "#b"\n\t"\
    "movq 2*"#stride"+"#in", "#c"\n\t"\
    "movq 3*"#stride"+"#in", "#d"\n\t"

/* Inverse of LOAD4: store MMX registers a..d to 4 quadwords at intervals
 * of "stride" bytes starting at memory operand "out". */
#define STORE4(stride,out,a,b,c,d)\
    "movq "#a", 0*"#stride"+"#out"\n\t"\
    "movq "#b", 1*"#stride"+"#out"\n\t"\
    "movq "#c", 2*"#stride"+"#out"\n\t"\
    "movq "#d", 3*"#stride"+"#out"\n\t"

/* in/out: mma=mma+mmb, mmb=mmb-mma
 * Computed without a temporary register as:
 *   a' = a + b;  b' = 2*b - a' = b - a   (packed 16-bit words) */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

/* Butterfly interleave step: t gets a copy of a, then a = interleave of the
 * low halves of (a,b) and t = interleave of the high halves.
 *   n = punpck element-size suffix (bw, wd, dq, qdq)
 *   m = mov suffix: q for MMX registers, dqa for aligned SSE2 */
#define SBUTTERFLY(a,b,t,n,m)\
    "mov" #m " " #a ", " #t " \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a " \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t " \n\t" /* cgdh */

/* Transpose a 4x4 matrix of 16-bit words held in MMX registers a..d,
 * using t as scratch. After expansion the four rows end up in a, d, t, c
 * (see the per-step comments). */
#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd,q) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd,q) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq,q) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq,q) /* t=cgko c=dhlp */

/* Transpose a 4x4 block of bytes: reads 4 rows of 4 bytes from src at
 * src_stride intervals, writes the transposed rows to dst at dst_stride
 * intervals. Uses MMX registers mm0-mm3.
 * NOTE(review): no emms here — presumably the caller restores FPU/MMX
 * state; confirm against call sites. */
static inline void transpose4x4(uint8_t *dst, uint8_t *src, x86_reg dst_stride, x86_reg src_stride){
    __asm__ volatile( //FIXME could save 1 instruction if done as 8x4 ...
        /* Load the 4 source rows (src is advanced by one stride so rows
         * 1-3 can use the (base,index,scale) addressing forms). */
        "movd (%1), %%mm0               \n\t"
        "add %3, %1                     \n\t"
        "movd (%1), %%mm1               \n\t"
        "movd (%1,%3,1), %%mm2          \n\t"
        "movd (%1,%3,2), %%mm3          \n\t"
        /* Interleave bytes, then words, to form the transposed rows. */
        "punpcklbw %%mm1, %%mm0         \n\t"
        "punpcklbw %%mm3, %%mm2         \n\t"
        "movq %%mm0, %%mm1              \n\t"
        "punpcklwd %%mm2, %%mm0         \n\t"
        "punpckhwd %%mm2, %%mm1         \n\t"
        /* Store the 4 transposed rows (dst advanced by one stride the
         * same way as src above). */
        "movd %%mm0, (%0)               \n\t"
        "add %2, %0                     \n\t"
        "punpckhdq %%mm0, %%mm0         \n\t"
        "movd %%mm0, (%0)               \n\t"
        "movd %%mm1, (%0,%2,1)          \n\t"
        "punpckhdq %%mm1, %%mm1         \n\t"
        "movd %%mm1, (%0,%2,2)          \n\t"

        : "+&r" (dst),
          "+&r" (src)
        : "r" (dst_stride),
          "r" (src_stride)
        : "memory"
    );
}

// e,f,g,h can be memory
// out: a,d,t,c
#define TRANSPOSE8x4(a,b,c,d,e,f,g,h,t)\
    "punpcklbw " #e ", " #a " \n\t" /* a0 e0 a1 e1 a2 e2 a3 e3 */\
    "punpcklbw " #f ", " #b " \n\t" /* b0 f0 b1 f1 b2 f2 b3 f3 */\
    "punpcklbw " #g ", " #c " \n\t" /* c0 g0 c1 g1 c2 g2 c3 g3 */\
    "punpcklbw " #h ", " #d " \n\t" /* d0 h0 d1 h1 d2 h2 d3 h3 */\
    SBUTTERFLY(a, b, t, bw, q)      /* a= a0 b0 e0 f0 a1 b1 e1 f1 */\
                                    /* t= a2 b2 e2 f2 a3 b3 e3 f3 */\
    SBUTTERFLY(c, d, b, bw, q)      /* c= c0 d0 g0 h0 c1 d1 g1 h1 */\
                                    /* b= c2 d2 g2 h2 c3 d3 g3 h3 */\
    SBUTTERFLY(a, c, d, wd, q)      /* a= a0 b0 c0 d0 e0 f0 g0 h0 */\
                                    /* d= a1 b1 c1 d1 e1 f1 g1 h1 */\
    SBUTTERFLY(t, b, c, wd, q)      /* t= a2 b2 c2 d2 e2 f2 g2 h2 */\
                                    /* c= a3 b3 c3 d3 e3 f3 g3 h3 */

#if ARCH_X86_64
// permutes 01234567 -> 05736421
/* 8x8 transpose of 16-bit words in XMM registers a..h. On x86-64 the
 * extra register xmm8 is used as scratch, so the t argument is unused. */
#define TRANSPOSE8(a,b,c,d,e,f,g,h,t)\
    SBUTTERFLY(a,b,%%xmm8,wd,dqa)\
    SBUTTERFLY(c,d,b,wd,dqa)\
    SBUTTERFLY(e,f,d,wd,dqa)\
    SBUTTERFLY(g,h,f,wd,dqa)\
    SBUTTERFLY(a,c,h,dq,dqa)\
    SBUTTERFLY(%%xmm8,b,c,dq,dqa)\
    SBUTTERFLY(e,g,b,dq,dqa)\
    SBUTTERFLY(d,f,g,dq,dqa)\
    SBUTTERFLY(a,e,f,qdq,dqa)\
    SBUTTERFLY(%%xmm8,d,e,qdq,dqa)\
    SBUTTERFLY(h,b,d,qdq,dqa)\
    SBUTTERFLY(c,g,b,qdq,dqa)\
    "movdqa %%xmm8, "#g"              \n\t"
#else
/* 32-bit variant: only 8 XMM registers are available, so t must be a
 * 32-byte memory scratch area — values are spilled to t and t+16
 * (the "16"#t operands) between butterfly stages. */
#define TRANSPOSE8(a,b,c,d,e,f,g,h,t)\
    "movdqa "#h", "#t"                \n\t"\
    SBUTTERFLY(a,b,h,wd,dqa)\
    "movdqa "#h", 16"#t"              \n\t"\
    "movdqa "#t", "#h"                \n\t"\
    SBUTTERFLY(c,d,b,wd,dqa)\
    SBUTTERFLY(e,f,d,wd,dqa)\
    SBUTTERFLY(g,h,f,wd,dqa)\
    SBUTTERFLY(a,c,h,dq,dqa)\
    "movdqa "#h", "#t"                \n\t"\
    "movdqa 16"#t", "#h"              \n\t"\
    SBUTTERFLY(h,b,c,dq,dqa)\
    SBUTTERFLY(e,g,b,dq,dqa)\
    SBUTTERFLY(d,f,g,dq,dqa)\
    SBUTTERFLY(a,e,f,qdq,dqa)\
    SBUTTERFLY(h,d,e,qdq,dqa)\
    "movdqa "#h", 16"#t"              \n\t"\
    "movdqa "#t", "#h"                \n\t"\
    SBUTTERFLY(h,b,d,qdq,dqa)\
    SBUTTERFLY(c,g,b,qdq,dqa)\
    "movdqa 16"#t", "#g"              \n\t"
#endif

/* Set every 16-bit word of MMX register regd to 1:
 * pcmpeqd against itself yields all-ones, psrlw $15 keeps the low bit
 * of each word. */
#define MOVQ_WONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

/* Init entry points: install MMX-optimized function pointers into c. */
void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx);

/* Copy a block of DCT coefficients to pixels, with clamping (and adding
 * to the destination in the _add_ variant). Implemented in asm. */
void ff_add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
void ff_put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
void ff_put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);

/* CAVS quarter-pel MC, full-pel (mc00) cases. */
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);

/* VC-1 motion compensation, full-pel case. */
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd);
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd);

/* In-place 8x8 IDCT on block (MMX and MMXEXT flavours). */
void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);


/* Deinterlace one line of size pixels from the five source lines
 * lum_m4..lum (the _inplace_ variant writes back into the sources). */
void ff_deinterlace_line_mmx(uint8_t *dst,
                             const uint8_t *lum_m4, const uint8_t *lum_m3,
                             const uint8_t *lum_m2, const uint8_t *lum_m1,
                             const uint8_t *lum,
                             int size);

void ff_deinterlace_line_inplace_mmx(const uint8_t *lum_m4,
                                     const uint8_t *lum_m3,
                                     const uint8_t *lum_m2,
                                     const uint8_t *lum_m1,
                                     const uint8_t *lum, int size);

#endif /* AVCODEC_X86_DSPUTIL_MMX_H */