Libav: libavcodec/x86/dsputil_mmx.h
/*
 * MMX optimized DSP utils
 * Copyright (c) 2007 Aurelien Jacobs <aurel@gnuage.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_X86_DSPUTIL_MMX_H
#define AVCODEC_X86_DSPUTIL_MMX_H

#include <stdint.h>
#include "libavcodec/dsputil.h"

typedef struct { uint64_t a, b; } xmm_reg;

extern const uint64_t ff_bone;
extern const uint64_t ff_wtwo;

extern const uint64_t ff_pdw_80000000[2];

extern const uint64_t ff_pw_3;
extern const uint64_t ff_pw_4;
extern const xmm_reg  ff_pw_5;
extern const xmm_reg  ff_pw_8;
extern const uint64_t ff_pw_15;
extern const xmm_reg  ff_pw_16;
extern const uint64_t ff_pw_20;
extern const xmm_reg  ff_pw_28;
extern const xmm_reg  ff_pw_32;
extern const uint64_t ff_pw_42;
extern const xmm_reg  ff_pw_64;
extern const uint64_t ff_pw_96;
extern const uint64_t ff_pw_128;
extern const uint64_t ff_pw_255;

extern const uint64_t ff_pb_1;
extern const uint64_t ff_pb_3;
extern const uint64_t ff_pb_7;
extern const uint64_t ff_pb_1F;
extern const uint64_t ff_pb_3F;
extern const uint64_t ff_pb_81;
extern const uint64_t ff_pb_A1;
extern const uint64_t ff_pb_FC;

extern const double ff_pd_1[2];
extern const double ff_pd_2[2];

#define LOAD4(stride,in,a,b,c,d)\
    "movq 0*"#stride"+"#in", "#a"\n\t"\
    "movq 1*"#stride"+"#in", "#b"\n\t"\
    "movq 2*"#stride"+"#in", "#c"\n\t"\
    "movq 3*"#stride"+"#in", "#d"\n\t"

#define STORE4(stride,out,a,b,c,d)\
    "movq "#a", 0*"#stride"+"#out"\n\t"\
    "movq "#b", 1*"#stride"+"#out"\n\t"\
    "movq "#c", 2*"#stride"+"#out"\n\t"\
    "movq "#d", 3*"#stride"+"#out"\n\t"

/* in/out: mma=mma+mmb, mmb=mmb-mma */
#define SUMSUB_BA( a, b ) \
    "paddw "#b", "#a" \n\t"\
    "paddw "#b", "#b" \n\t"\
    "psubw "#a", "#b" \n\t"

#define SBUTTERFLY(a,b,t,n,m)\
    "mov" #m " " #a ", " #t "   \n\t" /* abcd */\
    "punpckl" #n " " #b ", " #a "   \n\t" /* aebf */\
    "punpckh" #n " " #b ", " #t "   \n\t" /* cgdh */\

#define TRANSPOSE4(a,b,c,d,t)\
    SBUTTERFLY(a,b,t,wd,q) /* a=aebf t=cgdh */\
    SBUTTERFLY(c,d,b,wd,q) /* c=imjn b=kolp */\
    SBUTTERFLY(a,c,d,dq,q) /* a=aeim d=bfjn */\
    SBUTTERFLY(t,b,c,dq,q) /* t=cgko c=dhlp */

// e,f,g,h can be memory
// out: a,d,t,c
#define TRANSPOSE8x4(a,b,c,d,e,f,g,h,t)\
    "punpcklbw " #e ", " #a " \n\t" /* a0 e0 a1 e1 a2 e2 a3 e3 */\
    "punpcklbw " #f ", " #b " \n\t" /* b0 f0 b1 f1 b2 f2 b3 f3 */\
    "punpcklbw " #g ", " #c " \n\t" /* c0 g0 c1 g1 c2 g2 c3 g3 */\
    "punpcklbw " #h ", " #d " \n\t" /* d0 h0 d1 h1 d2 h2 d3 h3 */\
    SBUTTERFLY(a, b, t, bw, q)   /* a= a0 b0 e0 f0 a1 b1 e1 f1 */\
                                 /* t= a2 b2 e2 f2 a3 b3 e3 f3 */\
    SBUTTERFLY(c, d, b, bw, q)   /* c= c0 d0 g0 h0 c1 d1 g1 h1 */\
                                 /* b= c2 d2 g2 h2 c3 d3 g3 h3 */\
    SBUTTERFLY(a, c, d, wd, q)   /* a= a0 b0 c0 d0 e0 f0 g0 h0 */\
                                 /* d= a1 b1 c1 d1 e1 f1 g1 h1 */\
    SBUTTERFLY(t, b, c, wd, q)   /* t= a2 b2 c2 d2 e2 f2 g2 h2 */\
                                 /* c= a3 b3 c3 d3 e3 f3 g3 h3 */

#if ARCH_X86_64
// permutes 01234567 -> 05736421
#define TRANSPOSE8(a,b,c,d,e,f,g,h,t)\
    SBUTTERFLY(a,b,%%xmm8,wd,dqa)\
    SBUTTERFLY(c,d,b,wd,dqa)\
    SBUTTERFLY(e,f,d,wd,dqa)\
    SBUTTERFLY(g,h,f,wd,dqa)\
    SBUTTERFLY(a,c,h,dq,dqa)\
    SBUTTERFLY(%%xmm8,b,c,dq,dqa)\
    SBUTTERFLY(e,g,b,dq,dqa)\
    SBUTTERFLY(d,f,g,dq,dqa)\
    SBUTTERFLY(a,e,f,qdq,dqa)\
    SBUTTERFLY(%%xmm8,d,e,qdq,dqa)\
    SBUTTERFLY(h,b,d,qdq,dqa)\
    SBUTTERFLY(c,g,b,qdq,dqa)\
    "movdqa %%xmm8, "#g"              \n\t"
#else
#define TRANSPOSE8(a,b,c,d,e,f,g,h,t)\
    "movdqa "#h", "#t"                \n\t"\
    SBUTTERFLY(a,b,h,wd,dqa)\
    "movdqa "#h", 16"#t"              \n\t"\
    "movdqa "#t", "#h"                \n\t"\
    SBUTTERFLY(c,d,b,wd,dqa)\
    SBUTTERFLY(e,f,d,wd,dqa)\
    SBUTTERFLY(g,h,f,wd,dqa)\
    SBUTTERFLY(a,c,h,dq,dqa)\
    "movdqa "#h", "#t"                \n\t"\
    "movdqa 16"#t", "#h"              \n\t"\
    SBUTTERFLY(h,b,c,dq,dqa)\
    SBUTTERFLY(e,g,b,dq,dqa)\
    SBUTTERFLY(d,f,g,dq,dqa)\
    SBUTTERFLY(a,e,f,qdq,dqa)\
    SBUTTERFLY(h,d,e,qdq,dqa)\
    "movdqa "#h", 16"#t"              \n\t"\
    "movdqa "#t", "#h"                \n\t"\
    SBUTTERFLY(h,b,d,qdq,dqa)\
    SBUTTERFLY(c,g,b,qdq,dqa)\
    "movdqa 16"#t", "#g"              \n\t"
#endif

#define MOVQ_WONE(regd) \
    __asm__ volatile ( \
    "pcmpeqd %%" #regd ", %%" #regd " \n\t" \
    "psrlw $15, %%" #regd ::)

void dsputilenc_init_mmx(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx);

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);

void ff_cavsdsp_init_mmx2(DSPContext* c, AVCodecContext *avctx);
void ff_cavsdsp_init_3dnow(DSPContext* c, AVCodecContext *avctx);
void ff_put_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_avg_cavs_qpel8_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_put_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);
void ff_avg_cavs_qpel16_mc00_mmx2(uint8_t *dst, uint8_t *src, int stride);

void ff_vc1dsp_init_mmx(DSPContext* dsp, AVCodecContext *avctx);
void ff_put_vc1_mspel_mc00_mmx(uint8_t *dst, const uint8_t *src, int stride, int rnd);
void ff_avg_vc1_mspel_mc00_mmx2(uint8_t *dst, const uint8_t *src, int stride, int rnd);

void ff_lpc_compute_autocorr_sse2(const int32_t *data, int len, int lag,
                                  double *autoc);

void ff_mmx_idct(DCTELEM *block);
void ff_mmxext_idct(DCTELEM *block);

#endif /* AVCODEC_X86_DSPUTIL_MMX_H */
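As a reading aid, the plain-C sketch below models what the SUMSUB_BA and TRANSPOSE4 macros above compute on each 16-bit lane; the MMX versions do the same work four packed words at a time per register. The function names are illustrative only and are not part of this header.

#include <stdint.h>

/* Scalar model of SUMSUB_BA: per lane, a' = a + b, b' = b - a.
 * The macro reaches the same result with paddw/psubw: after a holds a+b,
 * it computes b = 2*b - (a+b), i.e. the original b minus the original a. */
static void sumsub_ba_scalar(int16_t *a, int16_t *b)
{
    int16_t old_a = *a;
    *a = old_a + *b;
    *b = *b - old_a;
}

/* Scalar model of TRANSPOSE4: transpose a 4x4 block of 16-bit words.
 * The macro does this with two rounds of punpckl/punpckh butterflies
 * (SBUTTERFLY); per its comments, the transposed rows end up in
 * registers a, d, t, c, with the original t clobbered as scratch. */
static void transpose4x4_scalar(int16_t m[4][4])
{
    for (int i = 0; i < 4; i++) {
        for (int j = i + 1; j < 4; j++) {
            int16_t tmp = m[i][j];
            m[i][j] = m[j][i];
            m[j][i] = tmp;
        }
    }
}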