Libav
/*
 * simple math operations
 * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#ifndef AVCODEC_ARM_MATHOPS_H
#define AVCODEC_ARM_MATHOPS_H

#include <stdint.h>
#include "config.h"
#include "libavutil/common.h"

#if HAVE_INLINE_ASM

/* (a * b) >> shift, using the full 64-bit intermediate product */
# define MULL MULL
static inline av_const int MULL(int a, int b, unsigned shift)
{
    int lo, hi;
    __asm__("smull %0, %1, %2, %3     \n\t"
            "mov   %0, %0,     lsr %4 \n\t"
            "add   %1, %0, %1, lsl %5 \n\t"
            : "=&r"(lo), "=&r"(hi)
            : "r"(b), "r"(a), "ir"(shift), "ir"(32-shift));
    return hi;
}

/* high 32 bits of the signed 64-bit product a * b */
#define MULH MULH
#if HAVE_ARMV6
static inline av_const int MULH(int a, int b)
{
    int r;
    __asm__ ("smmul %0, %1, %2" : "=r"(r) : "r"(a), "r"(b));
    return r;
}
#else
static inline av_const int MULH(int a, int b)
{
    int lo, hi;
    __asm__ ("smull %0, %1, %2, %3" : "=&r"(lo), "=&r"(hi) : "r"(b), "r"(a));
    return hi;
}
#endif

/* full signed 64-bit product of two 32-bit operands */
static inline av_const int64_t MUL64(int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x;
    __asm__ ("smull %0, %1, %2, %3"
             : "=r"(x.hl[0]), "=r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MUL64 MUL64

/* d += (int64_t)a * b, accumulated with a single smlal */
static inline av_const int64_t MAC64(int64_t d, int a, int b)
{
    union { uint64_t x; unsigned hl[2]; } x = { d };
    __asm__ ("smlal %0, %1, %2, %3"
             : "+r"(x.hl[0]), "+r"(x.hl[1]) : "r"(a), "r"(b));
    return x.x;
}
#define MAC64(d, a, b) ((d) = MAC64(d, a, b))
#define MLS64(d, a, b) MAC64(d, -(a), b)

#if HAVE_ARMV5TE

/* signed 16x16 -> 32 multiply add accumulate */
# define MAC16(rt, ra, rb) \
    __asm__ ("smlabb %0, %1, %2, %0" : "+r"(rt) : "r"(ra), "r"(rb));

/* signed 16x16 -> 32 multiply */
# define MUL16 MUL16
static inline av_const int MUL16(int ra, int rb)
{
    int rt;
    __asm__ ("smulbb %0, %1, %2" : "=r"(rt) : "r"(ra), "r"(rb));
    return rt;
}

#endif

/* median of a, b and c */
#define mid_pred mid_pred
static inline av_const int mid_pred(int a, int b, int c)
{
    int m;
    __asm__ volatile (
        "mov   %0, %2  \n\t"
        "cmp   %1, %2  \n\t"
        "movgt %0, %1  \n\t"
        "movgt %1, %2  \n\t"
        "cmp   %1, %3  \n\t"
        "movle %1, %3  \n\t"
        "cmp   %0, %1  \n\t"
        "movgt %0, %1  \n\t"
        : "=&r"(m), "+r"(a)
        : "r"(b), "r"(c));
    return m;
}

#endif /* HAVE_INLINE_ASM */

#endif /* AVCODEC_ARM_MATHOPS_H */
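
For readers less familiar with ARM inline assembly, the following is a minimal portable C sketch of what these routines compute, modeled on the generic fallbacks that libavcodec/mathops.h provides when no assembly version is available. The names mull_c, mulh_c, mul64_c and mid_pred_c are illustrative only and not part of the Libav API. Note that the asm MULL above shifts by both shift and 32-shift, so it assumes 0 < shift < 32; the portable version has no such restriction.

#include <stdint.h>
#include <stdio.h>

/* (a * b) >> shift with a 64-bit intermediate, like MULL */
static int mull_c(int a, int b, unsigned shift)
{
    return (int)(((int64_t)a * b) >> shift);
}

/* high 32 bits of the signed 64-bit product, like MULH */
static int mulh_c(int a, int b)
{
    return (int)(((int64_t)a * b) >> 32);
}

/* full signed 64-bit product, like MUL64 */
static int64_t mul64_c(int a, int b)
{
    return (int64_t)a * b;
}

/* median of three values, like mid_pred */
static int mid_pred_c(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }   /* ensure a <= b       */
    if (b > c) b = c;                         /* b = min(b, c)       */
    return a > b ? a : b;                     /* median = max(a, b)  */
}

int main(void)
{
    printf("MULL(3<<16, 5<<16, 16) = %d\n", mull_c(3 << 16, 5 << 16, 16)); /* 15<<16 = 983040 */
    printf("MULH(INT32_MIN, 2)     = %d\n", mulh_c(INT32_MIN, 2));         /* -1 */
    printf("MUL64(1<<30, 4)        = %lld\n", (long long)mul64_c(1 << 30, 4));
    printf("mid_pred(3, 1, 2)      = %d\n", mid_pred_c(3, 1, 2));          /* 2 */
    return 0;
}

The fixed-point pattern behind MULL is worth calling out: two Q16 values multiplied together yield a Q32 product in 64 bits, and shifting right by 16 brings the result back to Q16, which is why the test above prints 15<<16.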