Libav 0.7.1 — libavutil/arm/intmath.h
00001 /* 00002 * Copyright (c) 2010 Mans Rullgard <mans@mansr.com> 00003 * 00004 * This file is part of Libav. 00005 * 00006 * Libav is free software; you can redistribute it and/or 00007 * modify it under the terms of the GNU Lesser General Public 00008 * License as published by the Free Software Foundation; either 00009 * version 2.1 of the License, or (at your option) any later version. 00010 * 00011 * Libav is distributed in the hope that it will be useful, 00012 * but WITHOUT ANY WARRANTY; without even the implied warranty of 00013 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 00014 * Lesser General Public License for more details. 00015 * 00016 * You should have received a copy of the GNU Lesser General Public 00017 * License along with Libav; if not, write to the Free Software 00018 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA 00019 */ 00020 00021 #ifndef AVUTIL_ARM_INTMATH_H 00022 #define AVUTIL_ARM_INTMATH_H 00023 00024 #include <stdint.h> 00025 00026 #include "config.h" 00027 #include "libavutil/attributes.h" 00028 00029 #if HAVE_INLINE_ASM 00030 00031 #if HAVE_ARMV6 00032 00033 #define FASTDIV FASTDIV 00034 static av_always_inline av_const int FASTDIV(int a, int b) 00035 { 00036 int r; 00037 __asm__ ("cmp %2, #2 \n\t" 00038 "ldr %0, [%3, %2, lsl #2] \n\t" 00039 "lsrle %0, %1, #1 \n\t" 00040 "smmulgt %0, %0, %1 \n\t" 00041 : "=&r"(r) : "r"(a), "r"(b), "r"(ff_inverse) : "cc"); 00042 return r; 00043 } 00044 00045 #define av_clip_uint8 av_clip_uint8_arm 00046 static av_always_inline av_const uint8_t av_clip_uint8_arm(int a) 00047 { 00048 unsigned x; 00049 __asm__ ("usat %0, #8, %1" : "=r"(x) : "r"(a)); 00050 return x; 00051 } 00052 00053 #define av_clip_int8 av_clip_int8_arm 00054 static av_always_inline av_const uint8_t av_clip_int8_arm(int a) 00055 { 00056 unsigned x; 00057 __asm__ ("ssat %0, #8, %1" : "=r"(x) : "r"(a)); 00058 return x; 00059 } 00060 00061 #define av_clip_uint16 av_clip_uint16_arm 
00062 static av_always_inline av_const uint16_t av_clip_uint16_arm(int a) 00063 { 00064 unsigned x; 00065 __asm__ ("usat %0, #16, %1" : "=r"(x) : "r"(a)); 00066 return x; 00067 } 00068 00069 #define av_clip_int16 av_clip_int16_arm 00070 static av_always_inline av_const int16_t av_clip_int16_arm(int a) 00071 { 00072 int x; 00073 __asm__ ("ssat %0, #16, %1" : "=r"(x) : "r"(a)); 00074 return x; 00075 } 00076 00077 #define av_clip_uintp2 av_clip_uintp2_arm 00078 static av_always_inline av_const unsigned av_clip_uintp2_arm(int a, int p) 00079 { 00080 unsigned x; 00081 __asm__ ("usat %0, %2, %1" : "=r"(x) : "r"(a), "i"(p)); 00082 return x; 00083 } 00084 00085 00086 #else /* HAVE_ARMV6 */ 00087 00088 #define FASTDIV FASTDIV 00089 static av_always_inline av_const int FASTDIV(int a, int b) 00090 { 00091 int r, t; 00092 __asm__ ("umull %1, %0, %2, %3" 00093 : "=&r"(r), "=&r"(t) : "r"(a), "r"(ff_inverse[b])); 00094 return r; 00095 } 00096 00097 #endif /* HAVE_ARMV6 */ 00098 00099 #define av_clipl_int32 av_clipl_int32_arm 00100 static av_always_inline av_const int32_t av_clipl_int32_arm(int64_t a) 00101 { 00102 int x, y; 00103 __asm__ ("adds %1, %R2, %Q2, lsr #31 \n\t" 00104 "mvnne %1, #1<<31 \n\t" 00105 "moveq %0, %Q2 \n\t" 00106 "eorne %0, %1, %R2, asr #31 \n\t" 00107 : "=r"(x), "=&r"(y) : "r"(a)); 00108 return x; 00109 } 00110 00111 #endif /* HAVE_INLINE_ASM */ 00112 00113 #endif /* AVUTIL_ARM_INTMATH_H */