/*===---- fma4intrin.h - FMA4 intrinsics -----------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef __X86INTRIN_H
#error "Never use <fma4intrin.h> directly; include <x86intrin.h> instead."
#endif

#ifndef __FMA4INTRIN_H
#define __FMA4INTRIN_H

#ifndef __FMA4__
# error "FMA4 instruction set is not enabled"
#else

#include <pmmintrin.h>

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_macc_ps(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfmaddps(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_macc_pd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfmaddpd(__A, __B, __C);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_macc_ss(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfmaddss(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_macc_sd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfmaddsd(__A, __B, __C);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_msub_ps(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfmsubps(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_msub_pd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfmsubpd(__A, __B, __C);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_msub_ss(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfmsubss(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_msub_sd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfmsubsd(__A, __B, __C);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_nmacc_ps(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfnmaddps(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_nmacc_pd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfnmaddpd(__A, __B, __C);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_nmacc_ss(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfnmaddss(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_nmacc_sd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfnmaddsd(__A, __B, __C);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_nmsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfnmsubps(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_nmsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfnmsubpd(__A, __B, __C);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_nmsub_ss(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfnmsubss(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_nmsub_sd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfnmsubsd(__A, __B, __C);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_maddsub_ps(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfmaddsubps(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_maddsub_pd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfmaddsubpd(__A, __B, __C);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_msubadd_ps(__m128 __A, __m128 __B, __m128 __C)
{
  return (__m128)__builtin_ia32_vfmsubaddps(__A, __B, __C);
}

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_msubadd_pd(__m128d __A, __m128d __B, __m128d __C)
{
  return (__m128d)__builtin_ia32_vfmsubaddpd(__A, __B, __C);
}

static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_macc_ps(__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256)__builtin_ia32_vfmaddps256(__A, __B, __C);
}

static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_macc_pd(__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d)__builtin_ia32_vfmaddpd256(__A, __B, __C);
}

static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_msub_ps(__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256)__builtin_ia32_vfmsubps256(__A, __B, __C);
}

static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_msub_pd(__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d)__builtin_ia32_vfmsubpd256(__A, __B, __C);
}

static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_nmacc_ps(__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256)__builtin_ia32_vfnmaddps256(__A, __B, __C);
}

static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_nmacc_pd(__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d)__builtin_ia32_vfnmaddpd256(__A, __B, __C);
}

static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_nmsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256)__builtin_ia32_vfnmsubps256(__A, __B, __C);
}

static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_nmsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d)__builtin_ia32_vfnmsubpd256(__A, __B, __C);
}

static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_maddsub_ps(__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256)__builtin_ia32_vfmaddsubps256(__A, __B, __C);
}

static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_maddsub_pd(__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d)__builtin_ia32_vfmaddsubpd256(__A, __B, __C);
}

static __inline__ __m256 __attribute__((__always_inline__, __nodebug__))
_mm256_msubadd_ps(__m256 __A, __m256 __B, __m256 __C)
{
  return (__m256)__builtin_ia32_vfmsubaddps256(__A, __B, __C);
}

static __inline__ __m256d __attribute__((__always_inline__, __nodebug__))
_mm256_msubadd_pd(__m256d __A, __m256d __B, __m256d __C)
{
  return (__m256d)__builtin_ia32_vfmsubaddpd256(__A, __B, __C);
}

#endif /* __FMA4__ */

#endif /* __FMA4INTRIN_H */
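
/*
 * Usage sketch (illustrative only, not part of this header's API): with FMA4
 * enabled (e.g. -mfma4) and <x86intrin.h> included, _mm_macc_ps computes
 * (__A * __B) + __C in each 32-bit lane with a single rounding. The _mm_set*
 * helpers below come from the SSE headers pulled in via <pmmintrin.h>.
 *
 *   __m128 a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
 *   __m128 b = _mm_set1_ps(2.0f);
 *   __m128 c = _mm_set1_ps(1.0f);
 *   __m128 r = _mm_macc_ps(a, b, c);   // r = { 3, 5, 7, 9 } (low to high lane)
 */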