smmintrin.h revision 0d57ca1449d7a4d7afb927d4c3c1069dc6339372
/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H

#ifndef __SSE4_1__
#error "SSE4.1 instruction set not enabled"
#else

#include <tmmintrin.h>

/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT    0x00
#define _MM_FROUND_TO_NEG_INF        0x01
#define _MM_FROUND_TO_POS_INF        0x02
#define _MM_FROUND_TO_ZERO           0x03
#define _MM_FROUND_CUR_DIRECTION     0x04

#define _MM_FROUND_RAISE_EXC         0x00
#define _MM_FROUND_NO_EXC            0x08

#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)

#define _mm_ceil_ps(X)     _mm_round_ps((X), _MM_FROUND_CEIL)
#define _mm_ceil_pd(X)     _mm_round_pd((X), _MM_FROUND_CEIL)
#define _mm_ceil_ss(X, Y)  _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
#define _mm_ceil_sd(X, Y)  _mm_round_sd((X), (Y), _MM_FROUND_CEIL)

#define _mm_floor_ps(X)    _mm_round_ps((X), _MM_FROUND_FLOOR)
#define _mm_floor_pd(X)    _mm_round_pd((X), _MM_FROUND_FLOOR)
#define _mm_floor_ss(X, Y) _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y) _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)

#define _mm_round_ps(X, M)    __builtin_ia32_roundps((X), (M))
#define _mm_round_ss(X, Y, M) __builtin_ia32_roundss((X), (Y), (M))
#define _mm_round_pd(X, M)    __builtin_ia32_roundpd((X), (M))
#define _mm_round_sd(X, Y, M) __builtin_ia32_roundsd((X), (Y), (M))
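
/* Usage sketch (an illustrative comment, not part of the original header):
 * rounding packed floats with the macros above. _mm_floor_ps rounds each
 * lane toward negative infinity; passing an explicit mode with
 * _MM_FROUND_NO_EXC to _mm_round_ps suppresses precision exceptions.
 *
 *   __m128 __v = _mm_set_ps(2.5f, -1.5f, 0.5f, -0.5f);
 *   __m128 __f = _mm_floor_ps(__v);   // lanes low-to-high: -1, 0, -2, 2
 *   __m128 __t = _mm_round_ps(__v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
 *                                     // lanes low-to-high: -0, 0, -1, 2
 */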
/* SSE4 Packed Blending Intrinsics. */
#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
  __m128d __V1 = (V1); \
  __m128d __V2 = (V2); \
  (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, M); })

#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
  __m128 __V1 = (V1); \
  __m128 __V2 = (V2); \
  (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, M); })

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
                                            (__v2df)__M);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
                                           (__v4sf)__M);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
                                               (__v16qi)__M);
}

#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
  __m128i __V1 = (V1); \
  __m128i __V2 = (V2); \
  (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, M); })

/* SSE4 Dword Multiply Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
}

/* SSE4 Floating Point Dot Product Instructions. */
#define _mm_dp_ps(X, Y, M) __builtin_ia32_dpps((X), (Y), (M))
#define _mm_dp_pd(X, Y, M) __builtin_ia32_dppd((X), (Y), (M))

/* SSE4 Streaming Load Hint Instruction. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_stream_load_si128 (__m128i *__V)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V);
}
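
/* Usage sketch (an illustrative comment, not part of the original header):
 * branchless per-lane select with _mm_blendv_ps. Each result lane comes
 * from the second operand where the mask lane's sign bit is set, otherwise
 * from the first; combined with a compare this yields a per-lane maximum.
 *
 *   __m128 __x = _mm_set_ps(3.0f, 1.0f, 4.0f, 1.0f);
 *   __m128 __y = _mm_set_ps(2.0f, 7.0f, 1.0f, 8.0f);
 *   __m128 __m = _mm_cmplt_ps(__x, __y);        // all-ones where __x < __y
 *   __m128 __r = _mm_blendv_ps(__x, __y, __m);  // per-lane max(__x, __y)
 */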
/* SSE4 Packed Integer Min/Max Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminud128 ((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxud128 ((__v4si) __V1, (__v4si) __V2);
}

/* SSE4 Insertion and Extraction from XMM Register Instructions. */
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
#define _mm_extract_ps(X, N) (__extension__                     \
                              ({ union { int i; float f; } __t; \
                                 __v4sf __a = (__v4sf)(X);      \
                                 __t.f = __a[N];                \
                                 __t.i;}))

/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
                                                     (D) = __a[N]; }))

/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
   an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))

/* Extract a float from X at index N into the first index of the return. */
#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X),   \
                                             _MM_MK_INSERTPS_NDX((N), 0, 0x0e))

/* Insert int into packed integer array at index. */
#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
                                                   __a[N] = I;                 \
                                                   __a;}))
#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
                                                    __a[N] = I;               \
                                                    __a;}))
#ifdef __x86_64__
#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
                                                    __a[N] = I;               \
                                                    __a;}))
#endif /* __x86_64__ */
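
/* Usage sketch (an illustrative comment, not part of the original header):
 * composing an INSERTPS immediate with _MM_MK_INSERTPS_NDX. The triple
 * (2, 0, 0x0) copies element 2 of the second operand into element 0 of
 * the first and zeroes no lanes.
 *
 *   __m128 __a = _mm_setzero_ps();
 *   __m128 __b = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);  // lanes: 1, 2, 3, 4
 *   __m128 __r = _mm_insert_ps(__a, __b,
 *                              _MM_MK_INSERTPS_NDX(2, 0, 0x0));
 *                              // __r lanes: 3.0, 0.0, 0.0, 0.0
 */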
/* Extract int from packed integer array at index.  This returns the element
 * as a zero-extended value, so it is unsigned.
 */
#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
                                                 (unsigned char)__a[N];}))
#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
                                                  (unsigned)__a[N];}))
#ifdef __x86_64__
#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
                                                  __a[N];}))
#endif /* __x86_64__ */

/* SSE4 128-bit Packed Integer Comparisons. */
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testz_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testc_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}

#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
#define _mm_test_all_zeros(M, V) _mm_testz_si128((M), (V))

/* SSE4 64-bit Packed Integer Comparisons. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pcmpeqq((__v2di)__V1, (__v2di)__V2);
}

/* SSE4 Packed Integer Sign-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi16(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbw128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbd128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbq128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxwd128((__v8hi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxwq128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxdq128((__v4si)__V);
}
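
/* Usage sketch (an illustrative comment, not part of the original header):
 * contrasting the sign-extending conversions above with the zero-extending
 * conversions below, on the low eight bytes of a vector.
 *
 *   __m128i __v = _mm_set1_epi8(-1);        // every byte is 0xFF
 *   __m128i __s = _mm_cvtepi8_epi16(__v);   // 8 words, each -1  (0xFFFF)
 *   __m128i __z = _mm_cvtepu8_epi16(__v);   // 8 words, each 255 (0x00FF)
 */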
/* SSE4 Packed Integer Zero-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi16(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu32_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
}

/* SSE4 Pack with Unsigned Saturation. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packus_epi32(__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}

/* SSE4 Multiple Packed Sums of Absolute Difference. */
#define _mm_mpsadbw_epu8(X, Y, M) __builtin_ia32_mpsadbw128((X), (Y), (M))

/* These definitions are normally in nmmintrin.h, but gcc puts them here,
   so we do the same. */
#ifdef __SSE4_2__

/* These specify the type of data that we're comparing. */
#define _SIDD_UBYTE_OPS                 0x00
#define _SIDD_UWORD_OPS                 0x01
#define _SIDD_SBYTE_OPS                 0x02
#define _SIDD_SWORD_OPS                 0x03

/* These specify the type of comparison operation. */
#define _SIDD_CMP_EQUAL_ANY             0x00
#define _SIDD_CMP_RANGES                0x04
#define _SIDD_CMP_EQUAL_EACH            0x08
#define _SIDD_CMP_EQUAL_ORDERED         0x0c

/* These macros specify the polarity of the operation. */
#define _SIDD_POSITIVE_POLARITY         0x00
#define _SIDD_NEGATIVE_POLARITY         0x10
#define _SIDD_MASKED_POSITIVE_POLARITY  0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY  0x30

/* These macros are used in _mm_cmpXstri() to specify the return. */
#define _SIDD_LEAST_SIGNIFICANT         0x00
#define _SIDD_MOST_SIGNIFICANT          0x40

/* These macros are used in _mm_cmpXstrm() to specify the return. */
#define _SIDD_BIT_MASK                  0x00
#define _SIDD_UNIT_MASK                 0x40

/* SSE4.2 Packed Comparison Intrinsics. */
#define _mm_cmpistrm(A, B, M) __builtin_ia32_pcmpistrm128((A), (B), (M))
#define _mm_cmpistri(A, B, M) __builtin_ia32_pcmpistri128((A), (B), (M))

#define _mm_cmpestrm(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
#define _mm_cmpestri(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))
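
/* Usage sketch (an illustrative comment, not part of the original header):
 * locating the first byte of a 16-byte fragment that belongs to a small
 * character set, using _mm_cmpistri in "equal any" mode. Both operands use
 * implicit lengths: a zero byte terminates the set and the data.
 *
 *   char __buf[16]  = "find the comma,x";       // 16 bytes, NUL dropped
 *   __m128i __set   = _mm_cvtsi32_si128(',');   // set = { ',' }
 *   __m128i __data  = _mm_loadu_si128((__m128i *)__buf);
 *   int __idx = _mm_cmpistri(__set, __data,
 *                            _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
 *                            _SIDD_LEAST_SIGNIFICANT);
 *                            // __idx == 14; returns 16 when nothing matches
 */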
/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
#define _mm_cmpistra(A, B, M) \
  __builtin_ia32_pcmpistria128((A), (B), (M))
#define _mm_cmpistrc(A, B, M) \
  __builtin_ia32_pcmpistric128((A), (B), (M))
#define _mm_cmpistro(A, B, M) \
  __builtin_ia32_pcmpistrio128((A), (B), (M))
#define _mm_cmpistrs(A, B, M) \
  __builtin_ia32_pcmpistris128((A), (B), (M))
#define _mm_cmpistrz(A, B, M) \
  __builtin_ia32_pcmpistriz128((A), (B), (M))

#define _mm_cmpestra(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrc(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestric128((A), (LA), (B), (LB), (M))
#define _mm_cmpestro(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestrio128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrs(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestris128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrz(A, LA, B, LB, M) \
  __builtin_ia32_pcmpestriz128((A), (LA), (B), (LB), (M))

/* SSE4.2 Compare Packed Data -- Greater Than. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
{
  return __builtin_ia32_pcmpgtq((__v2di)__V1, (__v2di)__V2);
}

/* SSE4.2 Accumulate CRC32. */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u8(unsigned int __C, unsigned char __D)
{
  return __builtin_ia32_crc32qi(__C, __D);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u16(unsigned int __C, unsigned short __D)
{
  return __builtin_ia32_crc32hi(__C, __D);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u32(unsigned int __C, unsigned int __D)
{
  return __builtin_ia32_crc32si(__C, __D);
}

#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
{
  return __builtin_ia32_crc32di(__C, __D);
}
#endif /* __x86_64__ */

/* SSE4.2 Population Count. */
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_popcnt_u32(unsigned int __A)
{
  return __builtin_popcount(__A);
}

#ifdef __x86_64__
static __inline__ long long __attribute__((__always_inline__, __nodebug__))
_mm_popcnt_u64(unsigned long long __A)
{
  return __builtin_popcountll(__A);
}
#endif /* __x86_64__ */

#endif /* __SSE4_2__ */
#endif /* __SSE4_1__ */

#endif /* _SMMINTRIN_H */
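
/* Usage sketch (an illustrative comment, not part of the original header):
 * accumulating a CRC-32C checksum (Castagnoli polynomial, as used by iSCSI
 * and SCTP) over a byte buffer with _mm_crc32_u8. __buf and __len stand for
 * a caller-supplied unsigned char buffer and its length.
 *
 *   unsigned int __crc = 0xFFFFFFFFU;             // conventional seed
 *   for (size_t __i = 0; __i < __len; ++__i)
 *     __crc = _mm_crc32_u8(__crc, __buf[__i]);
 *   __crc = ~__crc;                               // conventional final XOR
 */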