SkNx_sse.h revision 3296bee70d074bb8094b3229dbe12fa016657e90
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

#include <immintrin.h>

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.

#define SKNX_IS_FAST
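// (SKNX_IS_FAST signals to clients that these SkNx specializations are backed by
// real SIMD instructions rather than the portable scalar fallback.)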

template <>
class SkNx<2, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec(_mm_set1_ps(val)) {}
    static SkNx Load(const void* ptr) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
    }
    SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx   sqrt() const { return _mm_sqrt_ps(fVec); }
    SkNx  rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }
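    // Note: rsqrt() and invert() are the hardware approximations (roughly 12 bits
    // of precision), not exact 1/sqrt(x) and 1/x.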

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

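    // _mm_movemask_epi8 yields one bit per byte; the low 8 bits cover this
    // vector's two float lanes.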
    bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
    bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }

    __m128 fVec;
};

template <>
class SkNx<4, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val)           : fVec( _mm_set1_ps(val) ) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }

    SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
    SkNx floor() const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_floor_ps(fVec);
    #else
        // Emulate _mm_floor_ps() with SSE2:
        //   - roundtrip through integers via truncation
        //   - subtract 1 if that's too big (possible for negative values).
        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
        // Seems plenty big.
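        // For example, floor(-1.5f): truncation gives -1.0f, which is greater than
        // -1.5f, so we subtract 1.0f and get -2.0f.  For +1.5f, truncation gives
        // +1.0f, which is not greater than +1.5f, so it's returned unchanged.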
        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
    #endif
    }

    SkNx   sqrt() const { return _mm_sqrt_ps(fVec); }
    SkNx  rsqrt() const { return _mm_rsqrt_ps(fVec); }
    SkNx invert() const { return _mm_rcp_ps(fVec); }

    float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
    bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
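        // fVec is expected to hold comparison results, i.e. each lane all-1 or all-0
        // bits.  _mm_blendv_ps keys off only each lane's top bit, but the SSE2
        // and/andnot fallback needs the full mask.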
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_ps(e.fVec, t.fVec, fVec);
    #else
        return _mm_or_ps(_mm_and_ps   (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
    #endif
    }

    __m128 fVec;
};

template <>
class SkNx<4, int> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(int val) : fVec(_mm_set1_epi32(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(int a, int b, int c, int d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const {
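        // SSE2 has no 32x32->32 multiply (_mm_mullo_epi32 is SSE4.1), so emulate it:
        // _mm_mul_epu32 multiplies lanes 0 and 2 into two 64-bit products, and
        // shifting both inputs right by 4 bytes does the same for lanes 1 and 3.
        // The shuffles then gather the low 32 bits of each product, and the unpack
        // interleaves them back into the original lane order.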
        __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
                mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
    }

    SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }

    int operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; int is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
    #else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    #endif
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
         uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    static SkNx Min(const SkNx& a, const SkNx& b) {
        // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
        // signed version, _mm_min_epi16, then shift back.
        const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
        const __m128i top_8x = _mm_set1_epi16(top);
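        // The byte-wise add/sub below are safe stand-ins for _mm_add_epi16/_mm_sub_epi16
        // here: the low byte of each 16-bit addend is zero, so no carry or borrow ever
        // crosses a byte boundary, and the net effect is just flipping each lane's top bit.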
        return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
                                                  _mm_sub_epi8(b.fVec, top_8x)));
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    SkNx() {}
    SkNx(const __m128i& vec) : fVec(vec) {}
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
        : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}

    static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
    void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }
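    // These move exactly 4 bytes through the low lane of the register; the other
    // 12 bytes are zeroed on load and ignored on store.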

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
         uint8_t e, uint8_t f, uint8_t g, uint8_t h,
         uint8_t i, uint8_t j, uint8_t k, uint8_t l,
         uint8_t m, uint8_t n, uint8_t o, uint8_t p)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}

    void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }

    static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
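        // XOR with 0x80 maps unsigned [0,255] onto signed [-128,127] while preserving
        // order: 0x00 becomes -128 (smallest) and 0xFF becomes +127 (largest).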
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }

    uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};

template<> /*static*/ inline Sk4f SkNx_cast<float, int>(const Sk4i& src) {
    return _mm_cvtepi32_ps(src.fVec);
}

template <> /*static*/ inline Sk4i SkNx_cast<int, float>(const Sk4f& src) {
    return _mm_cvttps_epi32(src.fVec);
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
    // Ideally we'd use _mm_packus_epi32 here.  But that's SSE4.1+.
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
#else
    // With SSE2, we have to emulate _mm_packus_epi32 with _mm_packs_epi32:
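    // Biasing by 0x8000 maps the unsigned range [0, 65535] onto the signed range
    // [-32768, 32767], where the signed saturating pack behaves correctly; the
    // 16-bit add afterwards undoes the bias.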
    _32 = _mm_sub_epi32(_32, _mm_set1_epi32((int)0x00008000));
    return _mm_add_epi16(_mm_packs_epi32(_32, _32), _mm_set1_epi16((short)0x8000));
#endif
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
    auto _16 = _mm_packus_epi16(_32, _32);
    return     _mm_packus_epi16(_16, _16);
#endif
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
         _32 = _mm_unpacklo_epi16(_16,     _mm_setzero_si128());
#endif
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
    return _mm_cvtepi32_ps(_32);
}

template<> /*static*/ inline Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);

    return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                             _mm_cvttps_epi32(b.fVec)),
                            _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                             _mm_cvttps_epi32(d.fVec)));
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return _mm_packus_epi16(src.fVec, src.fVec);
}

template<> /*static*/ inline Sk4i SkNx_cast<int, uint16_t>(const Sk4h& src) {
    return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
}

template<> /*static*/ inline Sk4h SkNx_cast<uint16_t, int>(const Sk4i& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    return _mm_packus_epi32(src.fVec, src.fVec);
#else
    // Sign extend to trick _mm_packs_epi32() into doing the pack we want.
    __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
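    // After the shift pair, each lane holds the sign-extension of its own low 16 bits,
    // which always fits in [-32768, 32767], so _mm_packs_epi32 packs it losslessly.
    // Net effect: plain truncation of each int to 16 bits.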
    return _mm_packs_epi32(x,x);
#endif
}

template<> /*static*/ inline Sk4b SkNx_cast<uint8_t, int>(const Sk4i& src) {
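    // This double pack relies on signed saturation at each step, so it's only exact
    // for inputs already in the signed 16-bit range; values outside [-32768, 32767]
    // can wrap before the saturating packs kick in.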
    return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}

static inline Sk4i Sk4f_round(const Sk4f& x) {
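    // _mm_cvtps_epi32 rounds with the current MXCSR rounding mode, which is
    // round-to-nearest-even unless something has changed it.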
    return _mm_cvtps_epi32(x.fVec);
}

#endif // SkNx_sse_DEFINED