// SkNx_sse.h, revision f55ea6a1deb21120944d406124a2984b5009260a
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

#include <immintrin.h>

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.
// If you do, make sure this is in a static inline function... anywhere else risks violating ODR.
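//
// A minimal sketch of that pattern (hypothetical helper, illustration only):
//
//   static inline __m128 floor_ps(__m128 v) {
//   #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
//       return _mm_floor_ps(v);                            // single SSE4.1 instruction
//   #else
//       __m128 t = _mm_cvtepi32_ps(_mm_cvttps_epi32(v));   // SSE2 fallback (see floor() below)
//       return _mm_sub_ps(t, _mm_and_ps(_mm_cmpgt_ps(t, v), _mm_set1_ps(1.0f)));
//   #endif
//   }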

namespace {

template <>
class SkNx<2, float> {
public:
    AI SkNx(const __m128& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val) : fVec(_mm_set1_ps(val)) {}
    AI static SkNx Load(const void* ptr) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)ptr));
    }
    AI SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    AI void store(void* ptr) const { _mm_storel_pi((__m64*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    AI SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    AI SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    AI SkNx   sqrt() const { return _mm_sqrt_ps (fVec);  }
    AI SkNx  rsqrt() const { return _mm_rsqrt_ps(fVec); }
    AI SkNx invert() const { return _mm_rcp_ps(fVec); }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

    AI bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
    AI bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }

    __m128 fVec;
};

template <>
class SkNx<4, float> {
public:
    AI SkNx(const __m128& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(float val)           : fVec( _mm_set1_ps(val) ) {}
    AI SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    AI static SkNx Load(const void* ptr) { return _mm_loadu_ps((const float*)ptr); }
    AI void store(void* ptr) const { _mm_storeu_ps((float*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        __m128 v0 = _mm_loadu_ps(((float*)ptr) +  0),
               v1 = _mm_loadu_ps(((float*)ptr) +  4),
               v2 = _mm_loadu_ps(((float*)ptr) +  8),
               v3 = _mm_loadu_ps(((float*)ptr) + 12);
        _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
        *r = v0;
        *g = v1;
        *b = v2;
        *a = v3;
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        __m128 v0 = r.fVec,
               v1 = g.fVec,
               v2 = b.fVec,
               v3 = a.fVec;
        _MM_TRANSPOSE4_PS(v0, v1, v2, v3);
        _mm_storeu_ps(((float*) dst) +  0, v0);
        _mm_storeu_ps(((float*) dst) +  4, v1);
        _mm_storeu_ps(((float*) dst) +  8, v2);
        _mm_storeu_ps(((float*) dst) + 12, v3);
    }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    AI SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    AI SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    AI SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    AI SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    AI SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    AI SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

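    // -0.0f is a sign bit with all other bits zero, so andnot() clears just the sign.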
    AI SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }
    AI SkNx floor() const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_floor_ps(fVec);
    #else
        // Emulate _mm_floor_ps() with SSE2:
        //   - roundtrip through integers via truncation
        //   - subtract 1 if that's too big (possible for negative values).
        // This restricts the domain of our inputs to a maximum somewhere around 2^31.
        // Seems plenty big.
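        // e.g. floor(-1.5f): roundtrip = -1.0f, which is > -1.5f, so subtract 1 to get -2.0f.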
        __m128 roundtrip = _mm_cvtepi32_ps(_mm_cvttps_epi32(fVec));
        __m128 too_big = _mm_cmpgt_ps(roundtrip, fVec);
        return _mm_sub_ps(roundtrip, _mm_and_ps(too_big, _mm_set1_ps(1.0f)));
    #endif
    }

    AI SkNx   sqrt() const { return _mm_sqrt_ps (fVec);  }
    AI SkNx  rsqrt() const { return _mm_rsqrt_ps(fVec); }
    AI SkNx invert() const { return _mm_rcp_ps(fVec); }

    AI float operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    AI bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
    AI bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_ps(e.fVec, t.fVec, fVec);
    #else
        return _mm_or_ps(_mm_and_ps   (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
    #endif
    }

    __m128 fVec;
};

template <>
class SkNx<4, int32_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(int32_t val) : fVec(_mm_set1_epi32(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const {
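        // SSE2 has no 32x32 -> 32 multiply, so multiply the even and odd lanes
        // separately with _mm_mul_epu32 (which gives 64-bit products), then
        // shuffle the low 32 bits of each product back into place.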
        __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
                mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
    }

    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    AI SkNx operator  < (const SkNx& o) const { return _mm_cmplt_epi32 (fVec, o.fVec); }
    AI SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_epi32 (fVec, o.fVec); }

    AI int32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; int32_t is[4]; } pun = {fVec};
        return pun.is[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
    #else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    #endif
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint32_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint32_t val) : fVec(_mm_set1_epi32(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
    // Not quite sure how to best do operator * in SSE2.  We probably don't use it.
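    // (If it's ever needed, the even/odd _mm_mul_epu32 trick in SkNx<4,int32_t>::operator*
    // above would work unchanged here: the low 32 bits of a product are the same
    // whether the operands are treated as signed or unsigned.)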

    AI SkNx operator & (const SkNx& o) const { return _mm_and_si128(fVec, o.fVec); }
    AI SkNx operator | (const SkNx& o) const { return _mm_or_si128(fVec, o.fVec); }
    AI SkNx operator ^ (const SkNx& o) const { return _mm_xor_si128(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi32(fVec, bits); }

    AI SkNx operator == (const SkNx& o) const { return _mm_cmpeq_epi32 (fVec, o.fVec); }
    // operator < and > take a little extra fiddling to make work for unsigned ints.
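    // (The usual trick: XOR both sides with 0x80000000 to flip the sign bit, then use
    // the signed compares; SkNx<16,uint8_t>::operator< below does this at 8-bit width.)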

    AI uint32_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint32_t us[4]; } pun = {fVec};
        return pun.us[k&3];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
    #if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
        return _mm_blendv_epi8(e.fVec, t.fVec, fVec);
    #else
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    #endif
    }

    __m128i fVec;
};


template <>
class SkNx<4, uint16_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d)
        : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
    AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        __m128i lo = _mm_loadu_si128(((__m128i*)ptr) + 0),
                hi = _mm_loadu_si128(((__m128i*)ptr) + 1);
        __m128i even = _mm_unpacklo_epi16(lo, hi),   // r0 r2 g0 g2 b0 b2 a0 a2
                 odd = _mm_unpackhi_epi16(lo, hi);   // r1 r3 ...
        __m128i rg = _mm_unpacklo_epi16(even, odd),  // r0 r1 r2 r3 g0 g1 g2 g3
                ba = _mm_unpackhi_epi16(even, odd);  // b0 b1 ...   a0 a1 ...
        *r = rg;
        *g = _mm_srli_si128(rg, 8);
        *b = ba;
        *a = _mm_srli_si128(ba, 8);
    }
    AI static void Store4(void* dst, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        __m128i rg = _mm_unpacklo_epi16(r.fVec, g.fVec);
        __m128i ba = _mm_unpacklo_epi16(b.fVec, a.fVec);
        __m128i lo = _mm_unpacklo_epi32(rg, ba);
        __m128i hi = _mm_unpackhi_epi32(rg, ba);
        _mm_storeu_si128(((__m128i*) dst) + 0, lo);
        _mm_storeu_si128(((__m128i*) dst) + 1, hi);
    }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&3];
    }

    __m128i fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    AI SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
            uint16_t e, uint16_t f, uint16_t g, uint16_t h)
        : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI static void Load4(const void* ptr, SkNx* r, SkNx* g, SkNx* b, SkNx* a) {
        // TODO: AVX2 version
        __m128i _01 = _mm_loadu_si128(((__m128i*)ptr) + 0),
                _23 = _mm_loadu_si128(((__m128i*)ptr) + 1),
                _45 = _mm_loadu_si128(((__m128i*)ptr) + 2),
                _67 = _mm_loadu_si128(((__m128i*)ptr) + 3);

        __m128i _02 = _mm_unpacklo_epi16(_01, _23),  // r0 r2 g0 g2 b0 b2 a0 a2
                _13 = _mm_unpackhi_epi16(_01, _23),  // r1 r3 g1 g3 b1 b3 a1 a3
                _46 = _mm_unpacklo_epi16(_45, _67),
                _57 = _mm_unpackhi_epi16(_45, _67);

        __m128i rg0123 = _mm_unpacklo_epi16(_02, _13),  // r0 r1 r2 r3 g0 g1 g2 g3
                ba0123 = _mm_unpackhi_epi16(_02, _13),  // b0 b1 b2 b3 a0 a1 a2 a3
                rg4567 = _mm_unpacklo_epi16(_46, _57),
                ba4567 = _mm_unpackhi_epi16(_46, _57);

        *r = _mm_unpacklo_epi64(rg0123, rg4567);
        *g = _mm_unpackhi_epi64(rg0123, rg4567);
        *b = _mm_unpacklo_epi64(ba0123, ba4567);
        *a = _mm_unpackhi_epi64(ba0123, ba4567);
    }
    AI static void Store4(void* ptr, const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
        // TODO: AVX2 version
        __m128i rg0123 = _mm_unpacklo_epi16(r.fVec, g.fVec),  // r0 g0 r1 g1 r2 g2 r3 g3
                rg4567 = _mm_unpackhi_epi16(r.fVec, g.fVec),  // r4 g4 r5 g5 r6 g6 r7 g7
                ba0123 = _mm_unpacklo_epi16(b.fVec, a.fVec),
                ba4567 = _mm_unpackhi_epi16(b.fVec, a.fVec);

        _mm_storeu_si128((__m128i*)ptr + 0, _mm_unpacklo_epi32(rg0123, ba0123));
        _mm_storeu_si128((__m128i*)ptr + 1, _mm_unpackhi_epi32(rg0123, ba0123));
        _mm_storeu_si128((__m128i*)ptr + 2, _mm_unpacklo_epi32(rg4567, ba4567));
        _mm_storeu_si128((__m128i*)ptr + 3, _mm_unpackhi_epi32(rg4567, ba4567));
    }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    AI SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    AI SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    AI SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) {
        // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
        // signed version, _mm_min_epi16, then shift back.
        const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
        const __m128i top_8x = _mm_set1_epi16(top);
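        // The byte-wise add/sub below is safe: top's low byte is zero, so a 16-bit
        // add or subtract of it never carries between bytes, and _mm_add_epi8/_mm_sub_epi8
        // give the same results their epi16 counterparts would.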
        return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
                                                  _mm_sub_epi8(b.fVec, top_8x)));
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    AI uint16_t operator[](int k) const {
        SkASSERT(0 <= k && k < 8);
        union { __m128i v; uint16_t us[8]; } pun = {fVec};
        return pun.us[k&7];
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    AI SkNx() {}
    AI SkNx(const __m128i& vec) : fVec(vec) {}
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d)
        : fVec(_mm_setr_epi8(a,b,c,d, 0,0,0,0, 0,0,0,0, 0,0,0,0)) {}


    AI static SkNx Load(const void* ptr) { return _mm_cvtsi32_si128(*(const int*)ptr); }
    AI void store(void* ptr) const { *(int*)ptr = _mm_cvtsi128_si32(fVec); }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 4);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&3];
    }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    AI SkNx(const __m128i& vec) : fVec(vec) {}

    AI SkNx() {}
    AI SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    AI static SkNx Load(const void* ptr) { return _mm_loadu_si128((const __m128i*)ptr); }
    AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
            uint8_t e, uint8_t f, uint8_t g, uint8_t h,
            uint8_t i, uint8_t j, uint8_t k, uint8_t l,
            uint8_t m, uint8_t n, uint8_t o, uint8_t p)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}

    AI void store(void* ptr) const { _mm_storeu_si128((__m128i*)ptr, fVec); }

    AI SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    AI SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    AI SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }

    AI static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    AI SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }

    AI uint8_t operator[](int k) const {
        SkASSERT(0 <= k && k < 16);
        union { __m128i v; uint8_t us[16]; } pun = {fVec};
        return pun.us[k&15];
    }

    AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};

#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_AVX2

    template <>
    class SkNx<8, uint8_t> {
    public:
        AI SkNx(const __m128i& vec) : fVec(vec) {}

        AI SkNx() {}
        AI SkNx(uint8_t v) : fVec(_mm_set1_epi8(v)) {}
        AI SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
                uint8_t e, uint8_t f, uint8_t g, uint8_t h)
            : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, 0,0,0,0, 0,0,0,0)) {}


        AI static SkNx Load(const void* ptr) { return _mm_loadl_epi64((const __m128i*)ptr); }
        AI void store(void* ptr) const { _mm_storel_epi64((__m128i*)ptr, fVec); }

        AI uint8_t operator[](int k) const {
            SkASSERT(0 <= k && k < 8);
            union { __m128i v; uint8_t us[16]; } pun = {fVec};
            return pun.us[k&7];
        }

        __m128i fVec;
    };

    template <>
    class SkNx<8, int32_t> {
    public:
        AI SkNx(const __m256i& vec) : fVec(vec) {}

        AI SkNx() {}
        AI SkNx(int32_t v) : fVec(_mm256_set1_epi32(v)) {}
        AI SkNx(int32_t a, int32_t b, int32_t c, int32_t d,
                int32_t e, int32_t f, int32_t g, int32_t h)
            : fVec(_mm256_setr_epi32(a,b,c,d, e,f,g,h)) {}

        AI static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
        AI void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }

        AI SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
        AI SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
        AI SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }

        AI SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
        AI SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
        AI SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }

        AI SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
        AI SkNx operator >> (int bits) const { return _mm256_srai_epi32(fVec, bits); }

        AI int32_t operator[](int k) const {
            SkASSERT(0 <= k && k < 8);
            union { __m256i v; int32_t is[8]; } pun = {fVec};
            return pun.is[k&7];
        }

        __m256i fVec;
    };

    template <>
    class SkNx<8, uint32_t> {
    public:
        AI SkNx(const __m256i& vec) : fVec(vec) {}

        AI SkNx() {}
        AI SkNx(uint32_t v) : fVec(_mm256_set1_epi32(v)) {}
        AI SkNx(uint32_t a, uint32_t b, uint32_t c, uint32_t d,
                uint32_t e, uint32_t f, uint32_t g, uint32_t h)
            : fVec(_mm256_setr_epi32(a,b,c,d, e,f,g,h)) {}

        AI static SkNx Load(const void* ptr) { return _mm256_loadu_si256((const __m256i*)ptr); }
        AI void store(void* ptr) const { _mm256_storeu_si256((__m256i*)ptr, fVec); }

        AI SkNx operator + (const SkNx& o) const { return _mm256_add_epi32(fVec, o.fVec); }
        AI SkNx operator - (const SkNx& o) const { return _mm256_sub_epi32(fVec, o.fVec); }
        AI SkNx operator * (const SkNx& o) const { return _mm256_mullo_epi32(fVec, o.fVec); }

        AI SkNx operator & (const SkNx& o) const { return _mm256_and_si256(fVec, o.fVec); }
        AI SkNx operator | (const SkNx& o) const { return _mm256_or_si256(fVec, o.fVec); }
        AI SkNx operator ^ (const SkNx& o) const { return _mm256_xor_si256(fVec, o.fVec); }

        AI SkNx operator << (int bits) const { return _mm256_slli_epi32(fVec, bits); }
        AI SkNx operator >> (int bits) const { return _mm256_srli_epi32(fVec, bits); }

        AI uint32_t operator[](int k) const {
            SkASSERT(0 <= k && k < 8);
            union { __m256i v; uint32_t us[8]; } pun = {fVec};
            return pun.us[k&7];
        }

        __m256i fVec;
    };

    // _mm256_unpack{lo,hi}_pd() auto-casting to and from __m256d.
    AI static __m256 unpacklo_pd(__m256 x, __m256 y) {
        return _mm256_castpd_ps(_mm256_unpacklo_pd(_mm256_castps_pd(x), _mm256_castps_pd(y)));
    }
    AI static __m256 unpackhi_pd(__m256 x, __m256 y) {
        return _mm256_castpd_ps(_mm256_unpackhi_pd(_mm256_castps_pd(x), _mm256_castps_pd(y)));
    }

    template <>
    class SkNx<8, float> {
    public:
        AI SkNx(const __m256& vec) : fVec(vec) {}

        AI SkNx() {}
        AI SkNx(float val) : fVec(_mm256_set1_ps(val)) {}
        AI SkNx(float a, float b, float c, float d,
                float e, float f, float g, float h) : fVec(_mm256_setr_ps(a,b,c,d,e,f,g,h)) {}

        AI static SkNx Load(const void* ptr) { return _mm256_loadu_ps((const float*)ptr); }
        AI void store(void* ptr) const { _mm256_storeu_ps((float*)ptr, fVec); }

        AI static void Store4(void* ptr,
                              const SkNx& r, const SkNx& g, const SkNx& b, const SkNx& a) {
            __m256 rg0145 = _mm256_unpacklo_ps(r.fVec, g.fVec),  // r0 g0 r1 g1 | r4 g4 r5 g5
                   rg2367 = _mm256_unpackhi_ps(r.fVec, g.fVec),  // r2 ...      | r6 ...
                   ba0145 = _mm256_unpacklo_ps(b.fVec, a.fVec),  // b0 a0 b1 a1 | b4 a4 b5 a5
                   ba2367 = _mm256_unpackhi_ps(b.fVec, a.fVec);  // b2 ...      | b6 ...

            __m256 _04 = unpacklo_pd(rg0145, ba0145),  // r0 g0 b0 a0 | r4 g4 b4 a4
                   _15 = unpackhi_pd(rg0145, ba0145),  // r1 ...      | r5 ...
                   _26 = unpacklo_pd(rg2367, ba2367),  // r2 ...      | r6 ...
                   _37 = unpackhi_pd(rg2367, ba2367);  // r3 ...      | r7 ...

            __m256 _01 = _mm256_permute2f128_ps(_04, _15, 32),  // 32 == 0010 0000 == lo, lo
                   _23 = _mm256_permute2f128_ps(_26, _37, 32),
                   _45 = _mm256_permute2f128_ps(_04, _15, 49),  // 49 == 0011 0001 == hi, hi
                   _67 = _mm256_permute2f128_ps(_26, _37, 49);

            _mm256_storeu_ps((float*)ptr + 0*8, _01);
            _mm256_storeu_ps((float*)ptr + 1*8, _23);
            _mm256_storeu_ps((float*)ptr + 2*8, _45);
            _mm256_storeu_ps((float*)ptr + 3*8, _67);
        }

        AI SkNx operator+(const SkNx& o) const { return _mm256_add_ps(fVec, o.fVec); }
        AI SkNx operator-(const SkNx& o) const { return _mm256_sub_ps(fVec, o.fVec); }
        AI SkNx operator*(const SkNx& o) const { return _mm256_mul_ps(fVec, o.fVec); }
        AI SkNx operator/(const SkNx& o) const { return _mm256_div_ps(fVec, o.fVec); }

        AI SkNx operator==(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_EQ_OQ); }
        AI SkNx operator!=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_NEQ_OQ); }
        AI SkNx operator <(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LT_OQ); }
        AI SkNx operator >(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GT_OQ); }
        AI SkNx operator<=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_LE_OQ); }
        AI SkNx operator>=(const SkNx& o) const { return _mm256_cmp_ps(fVec, o.fVec, _CMP_GE_OQ); }

        AI static SkNx Min(const SkNx& l, const SkNx& r) { return _mm256_min_ps(l.fVec, r.fVec); }
        AI static SkNx Max(const SkNx& l, const SkNx& r) { return _mm256_max_ps(l.fVec, r.fVec); }

        AI SkNx   sqrt() const { return _mm256_sqrt_ps (fVec); }
        AI SkNx  rsqrt() const { return _mm256_rsqrt_ps(fVec); }
        AI SkNx invert() const { return _mm256_rcp_ps  (fVec); }

        AI SkNx abs() const { return _mm256_andnot_ps(_mm256_set1_ps(-0.0f), fVec); }
        AI SkNx floor() const { return _mm256_floor_ps(fVec); }

        AI float operator[](int k) const {
            SkASSERT(0 <= k && k < 8);
            union { __m256 v; float fs[8]; } pun = {fVec};
            return pun.fs[k&7];
        }

        AI SkNx thenElse(const SkNx& t, const SkNx& e) const {
            return _mm256_blendv_ps(e.fVec, t.fVec, fVec);
        }

        __m256 fVec;
    };

    AI static void SkNx_split(const Sk8f& v, Sk4f* lo, Sk4f* hi) {
        *lo = _mm256_extractf128_ps(v.fVec, 0);
        *hi = _mm256_extractf128_ps(v.fVec, 1);
    }

    AI static Sk8f SkNx_join(const Sk4f& lo, const Sk4f& hi) {
        return _mm256_insertf128_ps(_mm256_castps128_ps256(lo.fVec), hi.fVec, 1);
    }

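    // Note: _mm256_fmadd_ps is an FMA instruction, not plain AVX2; this assumes builds
    // targeting SK_CPU_SSE_LEVEL_AVX2 also enable FMA.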
    AI static Sk8f SkNx_fma(const Sk8f& a, const Sk8f& b, const Sk8f& c) {
        return _mm256_fmadd_ps(a.fVec, b.fVec, c.fVec);
    }

    template<> AI /*static*/ Sk8i SkNx_cast<int>(const Sk8b& src) {
        return _mm256_cvtepu8_epi32(src.fVec);
    }

    template<> AI /*static*/ Sk8f SkNx_cast<float>(const Sk8b& src) {
        return _mm256_cvtepi32_ps(SkNx_cast<int>(src).fVec);
    }

    template<> AI /*static*/ Sk8f SkNx_cast<float>(const Sk8i& src) {
        return _mm256_cvtepi32_ps(src.fVec);
    }

    template<> AI /*static*/ Sk8i SkNx_cast<int>(const Sk8f& src) {
        return _mm256_cvttps_epi32(src.fVec);
    }

    template<> AI /*static*/ Sk8i SkNx_cast<int>(const Sk8h& src) {
        return _mm256_cvtepu16_epi32(src.fVec);
    }
    template<> AI /*static*/ Sk8h SkNx_cast<uint16_t>(const Sk8i& src) {
        __m128i lo = _mm256_extractf128_si256(src.fVec, 0),
                hi = _mm256_extractf128_si256(src.fVec, 1);
        return _mm_packus_epi32(lo, hi);
    }
    template<> AI /*static*/ Sk8b SkNx_cast<uint8_t>(const Sk8i& src) {
        auto _16 = SkNx_cast<uint16_t>(src);
        return _mm_packus_epi16(_16.fVec, _16.fVec);
    }

#endif

template<> AI /*static*/ Sk4f SkNx_cast<float, int32_t>(const Sk4i& src) {
    return _mm_cvtepi32_ps(src.fVec);
}
template<> AI /*static*/ Sk4f SkNx_cast<float, uint32_t>(const Sk4u& src) {
    return SkNx_cast<float>(Sk4i::Load(&src));
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, float>(const Sk4f& src) {
    return _mm_cvttps_epi32(src.fVec);
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, int32_t>(const Sk4i& src) {
#if 0 && SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSE41
    // TODO: This seems to be causing code generation problems.  Investigate?
    return _mm_packus_epi32(src.fVec, src.fVec);
#elif SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    // With SSSE3, we can just shuffle the low 2 bytes from each lane right into place.
    const int _ = ~0;
    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,1, 4,5, 8,9, 12,13, _,_,_,_,_,_,_,_));
#else
    // With SSE2, we have to sign extend our input, making _mm_packs_epi32 do the pack we want.
    __m128i x = _mm_srai_epi32(_mm_slli_epi32(src.fVec, 16), 16);
    return _mm_packs_epi32(x,x);
#endif
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, float>(const Sk4f& src) {
    return SkNx_cast<uint16_t>(SkNx_cast<int32_t>(src));
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, float>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
    auto _16 = _mm_packus_epi16(_32, _32);
    return     _mm_packus_epi16(_16, _16);
#endif
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint8_t>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
    return _mm_unpacklo_epi16(_16, _mm_setzero_si128());
#endif
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint8_t>(const Sk4b& src) {
    return _mm_cvtepi32_ps(SkNx_cast<int32_t>(src).fVec);
}

template<> AI /*static*/ Sk4f SkNx_cast<float, uint16_t>(const Sk4h& src) {
    auto _32 = _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
    return _mm_cvtepi32_ps(_32);
}

template<> AI /*static*/ Sk16b SkNx_cast<uint8_t, float>(const Sk16f& src) {
    Sk8f ab, cd;
    SkNx_split(src, &ab, &cd);

    Sk4f a,b,c,d;
    SkNx_split(ab, &a, &b);
    SkNx_split(cd, &c, &d);

    return _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                             _mm_cvttps_epi32(b.fVec)),
                            _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                             _mm_cvttps_epi32(d.fVec)));
}

template<> AI /*static*/ Sk4h SkNx_cast<uint16_t, uint8_t>(const Sk4b& src) {
    return _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128());
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, uint16_t>(const Sk4h& src) {
    return _mm_packus_epi16(src.fVec, src.fVec);
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint16_t>(const Sk4h& src) {
    return _mm_unpacklo_epi16(src.fVec, _mm_setzero_si128());
}

template<> AI /*static*/ Sk4b SkNx_cast<uint8_t, int32_t>(const Sk4i& src) {
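    // Each 32-bit lane is treated as two 16-bit lanes; assuming the values fit in 16 bits
    // (high halves zero), two unsigned-saturating packs land the bytes in the low 4 lanes.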
    return _mm_packus_epi16(_mm_packus_epi16(src.fVec, src.fVec), src.fVec);
}

template<> AI /*static*/ Sk4i SkNx_cast<int32_t, uint32_t>(const Sk4u& src) {
    return src.fVec;
}

AI static Sk4i Sk4f_round(const Sk4f& x) {
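    // Unlike the truncating _mm_cvttps_epi32 used by SkNx_cast<int32_t> above,
    // _mm_cvtps_epi32 rounds to nearest (ties to even) under the default MXCSR mode.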
    return _mm_cvtps_epi32(x.fVec);
}

}  // namespace

#endif//SkNx_sse_DEFINED