// SkNx_sse.h, revision c33065a93ad0874672c4c66b9711aa0b3ef7b7e7
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkNx_sse_DEFINED
#define SkNx_sse_DEFINED

// This file may assume <= SSE2, but must check SK_CPU_SSE_LEVEL for anything more recent.

#define SKNX_IS_FAST

namespace {  // See SkNx.h


template <>
class SkNx<2, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val) : fVec(_mm_set1_ps(val)) {}
    static SkNx Load(const float vals[2]) {
        return _mm_castsi128_ps(_mm_loadl_epi64((const __m128i*)vals));
    }
    SkNx(float a, float b) : fVec(_mm_setr_ps(a,b,0,0)) {}

    void store(float vals[2]) const { _mm_storel_pi((__m64*)vals, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

    SkNx  sqrt() const { return _mm_sqrt_ps (fVec);  }
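    // rsqrt0/1/2 trade accuracy for speed on some backends (see SkNx.h); here all
    // three simply use SSE's hardware estimate, accurate to roughly 12 bits.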
    SkNx rsqrt0() const { return _mm_rsqrt_ps(fVec); }
    SkNx rsqrt1() const { return this->rsqrt0(); }
    SkNx rsqrt2() const { return this->rsqrt1(); }

    SkNx       invert() const { return SkNx(1) / *this; }
    SkNx approxInvert() const { return _mm_rcp_ps(fVec); }

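    // Read one lane by type-punning through a union; k&1 keeps the index in bounds.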
    template <int k> float kth() const {
        SkASSERT(0 <= k && k < 2);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&1];
    }

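    // _mm_movemask_epi8 gives one bit per byte; & 0xff keeps just the 8 bytes
    // backing our two active float lanes.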
    bool allTrue() const { return 0xff == (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }
    bool anyTrue() const { return 0x00 != (_mm_movemask_epi8(_mm_castps_si128(fVec)) & 0xff); }

    __m128 fVec;
};

template <>
class SkNx<2, double> {
public:
    SkNx(const __m128d& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(double val) : fVec(_mm_set1_pd(val)) {}
    static SkNx Load(const double vals[2]) { return _mm_loadu_pd(vals); }
    SkNx(double a, double b) : fVec(_mm_setr_pd(a,b)) {}

    void store(double vals[2]) const { _mm_storeu_pd(vals, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_pd(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_pd(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_pd(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_pd(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_pd (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_pd(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_pd (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_pd (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_pd (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_pd (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_pd(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_pd(l.fVec, r.fVec); }

    SkNx sqrt() const { return _mm_sqrt_pd(fVec);  }

    template <int k> double kth() const {
        SkASSERT(0 <= k && k < 2);
        union { __m128d v; double fs[2]; } pun = {fVec};
        return pun.fs[k&1];
    }

    bool allTrue() const { return 0x3 == _mm_movemask_pd(fVec); }
    bool anyTrue() const { return 0x0 != _mm_movemask_pd(fVec); }

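    // Lane-wise select: fVec holds all-ones or all-zeros comparison masks, so
    // (mask & t) | (~mask & e) picks t where the mask is set and e elsewhere.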
    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_pd(_mm_and_pd   (fVec, t.fVec),
                         _mm_andnot_pd(fVec, e.fVec));
    }

    __m128d fVec;
};

template <>
class SkNx<4, int> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(int val) : fVec(_mm_set1_epi32(val)) {}
    static SkNx Load(const int vals[4]) { return _mm_loadu_si128((const __m128i*)vals); }
    SkNx(int a, int b, int c, int d) : fVec(_mm_setr_epi32(a,b,c,d)) {}

    void store(int vals[4]) const { _mm_storeu_si128((__m128i*)vals, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi32(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi32(fVec, o.fVec); }
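    // SSE2 has no 32-bit lane-wise multiply (_mm_mullo_epi32 is SSE4.1).  Instead,
    // multiply the even lanes and the odd lanes with _mm_mul_epu32, then interleave
    // the low 32 bits of each 64-bit product.  (The low 32 bits are the same for
    // signed and unsigned multiplies.)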
    SkNx operator * (const SkNx& o) const {
        __m128i mul20 = _mm_mul_epu32(fVec, o.fVec),
                mul31 = _mm_mul_epu32(_mm_srli_si128(fVec, 4), _mm_srli_si128(o.fVec, 4));
        return _mm_unpacklo_epi32(_mm_shuffle_epi32(mul20, _MM_SHUFFLE(0,0,2,0)),
                                  _mm_shuffle_epi32(mul31, _MM_SHUFFLE(0,0,2,0)));
    }

    SkNx operator << (int bits) const { return _mm_slli_epi32(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srai_epi32(fVec, bits); }

    template <int k> int kth() const {
        SkASSERT(0 <= k && k < 4);
        switch (k) {
            case 0: return _mm_cvtsi128_si32(fVec);
            case 1: return _mm_cvtsi128_si32(_mm_srli_si128(fVec,  4));
            case 2: return _mm_cvtsi128_si32(_mm_srli_si128(fVec,  8));
            case 3: return _mm_cvtsi128_si32(_mm_srli_si128(fVec, 12));
            default: SkASSERT(false); return 0;
        }
    }

    __m128i fVec;
};

template <>
class SkNx<4, float> {
public:
    SkNx(const __m128& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(float val)           : fVec( _mm_set1_ps(val) ) {}
    static SkNx Load(const float vals[4]) { return _mm_loadu_ps(vals); }

    SkNx(float a, float b, float c, float d) : fVec(_mm_setr_ps(a,b,c,d)) {}

    void store(float vals[4]) const { _mm_storeu_ps(vals, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_ps(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_ps(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mul_ps(fVec, o.fVec); }
    SkNx operator / (const SkNx& o) const { return _mm_div_ps(fVec, o.fVec); }

    SkNx operator == (const SkNx& o) const { return _mm_cmpeq_ps (fVec, o.fVec); }
    SkNx operator != (const SkNx& o) const { return _mm_cmpneq_ps(fVec, o.fVec); }
    SkNx operator  < (const SkNx& o) const { return _mm_cmplt_ps (fVec, o.fVec); }
    SkNx operator  > (const SkNx& o) const { return _mm_cmpgt_ps (fVec, o.fVec); }
    SkNx operator <= (const SkNx& o) const { return _mm_cmple_ps (fVec, o.fVec); }
    SkNx operator >= (const SkNx& o) const { return _mm_cmpge_ps (fVec, o.fVec); }

    static SkNx Min(const SkNx& l, const SkNx& r) { return _mm_min_ps(l.fVec, r.fVec); }
    static SkNx Max(const SkNx& l, const SkNx& r) { return _mm_max_ps(l.fVec, r.fVec); }

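    // Clear each lane's sign bit: -0.0f is 0x80000000, so andnot masks it off.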
    SkNx abs() const { return _mm_andnot_ps(_mm_set1_ps(-0.0f), fVec); }

    SkNx  sqrt() const { return _mm_sqrt_ps (fVec);  }
    SkNx rsqrt0() const { return _mm_rsqrt_ps(fVec); }
    SkNx rsqrt1() const { return this->rsqrt0(); }
    SkNx rsqrt2() const { return this->rsqrt1(); }

    SkNx       invert() const { return SkNx(1) / *this; }
    SkNx approxInvert() const { return _mm_rcp_ps(fVec); }

    template <int k> float kth() const {
        SkASSERT(0 <= k && k < 4);
        union { __m128 v; float fs[4]; } pun = {fVec};
        return pun.fs[k&3];
    }

    bool allTrue() const { return 0xffff == _mm_movemask_epi8(_mm_castps_si128(fVec)); }
    bool anyTrue() const { return 0x0000 != _mm_movemask_epi8(_mm_castps_si128(fVec)); }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_ps(_mm_and_ps   (fVec, t.fVec),
                         _mm_andnot_ps(fVec, e.fVec));
    }

    __m128 fVec;
};

template <>
class SkNx<4, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const uint16_t vals[4]) { return _mm_loadl_epi64((const __m128i*)vals); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d) : fVec(_mm_setr_epi16(a,b,c,d,0,0,0,0)) {}

    void store(uint16_t vals[4]) const { _mm_storel_epi64((__m128i*)vals, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    template <int k> uint16_t kth() const {
        SkASSERT(0 <= k && k < 4);
        return _mm_extract_epi16(fVec, k);
    }

    __m128i fVec;
};

template <>
class SkNx<8, uint16_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint16_t val) : fVec(_mm_set1_epi16(val)) {}
    static SkNx Load(const uint16_t vals[8]) { return _mm_loadu_si128((const __m128i*)vals); }
    SkNx(uint16_t a, uint16_t b, uint16_t c, uint16_t d,
         uint16_t e, uint16_t f, uint16_t g, uint16_t h) : fVec(_mm_setr_epi16(a,b,c,d,e,f,g,h)) {}

    void store(uint16_t vals[8]) const { _mm_storeu_si128((__m128i*)vals, fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi16(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi16(fVec, o.fVec); }
    SkNx operator * (const SkNx& o) const { return _mm_mullo_epi16(fVec, o.fVec); }

    SkNx operator << (int bits) const { return _mm_slli_epi16(fVec, bits); }
    SkNx operator >> (int bits) const { return _mm_srli_epi16(fVec, bits); }

    static SkNx Min(const SkNx& a, const SkNx& b) {
        // No unsigned _mm_min_epu16, so we'll shift into a space where we can use the
        // signed version, _mm_min_epi16, then shift back.
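        // (Adding or subtracting 0x8000 only flips each lane's sign bit and never
        // carries across bytes, so the byte-wise epi8 add/sub below behave exactly
        // like their 16-bit counterparts.)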
        const uint16_t top = 0x8000; // Keep this separate from _mm_set1_epi16 or MSVC will whine.
        const __m128i top_8x = _mm_set1_epi16(top);
        return _mm_add_epi8(top_8x, _mm_min_epi16(_mm_sub_epi8(a.fVec, top_8x),
                                                  _mm_sub_epi8(b.fVec, top_8x)));
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    template <int k> uint16_t kth() const {
        SkASSERT(0 <= k && k < 8);
        return _mm_extract_epi16(fVec, k);
    }

    __m128i fVec;
};

template <>
class SkNx<4, uint8_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    static SkNx Load(const uint8_t vals[4]) { return _mm_cvtsi32_si128(*(const int*)vals); }
    void store(uint8_t vals[4]) const { *(int*)vals = _mm_cvtsi128_si32(fVec); }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<8, uint8_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    static SkNx Load(const uint8_t vals[8]) { return _mm_loadl_epi64((const __m128i*)vals); }
    void store(uint8_t vals[8]) const { _mm_storel_epi64((__m128i*)vals, fVec); }

    // TODO as needed

    __m128i fVec;
};

template <>
class SkNx<16, uint8_t> {
public:
    SkNx(const __m128i& vec) : fVec(vec) {}

    SkNx() {}
    SkNx(uint8_t val) : fVec(_mm_set1_epi8(val)) {}
    static SkNx Load(const uint8_t vals[16]) { return _mm_loadu_si128((const __m128i*)vals); }
    SkNx(uint8_t a, uint8_t b, uint8_t c, uint8_t d,
         uint8_t e, uint8_t f, uint8_t g, uint8_t h,
         uint8_t i, uint8_t j, uint8_t k, uint8_t l,
         uint8_t m, uint8_t n, uint8_t o, uint8_t p)
        : fVec(_mm_setr_epi8(a,b,c,d, e,f,g,h, i,j,k,l, m,n,o,p)) {}

    void store(uint8_t vals[16]) const { _mm_storeu_si128((__m128i*)vals, fVec); }

    SkNx saturatedAdd(const SkNx& o) const { return _mm_adds_epu8(fVec, o.fVec); }

    SkNx operator + (const SkNx& o) const { return _mm_add_epi8(fVec, o.fVec); }
    SkNx operator - (const SkNx& o) const { return _mm_sub_epi8(fVec, o.fVec); }

    static SkNx Min(const SkNx& a, const SkNx& b) { return _mm_min_epu8(a.fVec, b.fVec); }
    SkNx operator < (const SkNx& o) const {
        // There's no unsigned _mm_cmplt_epu8, so we flip the sign bits then use a signed compare.
        auto flip = _mm_set1_epi8(char(0x80));
        return _mm_cmplt_epi8(_mm_xor_si128(flip, fVec), _mm_xor_si128(flip, o.fVec));
    }

    template <int k> uint8_t kth() const {
        SkASSERT(0 <= k && k < 16);
        // SSE4.1 would just `return _mm_extract_epi8(fVec, k)`.  We have to read 16 bits instead.
        int pair = _mm_extract_epi16(fVec, k/2);
        return k % 2 == 0 ? pair : (pair >> 8);
    }

    SkNx thenElse(const SkNx& t, const SkNx& e) const {
        return _mm_or_si128(_mm_and_si128   (fVec, t.fVec),
                            _mm_andnot_si128(fVec, e.fVec));
    }

    __m128i fVec;
};


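// Note: _mm_cvttps_epi32 truncates toward zero, matching C's float-to-int cast.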
template<> inline Sk4i SkNx_cast<int, float, 4>(const Sk4f& src) {
    return _mm_cvttps_epi32(src.fVec);
}

template<> inline Sk4b SkNx_cast<uint8_t, float, 4>(const Sk4f& src) {
    auto _32 = _mm_cvttps_epi32(src.fVec);
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    return _mm_shuffle_epi8(_32, _mm_setr_epi8(0,4,8,12, _,_,_,_, _,_,_,_, _,_,_,_));
#else
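    // Each _mm_packus_epi16 halves the lane width with unsigned saturation, so two
    // passes take each 32-bit lane down to a byte.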
    auto _16 = _mm_packus_epi16(_32, _32);
    return     _mm_packus_epi16(_16, _16);
#endif
}

template<> inline Sk4f SkNx_cast<float, uint8_t, 4>(const Sk4b& src) {
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3
    const int _ = ~0;
    auto _32 = _mm_shuffle_epi8(src.fVec, _mm_setr_epi8(0,_,_,_, 1,_,_,_, 2,_,_,_, 3,_,_,_));
#else
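    // Zero-extend each byte to a 32-bit lane by interleaving with zeros twice:
    // bytes to 16-bit, then 16-bit to 32-bit.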
    auto _16 = _mm_unpacklo_epi8(src.fVec, _mm_setzero_si128()),
         _32 = _mm_unpacklo_epi16(_16,     _mm_setzero_si128());
#endif
    return _mm_cvtepi32_ps(_32);
}

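// Truncate four Sk4f to bytes and store them contiguously as a[0..3] b[0..3] c[0..3] d[0..3].
// Values are expected to be roughly in [0,255]; packus saturates those modestly out of range.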
static inline void Sk4f_ToBytes(uint8_t bytes[16],
                                const Sk4f& a, const Sk4f& b, const Sk4f& c, const Sk4f& d) {
    _mm_storeu_si128((__m128i*)bytes,
                     _mm_packus_epi16(_mm_packus_epi16(_mm_cvttps_epi32(a.fVec),
                                                       _mm_cvttps_epi32(b.fVec)),
                                      _mm_packus_epi16(_mm_cvttps_epi32(c.fVec),
                                                       _mm_cvttps_epi32(d.fVec))));
}


}  // namespace

#endif//SkNx_sse_DEFINED