/*
 * Copyright 2012 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "SkBitmapProcState_opts_SSSE3.h"
#include "SkPaint.h"
#include "SkUtils.h"

/* We always build the SSSE3 functions when the compiler supports them, and
 * leave it to the caller to determine at runtime whether SSSE3 is available.
 * For compilers that do not support SSSE3 we provide stub implementations
 * instead.
 */
#if SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3

#include <tmmintrin.h>  // SSSE3

// Wrapping these in an anonymous namespace seemed to force gcc to inline the
// instantiations directly, instead of emitting the functions
// S32_generic_D32_filter_DX_SSSE3<true> and
// S32_generic_D32_filter_DX_SSSE3<false> and then calling them from the
// external functions.
namespace {
// In this file, the alpha and non-alpha variations are implemented with a
// template, as that keeps the code more compact and a bit easier to maintain,
// while letting the compiler generate exactly the same code as two functions
// that differ by only a few lines.


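// Note on the packed coordinates: as the bit extraction below assumes, each
// 32 bit entry of the 'xy' array in the DX case packs two 14 bit source
// indices around a 4 bit fractional weight:
//     (index0 << 18) | (fraction << 14) | index1
// so 'entry >> 18' yields the left index, '(entry >> 14) & 0xF' the weight
// and 'entry & 0x3FFF' the right index. The very first entry, consumed before
// the pixel loop, packs the two row indices and sub_y the same way.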
// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept to argument two of the
//           S32_{opaque}_D32_filter_DX methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of an XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of an XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_x_result vector of 8 bit components that will contain
//              (4x(x3), 4x(x2), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_x vector of 8 bit components, containing
//              (4x(16 - x3), 4x(16 - x2), 4x(16 - x1), 4x(16 - x0)).
// @param x0, x1 arrays of four ints receiving the left and right source
//               indices of the four pixels.
inline void PrepareConstantsTwoPixelPairs(const uint32_t* xy,
                                          const __m128i& mask_3FFF,
                                          const __m128i& mask_000F,
                                          const __m128i& sixteen_8bit,
                                          const __m128i& mask_dist_select,
                                          __m128i* all_x_result,
                                          __m128i* sixteen_minus_x,
                                          int* x0,
                                          int* x1) {
    const __m128i xx = _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // 4 delta X
    // (x03, x02, x01, x00)
    const __m128i x0_wide = _mm_srli_epi32(xx, 18);
    // (x13, x12, x11, x10)
    const __m128i x1_wide = _mm_and_si128(xx, mask_3FFF);

    _mm_storeu_si128(reinterpret_cast<__m128i *>(x0), x0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(x1), x1_wide);

    __m128i all_x = _mm_and_si128(_mm_srli_epi32(xx, 14), mask_000F);

    // (4x(x3), 4x(x2), 4x(x1), 4x(x0))
    all_x = _mm_shuffle_epi8(all_x, mask_dist_select);

    *all_x_result = all_x;
    // (4x(16-x3), 4x(16-x2), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_x = _mm_sub_epi8(sixteen_8bit, all_x);
}

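// For the DXDY (perspective) case handled below, the xy array instead
// interleaves one packed y entry and one packed x entry per destination pixel,
// each with the same (index0:14 | fraction:4 | index1:14) layout; see the
// scalar tail loop of S32_generic_D32_filter_DXDY_SSSE3.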
// Prepare all necessary constants for a round of processing for two pixel
// pairs.
// @param xy is the location where the xy parameters for four pixels should be
//           read from. It is identical in concept to argument two of the
//           S32_{opaque}_D32_filter_DXDY methods.
// @param mask_3FFF vector of 32 bit constants containing 3FFF,
//                  suitable to mask the bottom 14 bits of an XY value.
// @param mask_000F vector of 32 bit constants containing 000F,
//                  suitable to mask the bottom 4 bits of an XY value.
// @param sixteen_8bit vector of 8 bit components containing the value 16.
// @param mask_dist_select vector of 8 bit components containing the shuffling
//                         parameters to reorder x[0-3] parameters.
// @param all_xy_result vector of 8 bit components that will contain
//              (4x(y1), 4x(y0), 4x(x1), 4x(x0)) upon return.
// @param sixteen_minus_xy vector of 8 bit components, containing
//              (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0)).
// @param xy0, xy1 arrays of four ints receiving the unpacked integer
//                 coordinates of the two pixels (column offsets in elements
//                 0-1, row indices in elements 2-3).
inline void PrepareConstantsTwoPixelPairsDXDY(const uint32_t* xy,
                                              const __m128i& mask_3FFF,
                                              const __m128i& mask_000F,
                                              const __m128i& sixteen_8bit,
                                              const __m128i& mask_dist_select,
                                              __m128i* all_xy_result,
                                              __m128i* sixteen_minus_xy,
                                              int* xy0, int* xy1) {
    const __m128i xy_wide =
                        _mm_loadu_si128(reinterpret_cast<const __m128i *>(xy));

    // (x10, y10, x00, y00)
    __m128i xy0_wide = _mm_srli_epi32(xy_wide, 18);
    // (y10, y00, x10, x00)
    xy0_wide = _mm_shuffle_epi32(xy0_wide, _MM_SHUFFLE(2, 0, 3, 1));
    // (x11, y11, x01, y01)
    __m128i xy1_wide = _mm_and_si128(xy_wide, mask_3FFF);
    // (y11, y01, x11, x01)
    xy1_wide = _mm_shuffle_epi32(xy1_wide, _MM_SHUFFLE(2, 0, 3, 1));

    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy0), xy0_wide);
    _mm_storeu_si128(reinterpret_cast<__m128i *>(xy1), xy1_wide);

    // (x1, y1, x0, y0)
    __m128i all_xy = _mm_and_si128(_mm_srli_epi32(xy_wide, 14), mask_000F);
    // (y1, y0, x1, x0)
    all_xy = _mm_shuffle_epi32(all_xy, _MM_SHUFFLE(2, 0, 3, 1));
    // (4x(y1), 4x(y0), 4x(x1), 4x(x0))
    all_xy = _mm_shuffle_epi8(all_xy, mask_dist_select);

    *all_xy_result = all_xy;
    // (4x(16-y1), 4x(16-y0), 4x(16-x1), 4x(16-x0))
    *sixteen_minus_xy = _mm_sub_epi8(sixteen_8bit, all_xy);
}

// Helper function used when processing one pixel pair.
// @param pixel0..3 are the four input pixels
// @param scale_x vector of 8 bit components to multiply pixel0..3 by. This
//                will contain (4x(x1, 16-x1), 4x(x0, 16-x0))
//                or (4x(x3, 16-x3), 4x(x2, 16-x2)).
// @return a vector of 16 bit components containing:
// (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
inline __m128i ProcessPixelPairHelper(uint32_t pixel0,
                                      uint32_t pixel1,
                                      uint32_t pixel2,
                                      uint32_t pixel3,
                                      const __m128i& scale_x) {
    __m128i a0, a1, a2, a3;
    // Load 2 pairs of pixels
    a0 = _mm_cvtsi32_si128(pixel0);
    a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave pixels.
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    a2 = _mm_cvtsi32_si128(pixel2);
    a3 = _mm_cvtsi32_si128(pixel3);
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2)
    a2 = _mm_unpacklo_epi8(a2, a3);

    // two pairs of pixel pairs, interleaved.
    // (Aa3, Aa2, Ba3, Ba2, Ga3, Ga2, Ra3, Ra2,
    //  Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi64(a0, a2);

    // multiply and sum to 16 bit components.
    // (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
    // At that point, we use up a bit less than 12 bits for each 16 bit
    // component:
    // All components are at most 255, so
    // C0 * (16 - x) + C1 * x <= 255 * (16 - x) + 255 * x = 255 * 16.
    return _mm_maddubs_epi16(a0, scale_x);
}

// Scale back the results after multiplications to the [0:255] range, and scale
// by alpha when has_alpha is true.
// Depending on whether one set or two sets of multiplications had been applied,
// the results have to be shifted by four places (dividing by 16), or shifted
// by eight places (dividing by 256), since each multiplication is by a quantity
// in the range [0:16].
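// For reference: after a single _mm_maddubs_epi16 pass each component is at
// most 255 * 16 = 4080, so dividing by 16 (scale == 4) restores the [0:255]
// range; after the additional multiply by y (or 16-y) and the row addition it
// is at most 255 * 16 * 16 = 65280, hence the division by 256 (scale == 8).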
template<bool has_alpha, int scale>
inline __m128i ScaleFourPixels(__m128i* pixels,
                               const __m128i& alpha) {
    // Divide each 16 bit component by 16 (or 256 depending on scale).
    *pixels = _mm_srli_epi16(*pixels, scale);

    if (has_alpha) {
        // Multiply by alpha.
        *pixels = _mm_mullo_epi16(*pixels, alpha);

        // Divide each 16 bit component by 256.
        *pixels = _mm_srli_epi16(*pixels, 8);
    }
    return *pixels;
}

// Wrapper to calculate two output pixels from four input pixels. The
// arguments are the same as ProcessPixelPairHelper. Technically, there are
// eight input pixels, but since sub_y == 0 the factor applied to half of the
// pixels is zero (sub_y), so those pixels are omitted here to save some
// processing.
// @param alpha when has_alpha is true, scale all resulting components by this
//              value.
// @return a vector of 16 bit components containing:
// ((Aa2 * (16 - x1) + Aa3 * x1) * alpha, ...,
//  (Ra0 * (16 - x0) + Ra1 * x0) * alpha) (when has_alpha is true)
// otherwise
// (Aa2 * (16 - x1) + Aa3 * x1, ... , Ra0 * (16 - x0) + Ra1 * x0)
// In both cases, the results are renormalized (divided by 16) to match the
// expected formats when storing back the results into memory.
template<bool has_alpha>
inline __m128i ProcessPixelPairZeroSubY(uint32_t pixel0,
                                        uint32_t pixel1,
                                        uint32_t pixel2,
                                        uint32_t pixel3,
                                        const __m128i& scale_x,
                                        const __m128i& alpha) {
    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
                                         scale_x);
    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
}

// Same as ProcessPixelPairZeroSubY, except that it processes one output pixel
// at a time instead of two. As in the function above, only two input pixels
// are needed to generate a single output pixel since sub_y == 0.
// @return same as ProcessPixelPairZeroSubY, except that only the bottom four
// 16 bit components are set.
template<bool has_alpha>
inline __m128i ProcessOnePixelZeroSubY(uint32_t pixel0,
                                       uint32_t pixel1,
                                       __m128i scale_x,
                                       __m128i alpha) {
    __m128i a0 = _mm_cvtsi32_si128(pixel0);
    __m128i a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave
    a0 = _mm_unpacklo_epi8(a0, a1);

    // (a0 * (16-x) + a1 * x)
    __m128i sum = _mm_maddubs_epi16(a0, scale_x);

    return ScaleFourPixels<has_alpha, 4>(&sum, alpha);
}

// Methods when sub_y != 0


// Same as ProcessPixelPairHelper, except that the values are scaled by y.
// @param y vector of 16 bit components containing 'y' values. There are two
//        cases in practice, where y will contain the sub_y constant, or will
//        contain the 16 - sub_y constant.
// @return vector of 16 bit components containing:
// (y * (Aa2 * (16 - x1) + Aa3 * x1), ... , y * (Ra0 * (16 - x0) + Ra1 * x0))
inline __m128i ProcessPixelPair(uint32_t pixel0,
                                uint32_t pixel1,
                                uint32_t pixel2,
                                uint32_t pixel3,
                                const __m128i& scale_x,
                                const __m128i& y) {
    __m128i sum = ProcessPixelPairHelper(pixel0, pixel1, pixel2, pixel3,
                                         scale_x);

    // first row times 16-y or y depending on whether 'y' represents one or
    // the other.
    // Values will be up to 255 * 16 * 16 = 65280.
    // (y * (Aa2 * (16 - x1) + Aa3 * x1), ... ,
    //  y * (Ra0 * (16 - x0) + Ra1 * x0))
    sum = _mm_mullo_epi16(sum, y);

    return sum;
}

// Process two pixel pairs out of eight input pixels.
// In the other methods the individual pixels are passed one by one; here the
// two rows and the index offsets into those rows are passed instead, and the
// eight pixels are fetched inside this function.
// @param row0..1 top and bottom rows holding the input pixels.
// @param x0..1 offsets into the rows for all eight input pixels.
// @param all_y vector of 16 bit components containing the constant sub_y
// @param neg_y vector of 16 bit components containing the constant 16 - sub_y
// @param alpha vector of 16 bit components containing the alpha value to scale
//        the results by, when has_alpha is true.
// @return
// (alpha * ((16-y) * (Aa2  * (16-x1) + Aa3  * x1) +
//             y    * (Aa2' * (16-x1) + Aa3' * x1)),
// ...
//  alpha * ((16-y) * (Ra0  * (16-x0) + Ra1  * x0) +
//             y    * (Ra0' * (16-x0) + Ra1' * x0)))
// with the factor alpha removed when has_alpha is false.
// The values are scaled back to 16 bit components, but with only the bottom
// 8 bits being set.
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairs(const uint32_t* row0,
                                    const uint32_t* row1,
                                    const int* x0,
                                    const int* x1,
                                    const __m128i& scale_x,
                                    const __m128i& all_y,
                                    const __m128i& neg_y,
                                    const __m128i& alpha) {
    __m128i sum0 = ProcessPixelPair(
        row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
        scale_x, neg_y);
    __m128i sum1 = ProcessPixelPair(
        row1[x0[0]], row1[x1[0]], row1[x0[1]], row1[x1[1]],
        scale_x, all_y);

    // 2 samples fully summed.
    // ((16-y) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y * (Aa2' * (16-x1) + Aa3' * x1),
    // ...
    //  (16-y) * (Ra0 * (16 - x0) + Ra1 * x0) +
    //  y * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again, can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}

// Similar to ProcessTwoPixelPairs, except that each of the two output pixels
// reads from its own pair of rows (row00/row01 for the first, row10/row11 for
// the second), with the column offsets taken from xy0 and xy1.
template<bool has_alpha>
inline __m128i ProcessTwoPixelPairsDXDY(const uint32_t* row00,
                                        const uint32_t* row01,
                                        const uint32_t* row10,
                                        const uint32_t* row11,
                                        const int* xy0,
                                        const int* xy1,
                                        const __m128i& scale_x,
                                        const __m128i& all_y,
                                        const __m128i& neg_y,
                                        const __m128i& alpha) {
    // first row
    __m128i sum0 = ProcessPixelPair(
        row00[xy0[0]], row00[xy1[0]], row10[xy0[1]], row10[xy1[1]],
        scale_x, neg_y);
    // second row
    __m128i sum1 = ProcessPixelPair(
        row01[xy0[0]], row01[xy1[0]], row11[xy0[1]], row11[xy1[1]],
        scale_x, all_y);

    // 2 samples fully summed.
    // ((16-y1) * (Aa2 * (16-x1) + Aa3 * x1) +
    //  y1 * (Aa2' * (16-x1) + Aa3' * x1),
    // ...
    //  (16-y0) * (Ra0 * (16 - x0) + Ra1 * x0) +
    //  y0 * (Ra0' * (16-x0) + Ra1' * x0))
    // Each component, again, can be at most 256 * 255 = 65280, so no overflow.
    sum0 = _mm_add_epi16(sum0, sum1);

    return ScaleFourPixels<has_alpha, 8>(&sum0, alpha);
}


// Same as ProcessPixelPair, except that it performs the math for one output
// pixel at a time. This means that only the bottom four 16 bit components are
// set.
inline __m128i ProcessOnePixel(uint32_t pixel0, uint32_t pixel1,
                               const __m128i& scale_x, const __m128i& y) {
    __m128i a0 = _mm_cvtsi32_si128(pixel0);
    __m128i a1 = _mm_cvtsi32_si128(pixel1);

    // Interleave
    // (0, 0, 0, 0, 0, 0, 0, 0, Aa1, Aa0, Ba1, Ba0, Ga1, Ga0, Ra1, Ra0)
    a0 = _mm_unpacklo_epi8(a0, a1);

    // (a0 * (16-x) + a1 * x)
    a0 = _mm_maddubs_epi16(a0, scale_x);

    // scale row by y
    return _mm_mullo_epi16(a0, y);
}

// Notes about the various tricks that are used in this implementation:
// - specialization for sub_y == 0.
// Statistically, 1/16th of the samples will have sub_y == 0. When this
// happens, the math goes from:
// (16 - x)*(16 - y)*a00 + x*(16 - y)*a01 + (16 - x)*y*a10 + x*y*a11
// to:
// (16 - x)*a00 + x*a01
// (with the final normalization shift reduced accordingly), which is much
// simpler. The simplification makes for an easy boost in performance.
// - calculating 4 output pixels at a time.
// This allows loading the coefficients x0 and x1 and shuffling them to the
// optimum location only once per loop, instead of twice per loop.
// This also allows us to store the four pixels with a single store.
// - Use of 2 special SSSE3 instructions (compared to the SSE2 version):
// _mm_shuffle_epi8 : this allows us to spread the coefficients x[0-3], loaded
// as 32 bit values, into 8 bit values repeated four times.
// _mm_maddubs_epi16 : this allows us to perform multiplications and additions
// in one swoop on 8 bit values, storing the results in 16 bit values. This
// instruction is actually crucial for the speed of the implementation since,
// as one can see in the SSE2 implementation, all inputs have to be widened to
// 16 bits because the results are 16 bits. This basically allows us to process
// twice as many pixel components per iteration.
//
// As a result, this method is faster than the traditional SSE2 one. The actual
// boost varies greatly with the underlying architecture.
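// A small worked example (one channel, sub_y == 0, illustrative values only):
// with sub_x = 4, a00 = 100 and a01 = 200, the filtered value is
//   ((16 - 4) * 100 + 4 * 200) >> 4 = (1200 + 800) >> 4 = 2000 >> 4 = 125,
// which is what the pmaddubsw-plus-shift sequence below computes per component.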
template<bool has_alpha>
void S32_generic_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(s.fFilterLevel != SkPaint::kNone_FilterLevel);
    SkASSERT(kN32_SkColorType == s.fBitmap->colorType());
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
            static_cast<const uint8_t*>(s.fBitmap->getPixels());
    const size_t rb = s.fBitmap->rowBytes();
    const uint32_t XY = *xy++;
    const unsigned y0 = XY >> 14;
    const uint32_t* row0 =
            reinterpret_cast<const uint32_t*>(src_addr + (y0 >> 4) * rb);
    const uint32_t* row1 =
            reinterpret_cast<const uint32_t*>(src_addr + (XY & 0x3FFF) * rb);
    const unsigned sub_y = y0 & 0xF;

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8,  8,  8,  8,
                                                  4,  4,  4,  4,
                                                  0,  0,  0,  0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);
    // (0, 0, 0, 0, 0, 0, 0, 0)
    const __m128i zero = _mm_setzero_si128();

    __m128i alpha = _mm_setzero_si128();
    if (has_alpha) {
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);
    }

    if (sub_y == 0) {
        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs.
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[0]], row0[x1[0]], row0[x0[1]], row0[x1[1]],
                scale_x, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessPixelPairZeroSubY<has_alpha>(
                row0[x0[2]], row0[x1[2]], row0[x0[3]], row0[x1[3]],
                scale_x, alpha);

            // Pack the 16 bit values of both sums into 16 bytes (four pixels).
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store the four pixels with a single store.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // handle remainder
        while (count-- > 0) {
            uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            unsigned x0 = xx >> 18;
            unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // (16x(16-x))
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            __m128i sum = ProcessOnePixelZeroSubY<has_alpha>(
                row0[x0], row0[x1],
                scale_x, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum = _mm_packus_epi16(sum, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum);
        }
    } else {  // more general case, y != 0
        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(sub_y);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // Unroll 4x, interleave bytes, use pmaddubsw (all_x is small)
        while (count > 3) {
            count -= 4;

            int x0[4];
            int x1[4];
            __m128i all_x, sixteen_minus_x;
            PrepareConstantsTwoPixelPairs(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                          &all_x, &sixteen_minus_x, x0, x1);
            xy += 4;

            // First pair of pixel pairs
            // (4x(x1, 16-x1), 4x(x0, 16-x0))
            __m128i scale_x;
            scale_x = _mm_unpacklo_epi8(sixteen_minus_x, all_x);

            __m128i sum0 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0, x1,
                scale_x, all_y, neg_y, alpha);

            // second pair of pixel pairs
            // (4x(x3, 16-x3), 4x(x2, 16-x2))
            scale_x = _mm_unpackhi_epi8(sixteen_minus_x, all_x);

            __m128i sum1 = ProcessTwoPixelPairs<has_alpha>(
                row0, row1, x0 + 2, x1 + 2,
                scale_x, all_y, neg_y, alpha);

            // Do the final packing of the two results

            // Pack the 16 bit values of both sums into 16 bytes (four pixels).
            sum0 = _mm_packus_epi16(sum0, sum1);

            // Store the four pixels with a single store.
            _mm_storeu_si128(reinterpret_cast<__m128i *>(colors), sum0);

            colors += 4;
        }

        // Left over.
        while (count-- > 0) {
            const uint32_t xx = *xy++;  // x0:14 | 4 | x1:14
            const unsigned x0 = xx >> 18;
            const unsigned x1 = xx & 0x3FFF;

            // 16x(x)
            const __m128i all_x = _mm_set1_epi8((xx >> 14) & 0x0F);

            // 16x (16-x)
            __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

            // (8x (x, 16-x))
            scale_x = _mm_unpacklo_epi8(scale_x, all_x);

            // first row.
            __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
            // second row.
            __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

            // Add both rows for full sample
            sum0 = _mm_add_epi16(sum0, sum1);

            sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

            // Pack lower 4 16 bit values of sum into lower 4 bytes.
            sum0 = _mm_packus_epi16(sum0, zero);

            // Extract low int and store.
            *colors++ = _mm_cvtsi128_si32(sum0);
        }
    }
}

/*
 * Similar to S32_generic_D32_filter_DX_SSSE3, except that we do not need to
 * handle the special case sub_y == 0 here, since sub_y changes on every
 * iteration.
 */
template<bool has_alpha>
void S32_generic_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                       const uint32_t* xy,
                                       int count, uint32_t* colors) {
    SkASSERT(count > 0 && colors != NULL);
    SkASSERT(s.fFilterLevel != SkPaint::kNone_FilterLevel);
    SkASSERT(kN32_SkColorType == s.fBitmap->colorType());
    if (has_alpha) {
        SkASSERT(s.fAlphaScale < 256);
    } else {
        SkASSERT(s.fAlphaScale == 256);
    }

    const uint8_t* src_addr =
                        static_cast<const uint8_t*>(s.fBitmap->getPixels());
    const size_t rb = s.fBitmap->rowBytes();

    // vector constants
    const __m128i mask_dist_select = _mm_set_epi8(12, 12, 12, 12,
                                                  8,  8,  8,  8,
                                                  4,  4,  4,  4,
                                                  0,  0,  0,  0);
    const __m128i mask_3FFF = _mm_set1_epi32(0x3FFF);
    const __m128i mask_000F = _mm_set1_epi32(0x000F);
    const __m128i sixteen_8bit = _mm_set1_epi8(16);

    // Zero-initialize (as in the DX variant above); only read when has_alpha.
    __m128i alpha = _mm_setzero_si128();
    if (has_alpha) {
        // 8x(alpha)
        alpha = _mm_set1_epi16(s.fAlphaScale);
    }

    // Unroll 2x, interleave bytes, use pmaddubsw (all_x is small)
    while (count >= 2) {
        int xy0[4];
        int xy1[4];
        __m128i all_xy, sixteen_minus_xy;
        PrepareConstantsTwoPixelPairsDXDY(xy, mask_3FFF, mask_000F,
                                          sixteen_8bit, mask_dist_select,
                                         &all_xy, &sixteen_minus_xy, xy0, xy1);

        // (4x(x1, 16-x1), 4x(x0, 16-x0))
        __m128i scale_x = _mm_unpacklo_epi8(sixteen_minus_xy, all_xy);
        // (4x(0, y1), 4x(0, y0))
        __m128i all_y = _mm_unpackhi_epi8(all_xy, _mm_setzero_si128());
        __m128i neg_y = _mm_sub_epi16(_mm_set1_epi16(16), all_y);

        const uint32_t* row00 =
                    reinterpret_cast<const uint32_t*>(src_addr + xy0[2] * rb);
        const uint32_t* row01 =
                    reinterpret_cast<const uint32_t*>(src_addr + xy1[2] * rb);
        const uint32_t* row10 =
                    reinterpret_cast<const uint32_t*>(src_addr + xy0[3] * rb);
        const uint32_t* row11 =
                    reinterpret_cast<const uint32_t*>(src_addr + xy1[3] * rb);

        __m128i sum0 = ProcessTwoPixelPairsDXDY<has_alpha>(
                                        row00, row01, row10, row11, xy0, xy1,
                                        scale_x, all_y, neg_y, alpha);

        // Pack the 16 bit values into bytes; the two output pixels end up in
        // the low 8 bytes.
        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());

        // Store the two pixels (low 64 bits).
        _mm_storel_epi64(reinterpret_cast<__m128i *>(colors), sum0);

        xy += 4;
        colors += 2;
        count -= 2;
    }

    // Handle the remainder
    while (count-- > 0) {
        uint32_t data = *xy++;
        unsigned y0 = data >> 14;
        unsigned y1 = data & 0x3FFF;
        unsigned subY = y0 & 0xF;
        y0 >>= 4;

        data = *xy++;
        unsigned x0 = data >> 14;
        unsigned x1 = data & 0x3FFF;
        unsigned subX = x0 & 0xF;
        x0 >>= 4;

        const uint32_t* row0 =
                        reinterpret_cast<const uint32_t*>(src_addr + y0 * rb);
        const uint32_t* row1 =
                        reinterpret_cast<const uint32_t*>(src_addr + y1 * rb);

        // 16x(x)
        const __m128i all_x = _mm_set1_epi8(subX);

        // 16x (16-x)
        __m128i scale_x = _mm_sub_epi8(sixteen_8bit, all_x);

        // (8x (x, 16-x))
        scale_x = _mm_unpacklo_epi8(scale_x, all_x);

        // 8x(16)
        const __m128i sixteen_16bit = _mm_set1_epi16(16);

        // 8x (y)
        const __m128i all_y = _mm_set1_epi16(subY);

        // 8x (16-y)
        const __m128i neg_y = _mm_sub_epi16(sixteen_16bit, all_y);

        // first row.
        __m128i sum0 = ProcessOnePixel(row0[x0], row0[x1], scale_x, neg_y);
        // second row.
        __m128i sum1 = ProcessOnePixel(row1[x0], row1[x1], scale_x, all_y);

        // Add both rows for full sample
        sum0 = _mm_add_epi16(sum0, sum1);

        sum0 = ScaleFourPixels<has_alpha, 8>(&sum0, alpha);

        // Pack lower 4 16 bit values of sum into lower 4 bytes.
        sum0 = _mm_packus_epi16(sum0, _mm_setzero_si128());

        // Extract low int and store.
        *colors++ = _mm_cvtsi128_si32(sum0);
    }
}
}  // namespace

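// Exported entry points, declared in SkBitmapProcState_opts_SSSE3.h. The
// caller is expected to select them only when SSSE3 support has been detected
// at runtime (see the note at the top of this file).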
void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                    const uint32_t* xy,
                                    int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<false>(s, xy, count, colors);
}

void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                   const uint32_t* xy,
                                   int count, uint32_t* colors) {
    S32_generic_D32_filter_DX_SSSE3<true>(s, xy, count, colors);
}

void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                      const uint32_t* xy,
                                      int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<false>(s, xy, count, colors);
}

void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    S32_generic_D32_filter_DXDY_SSSE3<true>(s, xy, count, colors);
}

#else // SK_CPU_SSE_LEVEL >= SK_CPU_SSE_LEVEL_SSSE3

void S32_opaque_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                    const uint32_t* xy,
                                    int count, uint32_t* colors) {
    sk_throw();
}

void S32_alpha_D32_filter_DX_SSSE3(const SkBitmapProcState& s,
                                   const uint32_t* xy,
                                   int count, uint32_t* colors) {
    sk_throw();
}

void S32_opaque_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                      const uint32_t* xy,
                                      int count, uint32_t* colors) {
    sk_throw();
}

void S32_alpha_D32_filter_DXDY_SSSE3(const SkBitmapProcState& s,
                                     const uint32_t* xy,
                                     int count, uint32_t* colors) {
    sk_throw();
}

#endif