1/*
2 * Copyright 2013 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "SkBitmapProcState.h"
9#include "SkBitmap.h"
10#include "SkColor.h"
11#include "SkColorPriv.h"
12#include "SkUnPreMultiply.h"
13#include "SkShader.h"
14#include "SkConvolver.h"
15
16#include "SkBitmapFilter_opts_SSE2.h"
17
18#include <emmintrin.h>
19
#if 0
// Debug-only helpers (compiled out): dump the lanes of an XMM register to
// stdout under various interpretations (32-bit ints, 16-bit shorts, bytes,
// floats). Handy when hand-checking the SIMD shuffles below; flip the
// "#if 0" to enable.
static inline void print128i(__m128i value) {
    int *v = (int*) &value;
    printf("% .11d % .11d % .11d % .11d\n", v[0], v[1], v[2], v[3]);
}

static inline void print128i_16(__m128i value) {
    short *v = (short*) &value;
    printf("% .5d % .5d % .5d % .5d % .5d % .5d % .5d % .5d\n", v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7]);
}

static inline void print128i_8(__m128i value) {
    unsigned char *v = (unsigned char*) &value;
    printf("%.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u %.3u\n",
           v[0], v[1], v[2], v[3], v[4], v[5], v[6], v[7],
           v[8], v[9], v[10], v[11], v[12], v[13], v[14], v[15]
           );
}

static inline void print128f(__m128 value) {
    float *f = (float*) &value;
    printf("%3.4f %3.4f %3.4f %3.4f\n", f[0], f[1], f[2], f[3]);
}
#endif
44
// Because the border is handled specially elsewhere, this function is
// guaranteed to have all of its source pixels available without running off
// the bitmap's edge.
47
// General (rotation/skew-capable) high quality resampling. For each of
// |count| destination pixels starting at (x, y): map back into source space,
// gather every source pixel inside the filter's support window, and compute
// a normalized weighted average of the four channels using SSE2 floats.
void highQualityFilter_SSE2(const SkBitmapProcState& s, int x, int y,
                            SkPMColor* SK_RESTRICT colors, int count) {

    const int maxX = s.fBitmap->width() - 1;
    const int maxY = s.fBitmap->height() - 1;

    while (count-- > 0) {
        // Map the destination pixel back into source space.
        SkPoint srcPt;
        s.fInvProc(s.fInvMatrix, SkIntToScalar(x),
                    SkIntToScalar(y), &srcPt);
        // Shift to the pixel-center convention the filter lookup expects.
        srcPt.fX -= SK_ScalarHalf;
        srcPt.fY -= SK_ScalarHalf;

        int sx = SkScalarFloorToInt(srcPt.fX);
        int sy = SkScalarFloorToInt(srcPt.fY);

        // Running sum of weights and of weighted channel values, one float
        // lane per channel.
        __m128 weight = _mm_setzero_ps();
        __m128 accum = _mm_setzero_ps();

        // Source rows/columns covered by the filter support, clamped to the
        // bitmap bounds.
        int y0 = SkTMax(0, int(ceil(sy-s.getBitmapFilter()->width() + 0.5f)));
        int y1 = SkTMin(maxY, int(floor(sy+s.getBitmapFilter()->width() + 0.5f)));
        int x0 = SkTMax(0, int(ceil(sx-s.getBitmapFilter()->width() + 0.5f)));
        int x1 = SkTMin(maxX, int(floor(sx+s.getBitmapFilter()->width() + 0.5f)));

        for (int src_y = y0; src_y <= y1; src_y++) {
            float yweight = SkScalarToFloat(s.getBitmapFilter()->lookupScalar(srcPt.fY - src_y));

            for (int src_x = x0; src_x <= x1 ; src_x++) {
                float xweight = SkScalarToFloat(s.getBitmapFilter()->lookupScalar(srcPt.fX - src_x));

                // Separable filter: the 2D weight is the product of the
                // per-axis weights.
                float combined_weight = xweight * yweight;

                SkPMColor color = *s.fBitmap->getAddr32(src_x, src_y);

                // Widen the packed 8888 pixel: 8-bit channels -> 16-bit ->
                // 32-bit lanes, then convert to floats. Lane 0 is whichever
                // channel sits in the low byte of SkPMColor.
                __m128i c = _mm_cvtsi32_si128( color );
                c = _mm_unpacklo_epi8(c, _mm_setzero_si128());
                c = _mm_unpacklo_epi16(c, _mm_setzero_si128());

                __m128 cfloat = _mm_cvtepi32_ps( c );

                __m128 weightVector = _mm_set1_ps(combined_weight);

                accum = _mm_add_ps(accum, _mm_mul_ps(cfloat, weightVector));
                weight = _mm_add_ps( weight, weightVector );
            }
        }

        // Normalize, then round to nearest by adding 0.5 before the
        // float->int conversion.
        accum = _mm_div_ps(accum, weight);
        accum = _mm_add_ps(accum, _mm_set1_ps(0.5f));

        __m128i accumInt = _mm_cvtps_epi32( accum );

        int localResult[4];
        _mm_storeu_si128((__m128i *) (localResult), accumInt);
        // NOTE(review): treating localResult[0] as alpha assumes alpha lives
        // in the low byte of SkPMColor (SK_A32_SHIFT == 0) — confirm against
        // the build's pixel byte order; the scale-only variant builds its
        // vector explicitly as (A, R, G, B).
        // Clamp alpha to 255 and each color channel to alpha so the output
        // remains a valid premultiplied color.
        int a = SkClampMax(localResult[0], 255);
        int r = SkClampMax(localResult[1], a);
        int g = SkClampMax(localResult[2], a);
        int b = SkClampMax(localResult[3], a);

        *colors++ = SkPackARGB32(a, r, g, b);

        x++;
    }
}
112
113void highQualityFilter_ScaleOnly_SSE2(const SkBitmapProcState &s, int x, int y,
114                             SkPMColor *SK_RESTRICT colors, int count) {
115    const int maxX = s.fBitmap->width() - 1;
116    const int maxY = s.fBitmap->height() - 1;
117
118    SkPoint srcPt;
119    s.fInvProc(s.fInvMatrix, SkIntToScalar(x),
120                SkIntToScalar(y), &srcPt);
121    srcPt.fY -= SK_ScalarHalf;
122    int sy = SkScalarFloorToInt(srcPt.fY);
123
124    int y0 = SkTMax(0, int(ceil(sy-s.getBitmapFilter()->width() + 0.5f)));
125    int y1 = SkTMin(maxY, int(floor(sy+s.getBitmapFilter()->width() + 0.5f)));
126
127    while (count-- > 0) {
128        srcPt.fX -= SK_ScalarHalf;
129        srcPt.fY -= SK_ScalarHalf;
130
131        int sx = SkScalarFloorToInt(srcPt.fX);
132
133        float weight = 0;
134        __m128 accum = _mm_setzero_ps();
135
136        int x0 = SkTMax(0, int(ceil(sx-s.getBitmapFilter()->width() + 0.5f)));
137        int x1 = SkTMin(maxX, int(floor(sx+s.getBitmapFilter()->width() + 0.5f)));
138
139        for (int src_y = y0; src_y <= y1; src_y++) {
140            float yweight = SkScalarToFloat(s.getBitmapFilter()->lookupScalar(srcPt.fY - src_y));
141
142            for (int src_x = x0; src_x <= x1 ; src_x++) {
143                float xweight = SkScalarToFloat(s.getBitmapFilter()->lookupScalar(srcPt.fX - src_x));
144
145                float combined_weight = xweight * yweight;
146
147                SkPMColor color = *s.fBitmap->getAddr32(src_x, src_y);
148
149                __m128 c = _mm_set_ps((float)SkGetPackedB32(color),
150                                      (float)SkGetPackedG32(color),
151                                      (float)SkGetPackedR32(color),
152                                      (float)SkGetPackedA32(color));
153
154                __m128 weightVector = _mm_set1_ps(combined_weight);
155
156                accum = _mm_add_ps(accum, _mm_mul_ps(c, weightVector));
157                weight += combined_weight;
158            }
159        }
160
161        __m128 totalWeightVector = _mm_set1_ps(weight);
162        accum = _mm_div_ps(accum, totalWeightVector);
163        accum = _mm_add_ps(accum, _mm_set1_ps(0.5f));
164
165        float localResult[4];
166        _mm_storeu_ps(localResult, accum);
167        int a = SkClampMax(int(localResult[0]), 255);
168        int r = SkClampMax(int(localResult[1]), a);
169        int g = SkClampMax(int(localResult[2]), a);
170        int b = SkClampMax(int(localResult[3]), a);
171
172        *colors++ = SkPackARGB32(a, r, g, b);
173
174        x++;
175
176        s.fInvProc(s.fInvMatrix, SkIntToScalar(x),
177                    SkIntToScalar(y), &srcPt);
178
179    }
180}
181
182// Convolves horizontally along a single row. The row data is given in
183// |src_data| and continues for the num_values() of the filter.
// Applies the 1D fixed-point filter horizontally to one RGBA row, producing
// one packed 32-bit output pixel per filter position. Arithmetic is done in
// 16x16->32-bit signed fixed point and shifted down by kShiftBits at the end.
void convolveHorizontally_SSE2(const unsigned char* src_data,
                               const SkConvolutionFilter1D& filter,
                               unsigned char* out_row,
                               bool /*has_alpha*/) {
  // Number of output pixels this filter produces.
  int num_values = filter.numValues();

  int filter_offset, filter_length;
  __m128i zero = _mm_setzero_si128();
  __m128i mask[4];
  // |mask| will be used to decimate all extra filter coefficients that are
  // loaded by SIMD when |filter_length| is not divisible by 4.
  // mask[0] is not used in following algorithm.
  mask[1] = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, -1);
  mask[2] = _mm_set_epi16(0, 0, 0, 0, 0, 0, -1, -1);
  mask[3] = _mm_set_epi16(0, 0, 0, 0, 0, -1, -1, -1);

  // Output one pixel each iteration, calculating all channels (RGBA) together.
  for (int out_x = 0; out_x < num_values; out_x++) {
    // Fixed-point taps for this output pixel, plus the source offset (in
    // pixels) of the first tap and the number of taps.
    const SkConvolutionFilter1D::ConvolutionFixed* filter_values =
        filter.FilterForValue(out_x, &filter_offset, &filter_length);

    // 32-bit per-channel accumulator of the fixed-point products.
    __m128i accum = _mm_setzero_si128();

    // Compute the first pixel in this row that the filter affects. It will
    // touch |filter_length| pixels (4 bytes each) after this.
    const __m128i* row_to_filter =
        reinterpret_cast<const __m128i*>(&src_data[filter_offset << 2]);

    // We will load and accumulate with four coefficients per iteration.
    for (int filter_x = 0; filter_x < filter_length >> 2; filter_x++) {

      // Load 4 coefficients => duplicate 1st and 2nd of them for all channels.
      __m128i coeff, coeff16;
      // [16] xx xx xx xx c3 c2 c1 c0
      coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
      // [16] xx xx xx xx c1 c1 c0 c0
      coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
      // [16] c1 c1 c1 c1 c0 c0 c0 c0
      coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);

      // Load four pixels => unpack the first two pixels to 16 bits =>
      // multiply with coefficients => accumulate the convolution result.
      // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
      __m128i src8 = _mm_loadu_si128(row_to_filter);
      // [16] a1 b1 g1 r1 a0 b0 g0 r0
      __m128i src16 = _mm_unpacklo_epi8(src8, zero);
      // mulhi/mullo give the high and low halves of each 16x16 product;
      // interleaving them reconstructs the full 32-bit products.
      __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
      __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
      // [32]  a0*c0 b0*c0 g0*c0 r0*c0
      __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
      accum = _mm_add_epi32(accum, t);
      // [32]  a1*c1 b1*c1 g1*c1 r1*c1
      t = _mm_unpackhi_epi16(mul_lo, mul_hi);
      accum = _mm_add_epi32(accum, t);

      // Duplicate 3rd and 4th coefficients for all channels =>
      // unpack the 3rd and 4th pixels to 16 bits => multiply with coefficients
      // => accumulate the convolution results.
      // [16] xx xx xx xx c3 c3 c2 c2
      coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
      // [16] c3 c3 c3 c3 c2 c2 c2 c2
      coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);
      // [16] a3 g3 b3 r3 a2 g2 b2 r2
      src16 = _mm_unpackhi_epi8(src8, zero);
      mul_hi = _mm_mulhi_epi16(src16, coeff16);
      mul_lo = _mm_mullo_epi16(src16, coeff16);
      // [32]  a2*c2 b2*c2 g2*c2 r2*c2
      t = _mm_unpacklo_epi16(mul_lo, mul_hi);
      accum = _mm_add_epi32(accum, t);
      // [32]  a3*c3 b3*c3 g3*c3 r3*c3
      t = _mm_unpackhi_epi16(mul_lo, mul_hi);
      accum = _mm_add_epi32(accum, t);

      // Advance the pixel and coefficients pointers.
      row_to_filter += 1;
      filter_values += 4;
    }

    // When |filter_length| is not divisible by 4, we need to decimate to zero
    // the filter coefficients that were loaded past the end. Other than that
    // the algorithm is the same as above, except that the 4th pixel will
    // always be absent.
    int r = filter_length&3;
    if (r) {
      // Note: filter_values must be padded to align_up(filter_offset, 8).
      __m128i coeff, coeff16;
      coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
      // Mask out extra filter taps.
      coeff = _mm_and_si128(coeff, mask[r]);
      coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
      coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);

      // Note: line buffer must be padded to align_up(filter_offset, 16).
      // We resolve this by using the C-version for the last horizontal line.
      __m128i src8 = _mm_loadu_si128(row_to_filter);
      __m128i src16 = _mm_unpacklo_epi8(src8, zero);
      __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
      __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
      __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
      accum = _mm_add_epi32(accum, t);
      t = _mm_unpackhi_epi16(mul_lo, mul_hi);
      accum = _mm_add_epi32(accum, t);

      src16 = _mm_unpackhi_epi8(src8, zero);
      coeff16 = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
      coeff16 = _mm_unpacklo_epi16(coeff16, coeff16);
      mul_hi = _mm_mulhi_epi16(src16, coeff16);
      mul_lo = _mm_mullo_epi16(src16, coeff16);
      t = _mm_unpacklo_epi16(mul_lo, mul_hi);
      accum = _mm_add_epi32(accum, t);
    }

    // Shift right for fixed point implementation.
    accum = _mm_srai_epi32(accum, SkConvolutionFilter1D::kShiftBits);

    // Packing 32 bits |accum| to 16 bits per channel (signed saturation).
    accum = _mm_packs_epi32(accum, zero);
    // Packing 16 bits |accum| to 8 bits per channel (unsigned saturation).
    accum = _mm_packus_epi16(accum, zero);

    // Store the pixel value of 32 bits.
    *(reinterpret_cast<int*>(out_row)) = _mm_cvtsi128_si32(accum);
    out_row += 4;
  }
}
309
310// Convolves horizontally along four rows. The row data is given in
311// |src_data| and continues for the num_values() of the filter.
312// The algorithm is almost same as |ConvolveHorizontally_SSE2|. Please
313// refer to that function for detailed comments.
// Same horizontal convolution as above, but processes four independent rows
// per filter position so each set of coefficients is loaded and shuffled only
// once for all four rows.
void convolve4RowsHorizontally_SSE2(const unsigned char* src_data[4],
                                    const SkConvolutionFilter1D& filter,
                                    unsigned char* out_row[4]) {
  int num_values = filter.numValues();

  int filter_offset, filter_length;
  __m128i zero = _mm_setzero_si128();
  __m128i mask[4];
  // |mask| will be used to decimate all extra filter coefficients that are
  // loaded by SIMD when |filter_length| is not divisible by 4.
  // mask[0] is not used in following algorithm.
  mask[1] = _mm_set_epi16(0, 0, 0, 0, 0, 0, 0, -1);
  mask[2] = _mm_set_epi16(0, 0, 0, 0, 0, 0, -1, -1);
  mask[3] = _mm_set_epi16(0, 0, 0, 0, 0, -1, -1, -1);

  // Output one pixel each iteration, calculating all channels (RGBA) together.
  for (int out_x = 0; out_x < num_values; out_x++) {
    const SkConvolutionFilter1D::ConvolutionFixed* filter_values =
        filter.FilterForValue(out_x, &filter_offset, &filter_length);

    // four pixels in a column per iteration.
    __m128i accum0 = _mm_setzero_si128();
    __m128i accum1 = _mm_setzero_si128();
    __m128i accum2 = _mm_setzero_si128();
    __m128i accum3 = _mm_setzero_si128();
    // Byte offset of the first source pixel the filter touches.
    int start = (filter_offset<<2);
    // We will load and accumulate with four coefficients per iteration.
    for (int filter_x = 0; filter_x < (filter_length >> 2); filter_x++) {
      __m128i coeff, coeff16lo, coeff16hi;
      // [16] xx xx xx xx c3 c2 c1 c0
      coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
      // [16] xx xx xx xx c1 c1 c0 c0
      coeff16lo = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
      // [16] c1 c1 c1 c1 c0 c0 c0 c0
      coeff16lo = _mm_unpacklo_epi16(coeff16lo, coeff16lo);
      // [16] xx xx xx xx c3 c3 c2 c2
      coeff16hi = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
      // [16] c3 c3 c3 c3 c2 c2 c2 c2
      coeff16hi = _mm_unpacklo_epi16(coeff16hi, coeff16hi);

      // Scratch registers used by the ITERATION macro below; the macro
      // convolves 4 pixels from |src| with the current coefficients and adds
      // the 32-bit fixed-point products into |accum|.
      __m128i src8, src16, mul_hi, mul_lo, t;

#define ITERATION(src, accum)                                          \
      src8 = _mm_loadu_si128(reinterpret_cast<const __m128i*>(src));   \
      src16 = _mm_unpacklo_epi8(src8, zero);                           \
      mul_hi = _mm_mulhi_epi16(src16, coeff16lo);                      \
      mul_lo = _mm_mullo_epi16(src16, coeff16lo);                      \
      t = _mm_unpacklo_epi16(mul_lo, mul_hi);                          \
      accum = _mm_add_epi32(accum, t);                                 \
      t = _mm_unpackhi_epi16(mul_lo, mul_hi);                          \
      accum = _mm_add_epi32(accum, t);                                 \
      src16 = _mm_unpackhi_epi8(src8, zero);                           \
      mul_hi = _mm_mulhi_epi16(src16, coeff16hi);                      \
      mul_lo = _mm_mullo_epi16(src16, coeff16hi);                      \
      t = _mm_unpacklo_epi16(mul_lo, mul_hi);                          \
      accum = _mm_add_epi32(accum, t);                                 \
      t = _mm_unpackhi_epi16(mul_lo, mul_hi);                          \
      accum = _mm_add_epi32(accum, t)

      ITERATION(src_data[0] + start, accum0);
      ITERATION(src_data[1] + start, accum1);
      ITERATION(src_data[2] + start, accum2);
      ITERATION(src_data[3] + start, accum3);

      // Advance by 4 pixels (16 bytes) and 4 coefficients.
      start += 16;
      filter_values += 4;
    }

    // Remainder taps when |filter_length| is not divisible by 4; the extra
    // loaded coefficients are masked to zero, as in the single-row version.
    int r = filter_length & 3;
    if (r) {
      // Note: filter_values must be padded to align_up(filter_offset, 8);
      __m128i coeff;
      coeff = _mm_loadl_epi64(reinterpret_cast<const __m128i*>(filter_values));
      // Mask out extra filter taps.
      coeff = _mm_and_si128(coeff, mask[r]);

      __m128i coeff16lo = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(1, 1, 0, 0));
      /* c1 c1 c1 c1 c0 c0 c0 c0 */
      coeff16lo = _mm_unpacklo_epi16(coeff16lo, coeff16lo);
      __m128i coeff16hi = _mm_shufflelo_epi16(coeff, _MM_SHUFFLE(3, 3, 2, 2));
      coeff16hi = _mm_unpacklo_epi16(coeff16hi, coeff16hi);

      // Fresh scratch registers for the ITERATION macro (reused from above).
      __m128i src8, src16, mul_hi, mul_lo, t;

      ITERATION(src_data[0] + start, accum0);
      ITERATION(src_data[1] + start, accum1);
      ITERATION(src_data[2] + start, accum2);
      ITERATION(src_data[3] + start, accum3);
    }

    // Scale down by the fixed-point shift, then saturate 32->16->8 bits.
    accum0 = _mm_srai_epi32(accum0, SkConvolutionFilter1D::kShiftBits);
    accum0 = _mm_packs_epi32(accum0, zero);
    accum0 = _mm_packus_epi16(accum0, zero);
    accum1 = _mm_srai_epi32(accum1, SkConvolutionFilter1D::kShiftBits);
    accum1 = _mm_packs_epi32(accum1, zero);
    accum1 = _mm_packus_epi16(accum1, zero);
    accum2 = _mm_srai_epi32(accum2, SkConvolutionFilter1D::kShiftBits);
    accum2 = _mm_packs_epi32(accum2, zero);
    accum2 = _mm_packus_epi16(accum2, zero);
    accum3 = _mm_srai_epi32(accum3, SkConvolutionFilter1D::kShiftBits);
    accum3 = _mm_packs_epi32(accum3, zero);
    accum3 = _mm_packus_epi16(accum3, zero);

    // Store one packed 32-bit pixel per row and advance the output pointers.
    *(reinterpret_cast<int*>(out_row[0])) = _mm_cvtsi128_si32(accum0);
    *(reinterpret_cast<int*>(out_row[1])) = _mm_cvtsi128_si32(accum1);
    *(reinterpret_cast<int*>(out_row[2])) = _mm_cvtsi128_si32(accum2);
    *(reinterpret_cast<int*>(out_row[3])) = _mm_cvtsi128_si32(accum3);

    out_row[0] += 4;
    out_row[1] += 4;
    out_row[2] += 4;
    out_row[3] += 4;
  }
}
428
429// Does vertical convolution to produce one output row. The filter values and
430// length are given in the first two parameters. These are applied to each
431// of the rows pointed to in the |source_data_rows| array, with each row
432// being |pixel_width| wide.
433//
434// The output must have room for |pixel_width * 4| bytes.
// |has_alpha| is a compile-time flag: when true, the alpha channel of each
// output pixel is clamped up to at least max(r, g, b) so the result stays a
// valid premultiplied color; when false, alpha is forced to 0xFF.
template<bool has_alpha>
void convolveVertically_SSE2(const SkConvolutionFilter1D::ConvolutionFixed* filter_values,
                             int filter_length,
                             unsigned char* const* source_data_rows,
                             int pixel_width,
                             unsigned char* out_row) {
  // Width rounded down to a multiple of 4 pixels; the remainder (up to 3
  // pixels) is handled separately below.
  int width = pixel_width & ~3;

  __m128i zero = _mm_setzero_si128();
  __m128i accum0, accum1, accum2, accum3, coeff16;
  const __m128i* src;
  // Output four pixels per iteration (16 bytes).
  for (int out_x = 0; out_x < width; out_x += 4) {

    // Accumulated result for each pixel. 32 bits per RGBA channel.
    accum0 = _mm_setzero_si128();
    accum1 = _mm_setzero_si128();
    accum2 = _mm_setzero_si128();
    accum3 = _mm_setzero_si128();

    // Convolve with one filter coefficient per iteration.
    for (int filter_y = 0; filter_y < filter_length; filter_y++) {

      // Duplicate the filter coefficient 8 times.
      // [16] cj cj cj cj cj cj cj cj
      coeff16 = _mm_set1_epi16(filter_values[filter_y]);

      // Load four pixels (16 bytes) together.
      // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
      src = reinterpret_cast<const __m128i*>(
          &source_data_rows[filter_y][out_x << 2]);
      __m128i src8 = _mm_loadu_si128(src);

      // Unpack 1st and 2nd pixels from 8 bits to 16 bits for each channels =>
      // multiply with current coefficient => accumulate the result.
      // [16] a1 b1 g1 r1 a0 b0 g0 r0
      __m128i src16 = _mm_unpacklo_epi8(src8, zero);
      __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
      __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
      // [32] a0 b0 g0 r0
      __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
      accum0 = _mm_add_epi32(accum0, t);
      // [32] a1 b1 g1 r1
      t = _mm_unpackhi_epi16(mul_lo, mul_hi);
      accum1 = _mm_add_epi32(accum1, t);

      // Unpack 3rd and 4th pixels from 8 bits to 16 bits for each channels =>
      // multiply with current coefficient => accumulate the result.
      // [16] a3 b3 g3 r3 a2 b2 g2 r2
      src16 = _mm_unpackhi_epi8(src8, zero);
      mul_hi = _mm_mulhi_epi16(src16, coeff16);
      mul_lo = _mm_mullo_epi16(src16, coeff16);
      // [32] a2 b2 g2 r2
      t = _mm_unpacklo_epi16(mul_lo, mul_hi);
      accum2 = _mm_add_epi32(accum2, t);
      // [32] a3 b3 g3 r3
      t = _mm_unpackhi_epi16(mul_lo, mul_hi);
      accum3 = _mm_add_epi32(accum3, t);
    }

    // Shift right for fixed point implementation.
    accum0 = _mm_srai_epi32(accum0, SkConvolutionFilter1D::kShiftBits);
    accum1 = _mm_srai_epi32(accum1, SkConvolutionFilter1D::kShiftBits);
    accum2 = _mm_srai_epi32(accum2, SkConvolutionFilter1D::kShiftBits);
    accum3 = _mm_srai_epi32(accum3, SkConvolutionFilter1D::kShiftBits);

    // Packing 32 bits |accum| to 16 bits per channel (signed saturation).
    // [16] a1 b1 g1 r1 a0 b0 g0 r0
    accum0 = _mm_packs_epi32(accum0, accum1);
    // [16] a3 b3 g3 r3 a2 b2 g2 r2
    accum2 = _mm_packs_epi32(accum2, accum3);

    // Packing 16 bits |accum| to 8 bits per channel (unsigned saturation).
    // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
    accum0 = _mm_packus_epi16(accum0, accum2);

    if (has_alpha) {
      // Compute the max(ri, gi, bi) for each pixel.
      // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
      __m128i a = _mm_srli_epi32(accum0, 8);
      // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
      __m128i b = _mm_max_epu8(a, accum0);  // Max of r and g.
      // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
      a = _mm_srli_epi32(accum0, 16);
      // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
      b = _mm_max_epu8(a, b);  // Max of r and g and b.
      // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
      b = _mm_slli_epi32(b, 24);

      // Make sure the value of alpha channel is always larger than maximum
      // value of color channels.
      accum0 = _mm_max_epu8(b, accum0);
    } else {
      // Set value of alpha channels to 0xFF.
      __m128i mask = _mm_set1_epi32(0xff000000);
      accum0 = _mm_or_si128(accum0, mask);
    }

    // Store the convolution result (16 bytes) and advance the pixel pointers.
    _mm_storeu_si128(reinterpret_cast<__m128i*>(out_row), accum0);
    out_row += 16;
  }

  // When the width of the output is not divisible by 4, We need to save one
  // pixel (4 bytes) each time. And also the fourth pixel is always absent.
  if (pixel_width & 3) {
    accum0 = _mm_setzero_si128();
    accum1 = _mm_setzero_si128();
    accum2 = _mm_setzero_si128();
    for (int filter_y = 0; filter_y < filter_length; ++filter_y) {
      coeff16 = _mm_set1_epi16(filter_values[filter_y]);
      // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
      src = reinterpret_cast<const __m128i*>(
          &source_data_rows[filter_y][width<<2]);
      __m128i src8 = _mm_loadu_si128(src);
      // [16] a1 b1 g1 r1 a0 b0 g0 r0
      __m128i src16 = _mm_unpacklo_epi8(src8, zero);
      __m128i mul_hi = _mm_mulhi_epi16(src16, coeff16);
      __m128i mul_lo = _mm_mullo_epi16(src16, coeff16);
      // [32] a0 b0 g0 r0
      __m128i t = _mm_unpacklo_epi16(mul_lo, mul_hi);
      accum0 = _mm_add_epi32(accum0, t);
      // [32] a1 b1 g1 r1
      t = _mm_unpackhi_epi16(mul_lo, mul_hi);
      accum1 = _mm_add_epi32(accum1, t);
      // [16] a3 b3 g3 r3 a2 b2 g2 r2
      src16 = _mm_unpackhi_epi8(src8, zero);
      mul_hi = _mm_mulhi_epi16(src16, coeff16);
      mul_lo = _mm_mullo_epi16(src16, coeff16);
      // [32] a2 b2 g2 r2
      t = _mm_unpacklo_epi16(mul_lo, mul_hi);
      accum2 = _mm_add_epi32(accum2, t);
    }

    accum0 = _mm_srai_epi32(accum0, SkConvolutionFilter1D::kShiftBits);
    accum1 = _mm_srai_epi32(accum1, SkConvolutionFilter1D::kShiftBits);
    accum2 = _mm_srai_epi32(accum2, SkConvolutionFilter1D::kShiftBits);
    // [16] a1 b1 g1 r1 a0 b0 g0 r0
    accum0 = _mm_packs_epi32(accum0, accum1);
    // [16] a3 b3 g3 r3 a2 b2 g2 r2
    accum2 = _mm_packs_epi32(accum2, zero);
    // [8] a3 b3 g3 r3 a2 b2 g2 r2 a1 b1 g1 r1 a0 b0 g0 r0
    accum0 = _mm_packus_epi16(accum0, accum2);
    if (has_alpha) {
      // Same alpha fix-up as in the main loop.
      // [8] xx a3 b3 g3 xx a2 b2 g2 xx a1 b1 g1 xx a0 b0 g0
      __m128i a = _mm_srli_epi32(accum0, 8);
      // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
      __m128i b = _mm_max_epu8(a, accum0);  // Max of r and g.
      // [8] xx xx a3 b3 xx xx a2 b2 xx xx a1 b1 xx xx a0 b0
      a = _mm_srli_epi32(accum0, 16);
      // [8] xx xx xx max3 xx xx xx max2 xx xx xx max1 xx xx xx max0
      b = _mm_max_epu8(a, b);  // Max of r and g and b.
      // [8] max3 00 00 00 max2 00 00 00 max1 00 00 00 max0 00 00 00
      b = _mm_slli_epi32(b, 24);
      accum0 = _mm_max_epu8(b, accum0);
    } else {
      __m128i mask = _mm_set1_epi32(0xff000000);
      accum0 = _mm_or_si128(accum0, mask);
    }

    // Emit the remaining 1-3 pixels one at a time, shifting the next pixel
    // down into the low lane after each store.
    for (int out_x = width; out_x < pixel_width; out_x++) {
      *(reinterpret_cast<int*>(out_row)) = _mm_cvtsi128_si32(accum0);
      accum0 = _mm_srli_si128(accum0, 4);
      out_row += 4;
    }
  }
}
602
603void convolveVertically_SSE2(const SkConvolutionFilter1D::ConvolutionFixed* filter_values,
604                             int filter_length,
605                             unsigned char* const* source_data_rows,
606                             int pixel_width,
607                             unsigned char* out_row,
608                             bool has_alpha) {
609  if (has_alpha) {
610    convolveVertically_SSE2<true>(filter_values,
611                                  filter_length,
612                                  source_data_rows,
613                                  pixel_width,
614                                  out_row);
615  } else {
616    convolveVertically_SSE2<false>(filter_values,
617                                   filter_length,
618                                   source_data_rows,
619                                   pixel_width,
620                                   out_row);
621  }
622}
623
624void applySIMDPadding_SSE2(SkConvolutionFilter1D *filter) {
625    // Padding |paddingCount| of more dummy coefficients after the coefficients
626    // of last filter to prevent SIMD instructions which load 8 or 16 bytes
627    // together to access invalid memory areas. We are not trying to align the
628    // coefficients right now due to the opaqueness of <vector> implementation.
629    // This has to be done after all |AddFilter| calls.
630    for (int i = 0; i < 8; ++i) {
631        filter->addFilterValue(static_cast<SkConvolutionFilter1D::ConvolutionFixed>(0));
632    }
633}
634