// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE4 version of some encoding functions.
//
// Author: Skal (pascal.massimino@gmail.com)

#include "./dsp.h"

#if defined(WEBP_USE_SSE41)
#include <smmintrin.h>
#include <stdlib.h>  // for abs()

#include "./common_sse2.h"
#include "../enc/vp8i_enc.h"

//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms.

static void CollectHistogram(const uint8_t* ref, const uint8_t* pred,
                             int start_block, int end_block,
                             VP8Histogram* const histo) {
  const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
  int j;
  int distribution[MAX_COEFF_THRESH + 1] = { 0 };
  for (j = start_block; j < end_block; ++j) {
    int16_t out[16];
    int k;

    VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);

    // Convert coefficients to bin (within out[]).
    {
      // Load.
      const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
      const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
      // v = abs(out) >> 3
      const __m128i abs0 = _mm_abs_epi16(out0);
      const __m128i abs1 = _mm_abs_epi16(out1);
      const __m128i v0 = _mm_srai_epi16(abs0, 3);
      const __m128i v1 = _mm_srai_epi16(abs1, 3);
      // bin = min(v, MAX_COEFF_THRESH)
      const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
      const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
      // Store.
      _mm_storeu_si128((__m128i*)&out[0], bin0);
      _mm_storeu_si128((__m128i*)&out[8], bin1);
    }

    // Accumulate the bin values into the histogram distribution.
    for (k = 0; k < 16; ++k) {
      ++distribution[out[k]];
    }
  }
  VP8SetHistogramData(distribution, histo);
}
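
// For reference, a minimal scalar sketch of the binning done above, kept
// under '#if 0' purely as illustration (the helper name is ours, not part
// of the library). Each transformed coefficient is mapped to
// min(abs(coeff) >> 3, MAX_COEFF_THRESH) before being counted.
#if 0
static void CollectHistogramScalar(const int16_t coeffs[16],
                                   int distribution[MAX_COEFF_THRESH + 1]) {
  int k;
  for (k = 0; k < 16; ++k) {
    const int v = abs(coeffs[k]) >> 3;  // drop the sign, scale down
    const int bin = (v > MAX_COEFF_THRESH) ? MAX_COEFF_THRESH : v;  // clamp
    ++distribution[bin];
  }
}
#endif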

//------------------------------------------------------------------------------
// Texture distortion
//
// We try to match the spectral content (weighted) between source and
// reconstructed samples.

// Hadamard transform
// Returns the difference between the weighted sums of the absolute values of
// the transformed coefficients of inA and inB.
// w[] contains a row-major 4 by 4 symmetric matrix.
static int TTransform(const uint8_t* inA, const uint8_t* inB,
                      const uint16_t* const w) {
  int32_t sum[4];
  __m128i tmp_0, tmp_1, tmp_2, tmp_3;

  // Load and combine inputs.
  {
    const __m128i inA_0 = _mm_loadu_si128((const __m128i*)&inA[BPS * 0]);
    const __m128i inA_1 = _mm_loadu_si128((const __m128i*)&inA[BPS * 1]);
    const __m128i inA_2 = _mm_loadu_si128((const __m128i*)&inA[BPS * 2]);
    // In SSE4.1, with gcc 4.8 at least (maybe other versions),
    // _mm_loadu_si128 is faster than _mm_loadl_epi64. But for the last row
    // of inA and inB, _mm_loadl_epi64 is still used to avoid an
    // out-of-bounds read.
    const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]);
    const __m128i inB_0 = _mm_loadu_si128((const __m128i*)&inB[BPS * 0]);
    const __m128i inB_1 = _mm_loadu_si128((const __m128i*)&inB[BPS * 1]);
    const __m128i inB_2 = _mm_loadu_si128((const __m128i*)&inB[BPS * 2]);
    const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]);

    // Combine inA and inB (we'll do two transforms in parallel).
    const __m128i inAB_0 = _mm_unpacklo_epi32(inA_0, inB_0);
    const __m128i inAB_1 = _mm_unpacklo_epi32(inA_1, inB_1);
    const __m128i inAB_2 = _mm_unpacklo_epi32(inA_2, inB_2);
    const __m128i inAB_3 = _mm_unpacklo_epi32(inA_3, inB_3);
    tmp_0 = _mm_cvtepu8_epi16(inAB_0);
    tmp_1 = _mm_cvtepu8_epi16(inAB_1);
    tmp_2 = _mm_cvtepu8_epi16(inAB_2);
    tmp_3 = _mm_cvtepu8_epi16(inAB_3);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
  }

  // Vertical pass first: the vertical and horizontal passes are commutative
  // because w/kWeightY is symmetric, and doing the vertical pass on the
  // row-major registers saves an initial transpose and a transpose back.
  {
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&b0, &b1, &b2, &b3, &tmp_0, &tmp_1, &tmp_2, &tmp_3);
  }

  // Horizontal pass and difference of weighted sums.
  {
    // Load all inputs.
    const __m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]);
    const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]);

    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);

    // Separate the transforms of inA and inB.
    __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
    __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
    __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
    __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);

    A_b0 = _mm_abs_epi16(A_b0);
    A_b2 = _mm_abs_epi16(A_b2);
    B_b0 = _mm_abs_epi16(B_b0);
    B_b2 = _mm_abs_epi16(B_b2);

    // weighted sums
    A_b0 = _mm_madd_epi16(A_b0, w_0);
    A_b2 = _mm_madd_epi16(A_b2, w_8);
    B_b0 = _mm_madd_epi16(B_b0, w_0);
    B_b2 = _mm_madd_epi16(B_b2, w_8);
    A_b0 = _mm_add_epi32(A_b0, A_b2);
    B_b0 = _mm_add_epi32(B_b0, B_b2);

    // difference of weighted sums
    A_b2 = _mm_sub_epi32(A_b0, B_b0);
    _mm_storeu_si128((__m128i*)&sum[0], A_b2);
  }
  return sum[0] + sum[1] + sum[2] + sum[3];
}
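
// A minimal scalar sketch of the computation above, for illustration only
// (the helper name is ours). For one 4x4 block it applies the butterflies
// horizontally then vertically and accumulates w[i] * abs(coeff[i]);
// TTransform() then amounts to the difference of the two blocks' sums,
// which Disto4x4() below scales as abs(.) >> 5.
#if 0
static int TTransformOneBlockScalar(const uint8_t* in, const uint16_t* w) {
  int tmp[16];
  int sum = 0;
  int i;
  // Horizontal pass.
  for (i = 0; i < 4; ++i, in += BPS) {
    const int a0 = in[0] + in[2], a1 = in[1] + in[3];
    const int a2 = in[1] - in[3], a3 = in[0] - in[2];
    tmp[0 + i * 4] = a0 + a1;
    tmp[1 + i * 4] = a3 + a2;
    tmp[2 + i * 4] = a3 - a2;
    tmp[3 + i * 4] = a0 - a1;
  }
  // Vertical pass and weighted sum of absolute values.
  for (i = 0; i < 4; ++i, ++w) {
    const int a0 = tmp[0 + i] + tmp[8 + i], a1 = tmp[4 + i] + tmp[12 + i];
    const int a2 = tmp[4 + i] - tmp[12 + i], a3 = tmp[0 + i] - tmp[8 + i];
    sum += w[0] * abs(a0 + a1) + w[4] * abs(a3 + a2)
         + w[8] * abs(a3 - a2) + w[12] * abs(a0 - a1);
  }
  return sum;
}
// So: TTransform(inA, inB, w) == TTransformOneBlockScalar(inA, w)
//                              - TTransformOneBlockScalar(inB, w)
#endif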

static int Disto4x4(const uint8_t* const a, const uint8_t* const b,
                    const uint16_t* const w) {
  const int diff_sum = TTransform(a, b, w);
  return abs(diff_sum) >> 5;
}

static int Disto16x16(const uint8_t* const a, const uint8_t* const b,
                      const uint16_t* const w) {
  int D = 0;
  int x, y;
  for (y = 0; y < 16 * BPS; y += 4 * BPS) {
    for (x = 0; x < 16; x += 4) {
      D += Disto4x4(a + x + y, b + x + y, w);
    }
  }
  return D;
}

//------------------------------------------------------------------------------
// Quantization
//

// Generates a pshufb constant for shuffling 16b words.
#define PSHUFB_CST(A,B,C,D,E,F,G,H) \
  _mm_set_epi8(2 * (H) + 1, 2 * (H) + 0, 2 * (G) + 1, 2 * (G) + 0, \
               2 * (F) + 1, 2 * (F) + 0, 2 * (E) + 1, 2 * (E) + 0, \
               2 * (D) + 1, 2 * (D) + 0, 2 * (C) + 1, 2 * (C) + 0, \
               2 * (B) + 1, 2 * (B) + 0, 2 * (A) + 1, 2 * (A) + 0)
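
// Example (illustration only; the helper name and values are ours): each 16b
// word index A..H expands to its two byte indices 2*A and 2*A+1. An index of
// -1 expands to negative byte indices; pshufb emits a zero for any selector
// byte with its sign bit set, so -1 zeroes out the destination word.
#if 0
static void PshufbCstExample(void) {
  const __m128i words = _mm_set_epi16(7, 6, 5, 4, 3, 2, 1, 0);  // w7..w0
  const __m128i cst = PSHUFB_CST(0, 1, 4, -1, 5, 2, 3, 6);
  // 'result' holds the words { 0, 1, 4, 0, 5, 2, 3, 6 }: word #3 is zeroed.
  const __m128i result = _mm_shuffle_epi8(words, cst);
  (void)result;
}
#endif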

static WEBP_INLINE int DoQuantizeBlock(int16_t in[16], int16_t out[16],
                                       const uint16_t* const sharpen,
                                       const VP8Matrix* const mtx) {
  const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
  const __m128i zero = _mm_setzero_si128();
  __m128i out0, out8;
  __m128i packed_out;

  // Load all inputs.
  __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
  const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]);
  const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]);
  const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]);
  const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]);

  // coeff = abs(in)
  __m128i coeff0 = _mm_abs_epi16(in0);
  __m128i coeff8 = _mm_abs_epi16(in8);

  // coeff = abs(in) + sharpen
  if (sharpen != NULL) {
    const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]);
    const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]);
    coeff0 = _mm_add_epi16(coeff0, sharpen0);
    coeff8 = _mm_add_epi16(coeff8, sharpen8);
  }

  // out = (coeff * iQ + B) >> QFIX
  {
    // doing calculations with 32b precision (QFIX=17)
    // out = (coeff * iQ)
    const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
    const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
    const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
    const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
    __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
    __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
    // out = (coeff * iQ + B)
    const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]);
    const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]);
    const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]);
    const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]);
    out_00 = _mm_add_epi32(out_00, bias_00);
    out_04 = _mm_add_epi32(out_04, bias_04);
    out_08 = _mm_add_epi32(out_08, bias_08);
    out_12 = _mm_add_epi32(out_12, bias_12);
    // out = QUANTDIV(coeff, iQ, B, QFIX)
    out_00 = _mm_srai_epi32(out_00, QFIX);
    out_04 = _mm_srai_epi32(out_04, QFIX);
    out_08 = _mm_srai_epi32(out_08, QFIX);
    out_12 = _mm_srai_epi32(out_12, QFIX);

    // pack result as 16b
    out0 = _mm_packs_epi32(out_00, out_04);
    out8 = _mm_packs_epi32(out_08, out_12);

    // if (out > 2047) out = 2047
    out0 = _mm_min_epi16(out0, max_coeff_2047);
    out8 = _mm_min_epi16(out8, max_coeff_2047);
  }

  // put sign back
  out0 = _mm_sign_epi16(out0, in0);
  out8 = _mm_sign_epi16(out8, in8);

  // in = out * Q
  in0 = _mm_mullo_epi16(out0, q0);
  in8 = _mm_mullo_epi16(out8, q8);

  _mm_storeu_si128((__m128i*)&in[0], in0);
  _mm_storeu_si128((__m128i*)&in[8], in8);

  // Zigzag the output before storing it. The re-ordering is:
  //    0 1 2 3 4 5 6 7 | 8  9 10 11 12 13 14 15
  // -> 0 1 4[8]5 2 3 6 | 9 12 13 10 [7]11 14 15
  // There are only two misplaced entries ([8] and [7]) that cross the
  // register's boundaries.
  // We use pshufb instead of pshuflo/pshufhi.
  {
    const __m128i kCst_lo = PSHUFB_CST(0, 1, 4, -1, 5, 2, 3, 6);
    const __m128i kCst_7 = PSHUFB_CST(-1, -1, -1, -1, 7, -1, -1, -1);
    const __m128i tmp_lo = _mm_shuffle_epi8(out0, kCst_lo);
    const __m128i tmp_7 = _mm_shuffle_epi8(out0, kCst_7);  // extract #7
    const __m128i kCst_hi = PSHUFB_CST(1, 4, 5, 2, -1, 3, 6, 7);
    const __m128i kCst_8 = PSHUFB_CST(-1, -1, -1, 0, -1, -1, -1, -1);
    const __m128i tmp_hi = _mm_shuffle_epi8(out8, kCst_hi);
    const __m128i tmp_8 = _mm_shuffle_epi8(out8, kCst_8);  // extract #8
    const __m128i out_z0 = _mm_or_si128(tmp_lo, tmp_8);
    const __m128i out_z8 = _mm_or_si128(tmp_hi, tmp_7);
    _mm_storeu_si128((__m128i*)&out[0], out_z0);
    _mm_storeu_si128((__m128i*)&out[8], out_z8);
    packed_out = _mm_packs_epi16(out_z0, out_z8);
  }

  // detect if all 'out' values are zeroes or not
  return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);
}

#undef PSHUFB_CST
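
// A minimal scalar sketch of what DoQuantizeBlock() computes, kept under
// '#if 0' for illustration only. The helper name and the local zigzag table
// are ours (the encoder keeps its own table); QFIX and MAX_LEVEL come from
// vp8i_enc.h. Note that, like the SSE4.1 path, this skips the zthresh test
// of the plain C quantizer.
#if 0
static int DoQuantizeBlockScalar(int16_t in[16], int16_t out[16],
                                 const uint16_t* const sharpen,
                                 const VP8Matrix* const mtx) {
  static const int kZigzagSketch[16] = {
    0, 1, 4, 8, 5, 2, 3, 6, 9, 12, 13, 10, 7, 11, 14, 15
  };
  int n, nz = 0;
  for (n = 0; n < 16; ++n) {
    const int j = kZigzagSketch[n];
    const int sign = (in[j] < 0);
    const uint32_t coeff = abs(in[j]) + ((sharpen != NULL) ? sharpen[j] : 0);
    int level = (int)((coeff * mtx->iq_[j] + mtx->bias_[j]) >> QFIX);
    if (level > MAX_LEVEL) level = MAX_LEVEL;  // clamp to 2047
    if (sign) level = -level;                  // put the sign back
    in[j] = level * mtx->q_[j];                // store dequantized value
    out[n] = level;                            // zigzag-ordered output
    nz |= (level != 0);
  }
  return nz;  // non-zero if any quantized level survived
}
#endif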

static int QuantizeBlock(int16_t in[16], int16_t out[16],
                         const VP8Matrix* const mtx) {
  return DoQuantizeBlock(in, out, &mtx->sharpen_[0], mtx);
}

static int QuantizeBlockWHT(int16_t in[16], int16_t out[16],
                            const VP8Matrix* const mtx) {
  return DoQuantizeBlock(in, out, NULL, mtx);
}

static int Quantize2Blocks(int16_t in[32], int16_t out[32],
                           const VP8Matrix* const mtx) {
  int nz;
  const uint16_t* const sharpen = &mtx->sharpen_[0];
  nz  = DoQuantizeBlock(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0;
  nz |= DoQuantizeBlock(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1;
  return nz;
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8EncDspInitSSE41(void);
WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) {
  VP8CollectHistogram = CollectHistogram;
  VP8EncQuantizeBlock = QuantizeBlock;
  VP8EncQuantize2Blocks = Quantize2Blocks;
  VP8EncQuantizeBlockWHT = QuantizeBlockWHT;
  VP8TDisto4x4 = Disto4x4;
  VP8TDisto16x16 = Disto16x16;
}

#else  // !WEBP_USE_SSE41

WEBP_DSP_INIT_STUB(VP8EncDspInitSSE41)

#endif  // WEBP_USE_SSE41
