/*
 *  Copyright (c) 2011 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

/*
 * The core AEC algorithm, SSE2 version of speed-critical functions.
 */

#include <emmintrin.h>
#include <math.h>
#include <string.h>  // memset

#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"

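// Scalar helpers implementing the real and imaginary parts of a complex
// multiplication:
//   (aRe + i*aIm) * (bRe + i*bIm) = (aRe*bRe - aIm*bIm) + i*(aRe*bIm + aIm*bRe)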
__inline static float MulRe(float aRe, float aIm, float bRe, float bIm) {
  return aRe * bRe - aIm * bIm;
}

__inline static float MulIm(float aRe, float aIm, float bRe, float bIm) {
  return aRe * bIm + aIm * bRe;
}

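// Accumulates the frequency-domain echo estimate over all filter partitions:
// yf += xfBuf * wfBuf (complex multiply-accumulate), where xfBuf is a circular
// buffer of far-end spectra indexed from xfBufBlockPos.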
static void FilterFarSSE2(AecCore* aec, float yf[2][PART_LEN1]) {
  int i;
  const int num_partitions = aec->num_partitions;
  for (i = 0; i < num_partitions; i++) {
    int j;
    int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + aec->xfBufBlockPos >= num_partitions) {
      xPos -= num_partitions * PART_LEN1;
    }

    // vectorized code (four at once)
    for (j = 0; j + 3 < PART_LEN1; j += 4) {
      const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
      const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
      const __m128 wfBuf_re = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
      const __m128 wfBuf_im = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
      const __m128 yf_re = _mm_loadu_ps(&yf[0][j]);
      const __m128 yf_im = _mm_loadu_ps(&yf[1][j]);
      const __m128 a = _mm_mul_ps(xfBuf_re, wfBuf_re);
      const __m128 b = _mm_mul_ps(xfBuf_im, wfBuf_im);
      const __m128 c = _mm_mul_ps(xfBuf_re, wfBuf_im);
      const __m128 d = _mm_mul_ps(xfBuf_im, wfBuf_re);
      const __m128 e = _mm_sub_ps(a, b);
      const __m128 f = _mm_add_ps(c, d);
      const __m128 g = _mm_add_ps(yf_re, e);
      const __m128 h = _mm_add_ps(yf_im, f);
      _mm_storeu_ps(&yf[0][j], g);
      _mm_storeu_ps(&yf[1][j], h);
    }
    // scalar code for the remaining items.
    for (; j < PART_LEN1; j++) {
      yf[0][j] += MulRe(aec->xfBuf[0][xPos + j],
                        aec->xfBuf[1][xPos + j],
                        aec->wfBuf[0][pos + j],
                        aec->wfBuf[1][pos + j]);
      yf[1][j] += MulIm(aec->xfBuf[0][xPos + j],
                        aec->xfBuf[1][xPos + j],
                        aec->wfBuf[0][pos + j],
                        aec->wfBuf[1][pos + j]);
    }
  }
}

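// Normalizes the error spectrum by the far-end power (as in NLMS), limits its
// magnitude to the error threshold, and applies the step size mu.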
static void ScaleErrorSignalSSE2(AecCore* aec, float ef[2][PART_LEN1]) {
  const __m128 k1e_10f = _mm_set1_ps(1e-10f);
  const __m128 kMu = aec->extended_filter_enabled ? _mm_set1_ps(kExtendedMu)
                                                  : _mm_set1_ps(aec->normal_mu);
  const __m128 kThresh = aec->extended_filter_enabled
                             ? _mm_set1_ps(kExtendedErrorThreshold)
                             : _mm_set1_ps(aec->normal_error_threshold);

  int i;
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    const __m128 xPow = _mm_loadu_ps(&aec->xPow[i]);
    const __m128 ef_re_base = _mm_loadu_ps(&ef[0][i]);
    const __m128 ef_im_base = _mm_loadu_ps(&ef[1][i]);

    const __m128 xPowPlus = _mm_add_ps(xPow, k1e_10f);
    __m128 ef_re = _mm_div_ps(ef_re_base, xPowPlus);
    __m128 ef_im = _mm_div_ps(ef_im_base, xPowPlus);
    const __m128 ef_re2 = _mm_mul_ps(ef_re, ef_re);
    const __m128 ef_im2 = _mm_mul_ps(ef_im, ef_im);
    const __m128 ef_sum2 = _mm_add_ps(ef_re2, ef_im2);
    const __m128 absEf = _mm_sqrt_ps(ef_sum2);
    const __m128 bigger = _mm_cmpgt_ps(absEf, kThresh);
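    // Branchless select: the comparison yields all-ones lanes where
    // |ef| > threshold, so the and/andnot/or sequence below picks the rescaled
    // value (ef * threshold / |ef|) in those lanes and the original ef
    // elsewhere.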
    __m128 absEfPlus = _mm_add_ps(absEf, k1e_10f);
    const __m128 absEfInv = _mm_div_ps(kThresh, absEfPlus);
    __m128 ef_re_if = _mm_mul_ps(ef_re, absEfInv);
    __m128 ef_im_if = _mm_mul_ps(ef_im, absEfInv);
    ef_re_if = _mm_and_ps(bigger, ef_re_if);
    ef_im_if = _mm_and_ps(bigger, ef_im_if);
    ef_re = _mm_andnot_ps(bigger, ef_re);
    ef_im = _mm_andnot_ps(bigger, ef_im);
    ef_re = _mm_or_ps(ef_re, ef_re_if);
    ef_im = _mm_or_ps(ef_im, ef_im_if);
    ef_re = _mm_mul_ps(ef_re, kMu);
    ef_im = _mm_mul_ps(ef_im, kMu);

    _mm_storeu_ps(&ef[0][i], ef_re);
    _mm_storeu_ps(&ef[1][i], ef_im);
  }
  // scalar code for the remaining items.
  {
    const float mu =
        aec->extended_filter_enabled ? kExtendedMu : aec->normal_mu;
    const float error_threshold = aec->extended_filter_enabled
                                      ? kExtendedErrorThreshold
                                      : aec->normal_error_threshold;
    for (; i < PART_LEN1; i++) {
      float abs_ef;
      ef[0][i] /= (aec->xPow[i] + 1e-10f);
      ef[1][i] /= (aec->xPow[i] + 1e-10f);
      abs_ef = sqrtf(ef[0][i] * ef[0][i] + ef[1][i] * ef[1][i]);

      if (abs_ef > error_threshold) {
        abs_ef = error_threshold / (abs_ef + 1e-10f);
        ef[0][i] *= abs_ef;
        ef[1][i] *= abs_ef;
      }

      // Stepsize factor
      ef[0][i] *= mu;
      ef[1][i] *= mu;
    }
  }
}

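// Adapts the filter: correlates each far-end spectrum with the scaled error
// (conjugate product), constrains the gradient in the time domain via an
// inverse/forward FFT round trip, and accumulates the result into wfBuf.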
static void FilterAdaptationSSE2(AecCore* aec,
                                 float* fft,
                                 float ef[2][PART_LEN1]) {
  int i, j;
  const int num_partitions = aec->num_partitions;
  for (i = 0; i < num_partitions; i++) {
    int xPos = (i + aec->xfBufBlockPos) * PART_LEN1;
    int pos = i * PART_LEN1;
    // Check for wrap
    if (i + aec->xfBufBlockPos >= num_partitions) {
      xPos -= num_partitions * PART_LEN1;
    }

    // Process the whole array...
    for (j = 0; j < PART_LEN; j += 4) {
      // Load xfBuf and ef.
      const __m128 xfBuf_re = _mm_loadu_ps(&aec->xfBuf[0][xPos + j]);
      const __m128 xfBuf_im = _mm_loadu_ps(&aec->xfBuf[1][xPos + j]);
      const __m128 ef_re = _mm_loadu_ps(&ef[0][j]);
      const __m128 ef_im = _mm_loadu_ps(&ef[1][j]);
      // Calculate the product of conjugate(xfBuf) and ef:
      //   re(conjugate(a) * b) = aRe * bRe + aIm * bIm
      //   im(conjugate(a) * b) = aRe * bIm - aIm * bRe
      const __m128 a = _mm_mul_ps(xfBuf_re, ef_re);
      const __m128 b = _mm_mul_ps(xfBuf_im, ef_im);
      const __m128 c = _mm_mul_ps(xfBuf_re, ef_im);
      const __m128 d = _mm_mul_ps(xfBuf_im, ef_re);
      const __m128 e = _mm_add_ps(a, b);
      const __m128 f = _mm_sub_ps(c, d);
      // Interleave real and imaginary parts.
      const __m128 g = _mm_unpacklo_ps(e, f);
      const __m128 h = _mm_unpackhi_ps(e, f);
      // Store
      _mm_storeu_ps(&fft[2 * j + 0], g);
      _mm_storeu_ps(&fft[2 * j + 4], h);
    }
    // ... and fixup the first imaginary entry.
    fft[1] = MulRe(aec->xfBuf[0][xPos + PART_LEN],
                   -aec->xfBuf[1][xPos + PART_LEN],
                   ef[0][PART_LEN],
                   ef[1][PART_LEN]);

    aec_rdft_inverse_128(fft);
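    // Constrain the gradient: zero the second half of the time-domain
    // response, as required by the overlap-save partitioned filter.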
    memset(fft + PART_LEN, 0, sizeof(float) * PART_LEN);

    // fft scaling
    {
      float scale = 2.0f / PART_LEN2;
      const __m128 scale_ps = _mm_load_ps1(&scale);
      for (j = 0; j < PART_LEN; j += 4) {
        const __m128 fft_ps = _mm_loadu_ps(&fft[j]);
        const __m128 fft_scale = _mm_mul_ps(fft_ps, scale_ps);
        _mm_storeu_ps(&fft[j], fft_scale);
      }
    }
    aec_rdft_forward_128(fft);

    {
      float wt1 = aec->wfBuf[1][pos];
      aec->wfBuf[0][pos + PART_LEN] += fft[1];
      for (j = 0; j < PART_LEN; j += 4) {
        __m128 wtBuf_re = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
        __m128 wtBuf_im = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
        const __m128 fft0 = _mm_loadu_ps(&fft[2 * j + 0]);
        const __m128 fft4 = _mm_loadu_ps(&fft[2 * j + 4]);
        const __m128 fft_re =
            _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(2, 0, 2, 0));
        const __m128 fft_im =
            _mm_shuffle_ps(fft0, fft4, _MM_SHUFFLE(3, 1, 3, 1));
        wtBuf_re = _mm_add_ps(wtBuf_re, fft_re);
        wtBuf_im = _mm_add_ps(wtBuf_im, fft_im);
        _mm_storeu_ps(&aec->wfBuf[0][pos + j], wtBuf_re);
        _mm_storeu_ps(&aec->wfBuf[1][pos + j], wtBuf_im);
      }
      aec->wfBuf[1][pos] = wt1;
    }
  }
}

static __m128 mm_pow_ps(__m128 a, __m128 b) {
  // a^b = exp2(b * log2(a))
  //   exp2(x) and log2(x) are calculated using polynomial approximations.
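  //   For example, 8^(1/3) = exp2((1/3) * log2(8)) = exp2(1) = 2.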
  __m128 log2_a, b_log2_a, a_exp_b;

  // Calculate log2(x), x = a.
  {
    // To calculate log2(x), we decompose x like this:
    //   x = y * 2^n
    //     n is an integer
    //     y is in the [1.0, 2.0) range
    //
    //   log2(x) = log2(y) + n
    //     n       can be evaluated by playing with float representation.
    //     log2(y) in a small range can be approximated; this code uses an
    //             order-five polynomial approximation. The coefficients have
    //             been estimated with the Remez algorithm, and the resulting
    //             polynomial has a maximum relative error of 0.00086%.
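    //
    //   Example: 6.0f = 1.5 * 2^2, so n = 2 and y = 1.5;
    //   log2(6.0) = 2 + log2(1.5) ~= 2 + 0.585 = 2.585.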

    // Compute n.
    //    This is done by masking out the exponent, shifting it into the top
    //    bits of the mantissa, putting eight into the biased exponent (to
    //    compensate for the exponent having been shifted into the fractional
    //    part), and finally removing the implicit leading one by subtracting
    //    it out.
    static const ALIGN16_BEG int float_exponent_mask[4] ALIGN16_END = {
        0x7F800000, 0x7F800000, 0x7F800000, 0x7F800000};
    static const ALIGN16_BEG int eight_biased_exponent[4] ALIGN16_END = {
        0x43800000, 0x43800000, 0x43800000, 0x43800000};
    static const ALIGN16_BEG int implicit_leading_one[4] ALIGN16_END = {
        0x43BF8000, 0x43BF8000, 0x43BF8000, 0x43BF8000};
    static const int shift_exponent_into_top_mantissa = 8;
    const __m128 two_n = _mm_and_ps(a, *((__m128*)float_exponent_mask));
    const __m128 n_1 = _mm_castsi128_ps(_mm_srli_epi32(
        _mm_castps_si128(two_n), shift_exponent_into_top_mantissa));
    const __m128 n_0 = _mm_or_ps(n_1, *((__m128*)eight_biased_exponent));
    const __m128 n = _mm_sub_ps(n_0, *((__m128*)implicit_leading_one));
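    //    E.g. a = 6.0f has biased exponent 129, so n = 129 - 127 = 2.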

    // Compute y.
    static const ALIGN16_BEG int mantissa_mask[4] ALIGN16_END = {
        0x007FFFFF, 0x007FFFFF, 0x007FFFFF, 0x007FFFFF};
    static const ALIGN16_BEG int zero_biased_exponent_is_one[4] ALIGN16_END = {
        0x3F800000, 0x3F800000, 0x3F800000, 0x3F800000};
    const __m128 mantissa = _mm_and_ps(a, *((__m128*)mantissa_mask));
    const __m128 y =
        _mm_or_ps(mantissa, *((__m128*)zero_biased_exponent_is_one));

    // Approximate log2(y) ~= (y - 1) * pol5(y).
    //    pol5(y) = C5 * y^5 + C4 * y^4 + C3 * y^3 + C2 * y^2 + C1 * y + C0
    static const ALIGN16_BEG float ALIGN16_END C5[4] = {
        -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f, -3.4436006e-2f};
    static const ALIGN16_BEG float ALIGN16_END
        C4[4] = {3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f, 3.1821337e-1f};
    static const ALIGN16_BEG float ALIGN16_END
        C3[4] = {-1.2315303f, -1.2315303f, -1.2315303f, -1.2315303f};
    static const ALIGN16_BEG float ALIGN16_END
        C2[4] = {2.5988452f, 2.5988452f, 2.5988452f, 2.5988452f};
    static const ALIGN16_BEG float ALIGN16_END
        C1[4] = {-3.3241990f, -3.3241990f, -3.3241990f, -3.3241990f};
    static const ALIGN16_BEG float ALIGN16_END
        C0[4] = {3.1157899f, 3.1157899f, 3.1157899f, 3.1157899f};
    const __m128 pol5_y_0 = _mm_mul_ps(y, *((__m128*)C5));
    const __m128 pol5_y_1 = _mm_add_ps(pol5_y_0, *((__m128*)C4));
    const __m128 pol5_y_2 = _mm_mul_ps(pol5_y_1, y);
    const __m128 pol5_y_3 = _mm_add_ps(pol5_y_2, *((__m128*)C3));
    const __m128 pol5_y_4 = _mm_mul_ps(pol5_y_3, y);
    const __m128 pol5_y_5 = _mm_add_ps(pol5_y_4, *((__m128*)C2));
    const __m128 pol5_y_6 = _mm_mul_ps(pol5_y_5, y);
    const __m128 pol5_y_7 = _mm_add_ps(pol5_y_6, *((__m128*)C1));
    const __m128 pol5_y_8 = _mm_mul_ps(pol5_y_7, y);
    const __m128 pol5_y = _mm_add_ps(pol5_y_8, *((__m128*)C0));
    const __m128 y_minus_one =
        _mm_sub_ps(y, *((__m128*)zero_biased_exponent_is_one));
    const __m128 log2_y = _mm_mul_ps(y_minus_one, pol5_y);

    // Combine parts.
    log2_a = _mm_add_ps(n, log2_y);
  }

  // b * log2(a)
  b_log2_a = _mm_mul_ps(b, log2_a);

  // Calculate exp2(x), x = b * log2(a).
  {
    // To calculate 2^x, we decompose x like this:
    //   x = n + y
    //     n is an integer: x - 0.5 rounded to the nearest integer
    //       (_mm_cvtps_epi32 uses the default round-to-nearest mode), so
    //     y = x - n is in the [0, 1] range.
    //
    //   2^x = 2^n * 2^y
    //     2^n can be evaluated by playing with float representation.
    //     2^y in a small range can be approximated; this code uses an
    //         order-two polynomial approximation. The coefficients have been
    //         estimated with the Remez algorithm and the resulting polynomial
    //         has a maximum relative error of 0.17%.
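    //
    //   Example: x = 3.3 gives n = 3 and y = 0.3, so
    //   2^3.3 = 2^3 * 2^0.3 ~= 8 * 1.2311 ~= 9.85.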

    // To avoid over/underflow, the input is clamped to the (-127, 129] range.
    static const ALIGN16_BEG float max_input[4] ALIGN16_END = {129.f, 129.f,
                                                               129.f, 129.f};
    static const ALIGN16_BEG float min_input[4] ALIGN16_END = {
        -126.99999f, -126.99999f, -126.99999f, -126.99999f};
    const __m128 x_min = _mm_min_ps(b_log2_a, *((__m128*)max_input));
    const __m128 x_max = _mm_max_ps(x_min, *((__m128*)min_input));
    // Compute n.
    static const ALIGN16_BEG float half[4] ALIGN16_END = {0.5f, 0.5f,
                                                          0.5f, 0.5f};
    const __m128 x_minus_half = _mm_sub_ps(x_max, *((__m128*)half));
    const __m128i x_minus_half_floor = _mm_cvtps_epi32(x_minus_half);
    // Compute 2^n.
    static const ALIGN16_BEG int float_exponent_bias[4] ALIGN16_END = {
        127, 127, 127, 127};
    static const int float_exponent_shift = 23;
    const __m128i two_n_exponent =
        _mm_add_epi32(x_minus_half_floor, *((__m128i*)float_exponent_bias));
    const __m128 two_n =
        _mm_castsi128_ps(_mm_slli_epi32(two_n_exponent, float_exponent_shift));
    // Compute y.
    const __m128 y = _mm_sub_ps(x_max, _mm_cvtepi32_ps(x_minus_half_floor));
    // Approximate 2^y ~= C2 * y^2 + C1 * y + C0.
    static const ALIGN16_BEG float C2[4] ALIGN16_END = {
        3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f, 3.3718944e-1f};
    static const ALIGN16_BEG float C1[4] ALIGN16_END = {
        6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f, 6.5763628e-1f};
    static const ALIGN16_BEG float C0[4] ALIGN16_END = {1.0017247f, 1.0017247f,
                                                        1.0017247f, 1.0017247f};
    const __m128 exp2_y_0 = _mm_mul_ps(y, *((__m128*)C2));
    const __m128 exp2_y_1 = _mm_add_ps(exp2_y_0, *((__m128*)C1));
    const __m128 exp2_y_2 = _mm_mul_ps(exp2_y_1, y);
    const __m128 exp2_y = _mm_add_ps(exp2_y_2, *((__m128*)C0));

    // Combine parts.
    a_exp_b = _mm_mul_ps(exp2_y, two_n);
  }
  return a_exp_b;
}
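
// Illustrative use of mm_pow_ps (a sketch, not part of the algorithm):
//   const __m128 base = _mm_setr_ps(0.25f, 0.5f, 0.75f, 1.0f);
//   const __m128 squared = mm_pow_ps(base, _mm_set1_ps(2.0f));
//   // squared ~= {0.0625, 0.25, 0.5625, 1.0} up to the approximation error.
// The bit-level log2/exp2 tricks assume positive, finite, normalized inputs;
// zero, negative, denormal, infinite and NaN values are not handled.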

static void OverdriveAndSuppressSSE2(AecCore* aec,
                                     float hNl[PART_LEN1],
                                     const float hNlFb,
                                     float efw[2][PART_LEN1]) {
  int i;
  const __m128 vec_hNlFb = _mm_set1_ps(hNlFb);
  const __m128 vec_one = _mm_set1_ps(1.0f);
  const __m128 vec_minus_one = _mm_set1_ps(-1.0f);
  const __m128 vec_overDriveSm = _mm_set1_ps(aec->overDriveSm);
  // vectorized code (four at once)
  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    // Weight subbands
    __m128 vec_hNl = _mm_loadu_ps(&hNl[i]);
    const __m128 vec_weightCurve = _mm_loadu_ps(&WebRtcAec_weightCurve[i]);
    const __m128 bigger = _mm_cmpgt_ps(vec_hNl, vec_hNlFb);
    const __m128 vec_weightCurve_hNlFb = _mm_mul_ps(vec_weightCurve, vec_hNlFb);
    const __m128 vec_one_weightCurve = _mm_sub_ps(vec_one, vec_weightCurve);
    const __m128 vec_one_weightCurve_hNl =
        _mm_mul_ps(vec_one_weightCurve, vec_hNl);
    const __m128 vec_if0 = _mm_andnot_ps(bigger, vec_hNl);
    const __m128 vec_if1 = _mm_and_ps(
        bigger, _mm_add_ps(vec_weightCurve_hNlFb, vec_one_weightCurve_hNl));
    vec_hNl = _mm_or_ps(vec_if0, vec_if1);

    {
      const __m128 vec_overDriveCurve =
          _mm_loadu_ps(&WebRtcAec_overDriveCurve[i]);
      const __m128 vec_overDriveSm_overDriveCurve =
          _mm_mul_ps(vec_overDriveSm, vec_overDriveCurve);
      vec_hNl = mm_pow_ps(vec_hNl, vec_overDriveSm_overDriveCurve);
      _mm_storeu_ps(&hNl[i], vec_hNl);
    }

    // Suppress error signal
    {
      __m128 vec_efw_re = _mm_loadu_ps(&efw[0][i]);
      __m128 vec_efw_im = _mm_loadu_ps(&efw[1][i]);
      vec_efw_re = _mm_mul_ps(vec_efw_re, vec_hNl);
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_hNl);

      // Ooura fft returns incorrect sign on imaginary component. It matters
      // here because we are making an additive change with comfort noise.
      vec_efw_im = _mm_mul_ps(vec_efw_im, vec_minus_one);
      _mm_storeu_ps(&efw[0][i], vec_efw_re);
      _mm_storeu_ps(&efw[1][i], vec_efw_im);
    }
  }
  // scalar code for the remaining items.
  for (; i < PART_LEN1; i++) {
    // Weight subbands
    if (hNl[i] > hNlFb) {
      hNl[i] = WebRtcAec_weightCurve[i] * hNlFb +
               (1 - WebRtcAec_weightCurve[i]) * hNl[i];
    }
    hNl[i] = powf(hNl[i], aec->overDriveSm * WebRtcAec_overDriveCurve[i]);

    // Suppress error signal
    efw[0][i] *= hNl[i];
    efw[1][i] *= hNl[i];

    // Ooura fft returns incorrect sign on imaginary component. It matters
    // here because we are making an additive change with comfort noise.
    efw[1][i] *= -1;
  }
}

// Horizontal sum: stores the sum of the four lanes of |sum| to |dst|.
__inline static void _mm_add_ps_4x1(__m128 sum, float* dst) {
  // A+B C+D
  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(0, 0, 3, 2)));
  // A+B+C+D A+B+C+D
  sum = _mm_add_ps(sum, _mm_shuffle_ps(sum, sum, _MM_SHUFFLE(1, 1, 1, 1)));
  _mm_store_ss(dst, sum);
}

static int PartitionDelay(const AecCore* aec) {
  // Measures the energy in each filter partition and returns the index of the
  // partition with the highest energy.
  // TODO(bjornv): Spread computational cost by computing one partition per
  // block?
  float wfEnMax = 0;
  int i;
  int delay = 0;

  for (i = 0; i < aec->num_partitions; i++) {
    int j;
    int pos = i * PART_LEN1;
    float wfEn = 0;
    __m128 vec_wfEn = _mm_set1_ps(0.0f);
    // vectorized code (four at once)
    for (j = 0; j + 3 < PART_LEN1; j += 4) {
      const __m128 vec_wfBuf0 = _mm_loadu_ps(&aec->wfBuf[0][pos + j]);
      const __m128 vec_wfBuf1 = _mm_loadu_ps(&aec->wfBuf[1][pos + j]);
      vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf0, vec_wfBuf0));
      vec_wfEn = _mm_add_ps(vec_wfEn, _mm_mul_ps(vec_wfBuf1, vec_wfBuf1));
    }
    _mm_add_ps_4x1(vec_wfEn, &wfEn);

    // scalar code for the remaining items.
    for (; j < PART_LEN1; j++) {
      wfEn += aec->wfBuf[0][pos + j] * aec->wfBuf[0][pos + j] +
              aec->wfBuf[1][pos + j] * aec->wfBuf[1][pos + j];
    }

    if (wfEn > wfEnMax) {
      wfEnMax = wfEn;
      delay = i;
    }
  }
  return delay;
}

// Updates the following smoothed Power Spectral Densities (PSD):
//  - sd  : near-end
//  - se  : residual echo
//  - sx  : far-end
//  - sde : cross-PSD of near-end and residual echo
//  - sxd : cross-PSD of near-end and far-end
//
// In addition to updating the PSDs, the filter divergence state is determined
// and acted upon.
static void SmoothedPSD(AecCore* aec,
                        float efw[2][PART_LEN1],
                        float dfw[2][PART_LEN1],
                        float xfw[2][PART_LEN1]) {
  // Power estimate smoothing coefficients.
  const float* ptrGCoh = aec->extended_filter_enabled
      ? WebRtcAec_kExtendedSmoothingCoefficients[aec->mult - 1]
      : WebRtcAec_kNormalSmoothingCoefficients[aec->mult - 1];
  int i;
  float sdSum = 0, seSum = 0;
  const __m128 vec_15 = _mm_set1_ps(WebRtcAec_kMinFarendPSD);
  const __m128 vec_GCoh0 = _mm_set1_ps(ptrGCoh[0]);
  const __m128 vec_GCoh1 = _mm_set1_ps(ptrGCoh[1]);
  __m128 vec_sdSum = _mm_set1_ps(0.0f);
  __m128 vec_seSum = _mm_set1_ps(0.0f);

  for (i = 0; i + 3 < PART_LEN1; i += 4) {
    const __m128 vec_dfw0 = _mm_loadu_ps(&dfw[0][i]);
    const __m128 vec_dfw1 = _mm_loadu_ps(&dfw[1][i]);
    const __m128 vec_efw0 = _mm_loadu_ps(&efw[0][i]);
    const __m128 vec_efw1 = _mm_loadu_ps(&efw[1][i]);
    const __m128 vec_xfw0 = _mm_loadu_ps(&xfw[0][i]);
    const __m128 vec_xfw1 = _mm_loadu_ps(&xfw[1][i]);
    __m128 vec_sd = _mm_mul_ps(_mm_loadu_ps(&aec->sd[i]), vec_GCoh0);
    __m128 vec_se = _mm_mul_ps(_mm_loadu_ps(&aec->se[i]), vec_GCoh0);
    __m128 vec_sx = _mm_mul_ps(_mm_loadu_ps(&aec->sx[i]), vec_GCoh0);
    __m128 vec_dfw_sumsq = _mm_mul_ps(vec_dfw0, vec_dfw0);
    __m128 vec_efw_sumsq = _mm_mul_ps(vec_efw0, vec_efw0);
    __m128 vec_xfw_sumsq = _mm_mul_ps(vec_xfw0, vec_xfw0);
    vec_dfw_sumsq = _mm_add_ps(vec_dfw_sumsq, _mm_mul_ps(vec_dfw1, vec_dfw1));
    vec_efw_sumsq = _mm_add_ps(vec_efw_sumsq, _mm_mul_ps(vec_efw1, vec_efw1));
    vec_xfw_sumsq = _mm_add_ps(vec_xfw_sumsq, _mm_mul_ps(vec_xfw1, vec_xfw1));
    vec_xfw_sumsq = _mm_max_ps(vec_xfw_sumsq, vec_15);
    vec_sd = _mm_add_ps(vec_sd, _mm_mul_ps(vec_dfw_sumsq, vec_GCoh1));
    vec_se = _mm_add_ps(vec_se, _mm_mul_ps(vec_efw_sumsq, vec_GCoh1));
    vec_sx = _mm_add_ps(vec_sx, _mm_mul_ps(vec_xfw_sumsq, vec_GCoh1));
    _mm_storeu_ps(&aec->sd[i], vec_sd);
    _mm_storeu_ps(&aec->se[i], vec_se);
    _mm_storeu_ps(&aec->sx[i], vec_sx);

    {
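      // aec->sde stores interleaved (re, im) pairs; the shuffles gather four
      // real parts into vec_a and four imaginary parts into vec_b, and the
      // unpacks on the stores re-interleave them.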
      const __m128 vec_3210 = _mm_loadu_ps(&aec->sde[i][0]);
      const __m128 vec_7654 = _mm_loadu_ps(&aec->sde[i + 2][0]);
      __m128 vec_a = _mm_shuffle_ps(vec_3210, vec_7654,
                                    _MM_SHUFFLE(2, 0, 2, 0));
      __m128 vec_b = _mm_shuffle_ps(vec_3210, vec_7654,
                                    _MM_SHUFFLE(3, 1, 3, 1));
      __m128 vec_dfwefw0011 = _mm_mul_ps(vec_dfw0, vec_efw0);
      __m128 vec_dfwefw0110 = _mm_mul_ps(vec_dfw0, vec_efw1);
      vec_a = _mm_mul_ps(vec_a, vec_GCoh0);
      vec_b = _mm_mul_ps(vec_b, vec_GCoh0);
      vec_dfwefw0011 = _mm_add_ps(vec_dfwefw0011,
                                  _mm_mul_ps(vec_dfw1, vec_efw1));
      vec_dfwefw0110 = _mm_sub_ps(vec_dfwefw0110,
                                  _mm_mul_ps(vec_dfw1, vec_efw0));
      vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwefw0011, vec_GCoh1));
      vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwefw0110, vec_GCoh1));
      _mm_storeu_ps(&aec->sde[i][0], _mm_unpacklo_ps(vec_a, vec_b));
      _mm_storeu_ps(&aec->sde[i + 2][0], _mm_unpackhi_ps(vec_a, vec_b));
    }

    {
      const __m128 vec_3210 = _mm_loadu_ps(&aec->sxd[i][0]);
      const __m128 vec_7654 = _mm_loadu_ps(&aec->sxd[i + 2][0]);
      __m128 vec_a = _mm_shuffle_ps(vec_3210, vec_7654,
                                    _MM_SHUFFLE(2, 0, 2, 0));
      __m128 vec_b = _mm_shuffle_ps(vec_3210, vec_7654,
                                    _MM_SHUFFLE(3, 1, 3, 1));
      __m128 vec_dfwxfw0011 = _mm_mul_ps(vec_dfw0, vec_xfw0);
      __m128 vec_dfwxfw0110 = _mm_mul_ps(vec_dfw0, vec_xfw1);
      vec_a = _mm_mul_ps(vec_a, vec_GCoh0);
      vec_b = _mm_mul_ps(vec_b, vec_GCoh0);
      vec_dfwxfw0011 = _mm_add_ps(vec_dfwxfw0011,
                                  _mm_mul_ps(vec_dfw1, vec_xfw1));
      vec_dfwxfw0110 = _mm_sub_ps(vec_dfwxfw0110,
                                  _mm_mul_ps(vec_dfw1, vec_xfw0));
      vec_a = _mm_add_ps(vec_a, _mm_mul_ps(vec_dfwxfw0011, vec_GCoh1));
      vec_b = _mm_add_ps(vec_b, _mm_mul_ps(vec_dfwxfw0110, vec_GCoh1));
      _mm_storeu_ps(&aec->sxd[i][0], _mm_unpacklo_ps(vec_a, vec_b));
      _mm_storeu_ps(&aec->sxd[i + 2][0], _mm_unpackhi_ps(vec_a, vec_b));
    }

    vec_sdSum = _mm_add_ps(vec_sdSum, vec_sd);
    vec_seSum = _mm_add_ps(vec_seSum, vec_se);
  }

  _mm_add_ps_4x1(vec_sdSum, &sdSum);
  _mm_add_ps_4x1(vec_seSum, &seSum);

  for (; i < PART_LEN1; i++) {
    aec->sd[i] = ptrGCoh[0] * aec->sd[i] +
                 ptrGCoh[1] * (dfw[0][i] * dfw[0][i] + dfw[1][i] * dfw[1][i]);
    aec->se[i] = ptrGCoh[0] * aec->se[i] +
                 ptrGCoh[1] * (efw[0][i] * efw[0][i] + efw[1][i] * efw[1][i]);
    // We threshold here to protect against the ill-effects of a zero farend.
    // The threshold is not arbitrarily chosen, but balances protection and
    // adverse interaction with the algorithm's tuning.
    // TODO(bjornv): investigate further why this is so sensitive.
    aec->sx[i] =
        ptrGCoh[0] * aec->sx[i] +
        ptrGCoh[1] * WEBRTC_SPL_MAX(
            xfw[0][i] * xfw[0][i] + xfw[1][i] * xfw[1][i],
            WebRtcAec_kMinFarendPSD);

    aec->sde[i][0] =
        ptrGCoh[0] * aec->sde[i][0] +
        ptrGCoh[1] * (dfw[0][i] * efw[0][i] + dfw[1][i] * efw[1][i]);
    aec->sde[i][1] =
        ptrGCoh[0] * aec->sde[i][1] +
        ptrGCoh[1] * (dfw[0][i] * efw[1][i] - dfw[1][i] * efw[0][i]);

    aec->sxd[i][0] =
        ptrGCoh[0] * aec->sxd[i][0] +
        ptrGCoh[1] * (dfw[0][i] * xfw[0][i] + dfw[1][i] * xfw[1][i]);
    aec->sxd[i][1] =
        ptrGCoh[0] * aec->sxd[i][1] +
        ptrGCoh[1] * (dfw[0][i] * xfw[1][i] - dfw[1][i] * xfw[0][i]);

    sdSum += aec->sd[i];
    seSum += aec->se[i];
  }

  // Divergent filter safeguard.
  aec->divergeState = (aec->divergeState ? 1.05f : 1.0f) * seSum > sdSum;

  if (aec->divergeState)
    memcpy(efw, dfw, sizeof(efw[0][0]) * 2 * PART_LEN1);

  // Reset if error is significantly larger than nearend (13 dB, i.e. a power
  // ratio of 10^1.3 ~= 19.95).
  if (!aec->extended_filter_enabled && seSum > (19.95f * sdSum))
    memset(aec->wfBuf, 0, sizeof(aec->wfBuf));
}

// Window time domain data to be used by the fft.
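// The first PART_LEN samples get the rising half of the sqrt-Hanning window;
// the next PART_LEN samples get the falling half, read in reverse from the
// same table.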
__inline static void WindowData(float* x_windowed, const float* x) {
  int i;
  for (i = 0; i < PART_LEN; i += 4) {
    const __m128 vec_Buf1 = _mm_loadu_ps(&x[i]);
    const __m128 vec_Buf2 = _mm_loadu_ps(&x[PART_LEN + i]);
    const __m128 vec_sqrtHanning = _mm_load_ps(&WebRtcAec_sqrtHanning[i]);
    // A B C D
    __m128 vec_sqrtHanning_rev =
        _mm_loadu_ps(&WebRtcAec_sqrtHanning[PART_LEN - i - 3]);
    // D C B A
    vec_sqrtHanning_rev =
        _mm_shuffle_ps(vec_sqrtHanning_rev, vec_sqrtHanning_rev,
                       _MM_SHUFFLE(0, 1, 2, 3));
    _mm_storeu_ps(&x_windowed[i], _mm_mul_ps(vec_Buf1, vec_sqrtHanning));
    _mm_storeu_ps(&x_windowed[PART_LEN + i],
                  _mm_mul_ps(vec_Buf2, vec_sqrtHanning_rev));
  }
}

// Puts fft output data into a complex-valued array.
__inline static void StoreAsComplex(const float* data,
                                    float data_complex[2][PART_LEN1]) {
  int i;
  for (i = 0; i < PART_LEN; i += 4) {
    const __m128 vec_fft0 = _mm_loadu_ps(&data[2 * i]);
    const __m128 vec_fft4 = _mm_loadu_ps(&data[2 * i + 4]);
    const __m128 vec_a = _mm_shuffle_ps(vec_fft0, vec_fft4,
                                        _MM_SHUFFLE(2, 0, 2, 0));
    const __m128 vec_b = _mm_shuffle_ps(vec_fft0, vec_fft4,
                                        _MM_SHUFFLE(3, 1, 3, 1));
    _mm_storeu_ps(&data_complex[0][i], vec_a);
    _mm_storeu_ps(&data_complex[1][i], vec_b);
  }
  // Fix the DC and Nyquist bins: the rdft packs the real part of the Nyquist
  // bin into data[1], and both bins are purely real.
  data_complex[1][0] = 0;
  data_complex[1][PART_LEN] = 0;
  data_complex[0][0] = data[0];
  data_complex[0][PART_LEN] = data[1];
}

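// Computes the subband coherence between near-end and error,
//   cohde[i] = |sde[i]|^2 / (sd[i] * se[i] + 1e-10f),
// and between near-end and far-end,
//   cohxd[i] = |sxd[i]|^2 / (sx[i] * sd[i] + 1e-10f).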
static void SubbandCoherenceSSE2(AecCore* aec,
                                 float efw[2][PART_LEN1],
                                 float xfw[2][PART_LEN1],
                                 float* fft,
                                 float* cohde,
                                 float* cohxd) {
  float dfw[2][PART_LEN1];
  int i;

  if (aec->delayEstCtr == 0)
    aec->delayIdx = PartitionDelay(aec);

  // Use the delayed far-end spectrum.
  memcpy(xfw,
         aec->xfwBuf + aec->delayIdx * PART_LEN1,
         sizeof(xfw[0][0]) * 2 * PART_LEN1);

  // Windowed near-end fft
  WindowData(fft, aec->dBuf);
  aec_rdft_forward_128(fft);
  StoreAsComplex(fft, dfw);

  // Windowed error fft
  WindowData(fft, aec->eBuf);
  aec_rdft_forward_128(fft);
  StoreAsComplex(fft, efw);

  SmoothedPSD(aec, efw, dfw, xfw);

  {
    const __m128 vec_1eminus10 = _mm_set1_ps(1e-10f);

    // Subband coherence
    for (i = 0; i + 3 < PART_LEN1; i += 4) {
      const __m128 vec_sd = _mm_loadu_ps(&aec->sd[i]);
      const __m128 vec_se = _mm_loadu_ps(&aec->se[i]);
      const __m128 vec_sx = _mm_loadu_ps(&aec->sx[i]);
      const __m128 vec_sdse = _mm_add_ps(vec_1eminus10,
                                         _mm_mul_ps(vec_sd, vec_se));
      const __m128 vec_sdsx = _mm_add_ps(vec_1eminus10,
                                         _mm_mul_ps(vec_sd, vec_sx));
      const __m128 vec_sde_3210 = _mm_loadu_ps(&aec->sde[i][0]);
      const __m128 vec_sde_7654 = _mm_loadu_ps(&aec->sde[i + 2][0]);
      const __m128 vec_sxd_3210 = _mm_loadu_ps(&aec->sxd[i][0]);
      const __m128 vec_sxd_7654 = _mm_loadu_ps(&aec->sxd[i + 2][0]);
      const __m128 vec_sde_0 = _mm_shuffle_ps(vec_sde_3210, vec_sde_7654,
                                              _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vec_sde_1 = _mm_shuffle_ps(vec_sde_3210, vec_sde_7654,
                                              _MM_SHUFFLE(3, 1, 3, 1));
      const __m128 vec_sxd_0 = _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654,
                                              _MM_SHUFFLE(2, 0, 2, 0));
      const __m128 vec_sxd_1 = _mm_shuffle_ps(vec_sxd_3210, vec_sxd_7654,
                                              _MM_SHUFFLE(3, 1, 3, 1));
      __m128 vec_cohde = _mm_mul_ps(vec_sde_0, vec_sde_0);
      __m128 vec_cohxd = _mm_mul_ps(vec_sxd_0, vec_sxd_0);
      vec_cohde = _mm_add_ps(vec_cohde, _mm_mul_ps(vec_sde_1, vec_sde_1));
      vec_cohde = _mm_div_ps(vec_cohde, vec_sdse);
      vec_cohxd = _mm_add_ps(vec_cohxd, _mm_mul_ps(vec_sxd_1, vec_sxd_1));
      vec_cohxd = _mm_div_ps(vec_cohxd, vec_sdsx);
      _mm_storeu_ps(&cohde[i], vec_cohde);
      _mm_storeu_ps(&cohxd[i], vec_cohxd);
    }

    // scalar code for the remaining items.
    for (; i < PART_LEN1; i++) {
      cohde[i] =
          (aec->sde[i][0] * aec->sde[i][0] + aec->sde[i][1] * aec->sde[i][1]) /
          (aec->sd[i] * aec->se[i] + 1e-10f);
      cohxd[i] =
          (aec->sxd[i][0] * aec->sxd[i][0] + aec->sxd[i][1] * aec->sxd[i][1]) /
          (aec->sx[i] * aec->sd[i] + 1e-10f);
    }
  }
}

void WebRtcAec_InitAec_SSE2(void) {
  WebRtcAec_FilterFar = FilterFarSSE2;
  WebRtcAec_ScaleErrorSignal = ScaleErrorSignalSSE2;
  WebRtcAec_FilterAdaptation = FilterAdaptationSSE2;
  WebRtcAec_OverdriveAndSuppress = OverdriveAndSuppressSSE2;
  WebRtcAec_SubbandCoherence = SubbandCoherenceSSE2;
}