/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_coding/neteq/merge.h"

#include <assert.h>
#include <string.h>  // memmove, memcpy, memset, size_t

#include <algorithm>  // min, max

#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
#include "webrtc/modules/audio_coding/neteq/audio_multi_vector.h"
#include "webrtc/modules/audio_coding/neteq/dsp_helper.h"
#include "webrtc/modules/audio_coding/neteq/expand.h"
#include "webrtc/modules/audio_coding/neteq/sync_buffer.h"
#include "webrtc/system_wrappers/interface/scoped_ptr.h"

namespace webrtc {

int Merge::Process(int16_t* input, size_t input_length,
                   int16_t* external_mute_factor_array,
                   AudioMultiVector* output) {
  // TODO(hlundin): Change to an enumerator and skip assert.
  assert(fs_hz_ == 8000 || fs_hz_ == 16000 || fs_hz_ == 32000 ||
         fs_hz_ == 48000);
  assert(fs_hz_ <= kMaxSampleRate);  // Should not be possible.

  int old_length;
  int expand_period;
  // Get expansion data to overlap and mix with.
  int expanded_length = GetExpandedSignal(&old_length, &expand_period);

  // Transfer input signal to an AudioMultiVector.
  AudioMultiVector input_vector(num_channels_);
  input_vector.PushBackInterleaved(input, input_length);
  size_t input_length_per_channel = input_vector.Size();
  assert(input_length_per_channel == input_length / num_channels_);

  int16_t best_correlation_index = 0;
  size_t output_length = 0;

  for (size_t channel = 0; channel < num_channels_; ++channel) {
    int16_t* input_channel = &input_vector[channel][0];
    int16_t* expanded_channel = &expanded_[channel][0];
    int16_t expanded_max, input_max;
    int16_t new_mute_factor = SignalScaling(
        input_channel, static_cast<int>(input_length_per_channel),
        expanded_channel, &expanded_max, &input_max);

    // Adjust muting factor (product of "main" muting factor and expand muting
    // factor).
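    // Both factors are in Q14, so their product is in Q28; shifting right by
    // 14 brings the result back to Q14.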
    int16_t* external_mute_factor = &external_mute_factor_array[channel];
    *external_mute_factor =
        (*external_mute_factor * expand_->MuteFactor(channel)) >> 14;

    // Update |external_mute_factor| if it is lower than |new_mute_factor|.
    if (new_mute_factor > *external_mute_factor) {
      *external_mute_factor = std::min(new_mute_factor,
                                       static_cast<int16_t>(16384));
    }

    if (channel == 0) {
      // Downsample, correlate, and find strongest correlation period for the
      // master (i.e., first) channel only.
      // Downsample to 4kHz sample rate.
      Downsample(input_channel, static_cast<int>(input_length_per_channel),
                 expanded_channel, expanded_length);

      // Calculate the lag of the strongest correlation period.
      best_correlation_index = CorrelateAndPeakSearch(
          expanded_max, input_max, old_length,
          static_cast<int>(input_length_per_channel), expand_period);
    }

    static const int kTempDataSize = 3600;
    int16_t temp_data[kTempDataSize];  // TODO(hlundin) Remove this.
    int16_t* decoded_output = temp_data + best_correlation_index;

    // Mute the new decoded data if needed (and unmute it linearly).
    // This is the overlapping part of expanded_signal.
    int interpolation_length = std::min(
        kMaxCorrelationLength * fs_mult_,
        expanded_length - best_correlation_index);
    interpolation_length = std::min(interpolation_length,
                                    static_cast<int>(input_length_per_channel));
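    // The overlap cannot be longer than the expanded data remaining after the
    // correlation offset, nor longer than the new input itself.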
    if (*external_mute_factor < 16384) {
      // Set a suitable muting slope (Q20). 0.004 for NB, 0.002 for WB,
      // and so on.
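      // (4194 in Q20 is approximately 0.004; dividing by fs_mult_ scales the
      // slope with the sample rate.)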
      int increment = 4194 / fs_mult_;
      *external_mute_factor = DspHelper::RampSignal(input_channel,
                                                    interpolation_length,
                                                    *external_mute_factor,
                                                    increment);
      DspHelper::UnmuteSignal(&input_channel[interpolation_length],
                              input_length_per_channel - interpolation_length,
                              external_mute_factor, increment,
                              &decoded_output[interpolation_length]);
    } else {
      // No muting needed.
      memmove(
          &decoded_output[interpolation_length],
          &input_channel[interpolation_length],
          sizeof(int16_t) * (input_length_per_channel - interpolation_length));
    }

    // Do overlap and mix linearly.
    int increment = 16384 / (interpolation_length + 1);  // In Q14.
    int16_t mute_factor = 16384 - increment;
    memmove(temp_data, expanded_channel,
            sizeof(int16_t) * best_correlation_index);
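    // |mute_factor| starts just below 1.0 in Q14 and is decreased by
    // |increment| for each sample, so the contribution shifts linearly from
    // the expanded signal to the new decoded signal over the overlap.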
    DspHelper::CrossFade(&expanded_channel[best_correlation_index],
                         input_channel, interpolation_length,
                         &mute_factor, increment, decoded_output);

    output_length = best_correlation_index + input_length_per_channel;
    if (channel == 0) {
      assert(output->Empty());  // Output should be empty at this point.
      output->AssertSize(output_length);
    } else {
      assert(output->Size() == output_length);
    }
    memcpy(&(*output)[channel][0], temp_data,
           sizeof(temp_data[0]) * output_length);
  }

  // Copy back the first part of the data to |sync_buffer_| and remove it from
  // |output|.
  sync_buffer_->ReplaceAtIndex(*output, old_length, sync_buffer_->next_index());
  output->PopFront(old_length);

  // Return new added length. |old_length| samples were borrowed from
  // |sync_buffer_|.
  return static_cast<int>(output_length) - old_length;
}

int Merge::GetExpandedSignal(int* old_length, int* expand_period) {
  // Check how much data is left from earlier.
  *old_length = static_cast<int>(sync_buffer_->FutureLength());
  // Should never be less than overlap_length.
  assert(*old_length >= static_cast<int>(expand_->overlap_length()));
  // Generate data to merge the overlap with using expand.
  expand_->SetParametersForMergeAfterExpand();

  if (*old_length >= 210 * kMaxSampleRate / 8000) {
    // TODO(hlundin): Write test case for this.
    // The number of samples available in the sync buffer is more than what
    // fits in expanded_signal. Keep the first 210 * kMaxSampleRate / 8000
    // samples, but shift them towards the end of the buffer. This is ok, since
    // all of the buffer will be expand data anyway, so as long as the
    // beginning is left untouched, we're fine.
    int16_t length_diff = *old_length - 210 * kMaxSampleRate / 8000;
    sync_buffer_->InsertZerosAtIndex(length_diff, sync_buffer_->next_index());
    *old_length = 210 * kMaxSampleRate / 8000;
    // This is the truncated length.
  }
  // This assert should always be true thanks to the if statement above.
  assert(210 * kMaxSampleRate / 8000 - *old_length >= 0);

  AudioMultiVector expanded_temp(num_channels_);
  expand_->Process(&expanded_temp);
  *expand_period = static_cast<int>(expanded_temp.Size());  // Samples per
                                                            // channel.

  expanded_.Clear();
  // Copy what is left from earlier into the expanded vector.
  expanded_.PushBackFromIndex(*sync_buffer_, sync_buffer_->next_index());
  assert(expanded_.Size() == static_cast<size_t>(*old_length));
  assert(expanded_temp.Size() > 0);
  // Do "ugly" copy and paste from the expanded in order to generate more data
  // to correlate (but not interpolate) with.
  const int required_length = (120 + 80 + 2) * fs_mult_;
  if (expanded_.Size() < static_cast<size_t>(required_length)) {
    while (expanded_.Size() < static_cast<size_t>(required_length)) {
      // Append one more pitch period each time.
      expanded_.PushBack(expanded_temp);
    }
    // Trim the length to exactly |required_length|.
    expanded_.PopBack(expanded_.Size() - required_length);
  }
  assert(expanded_.Size() >= static_cast<size_t>(required_length));
  return required_length;
}

int16_t Merge::SignalScaling(const int16_t* input, int input_length,
                             const int16_t* expanded_signal,
                             int16_t* expanded_max, int16_t* input_max) const {
  // Adjust the muting factor if the new vector has more or less energy than
  // the BGN.
  const int mod_input_length = std::min(64 * fs_mult_, input_length);
  *expanded_max = WebRtcSpl_MaxAbsValueW16(expanded_signal, mod_input_length);
  *input_max = WebRtcSpl_MaxAbsValueW16(input, mod_input_length);

  // Calculate energy of expanded signal.
  // |log_fs_mult| is log2(fs_mult_), but is not exact for 48000 Hz.
  int log_fs_mult = 30 - WebRtcSpl_NormW32(fs_mult_);
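  // For example, fs_mult_ == 1 gives log_fs_mult == 0 and fs_mult_ == 4 gives
  // 2, while fs_mult_ == 6 (48000 Hz) also gives 2 although log2(6) is about
  // 2.58.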
  int expanded_shift = 6 + log_fs_mult
      - WebRtcSpl_NormW32(*expanded_max * *expanded_max);
  expanded_shift = std::max(expanded_shift, 0);
  int32_t energy_expanded = WebRtcSpl_DotProductWithScale(expanded_signal,
                                                          expanded_signal,
                                                          mod_input_length,
                                                          expanded_shift);

  // Calculate energy of input signal.
  int input_shift = 6 + log_fs_mult -
      WebRtcSpl_NormW32(*input_max * *input_max);
  input_shift = std::max(input_shift, 0);
  int32_t energy_input = WebRtcSpl_DotProductWithScale(input, input,
                                                       mod_input_length,
                                                       input_shift);

  // Align to the same Q-domain.
  if (input_shift > expanded_shift) {
    energy_expanded = energy_expanded >> (input_shift - expanded_shift);
  } else {
    energy_input = energy_input >> (expanded_shift - input_shift);
  }

  // Calculate muting factor to use for new frame.
  int16_t mute_factor;
  if (energy_input > energy_expanded) {
    // Normalize |energy_input| to 14 bits.
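    // Shifting by WebRtcSpl_NormW32(energy_input) - 17 places the most
    // significant bit at position 30 - 17 = 13, i.e., the value fits in
    // 14 bits.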
    int16_t temp_shift = WebRtcSpl_NormW32(energy_input) - 17;
    energy_input = WEBRTC_SPL_SHIFT_W32(energy_input, temp_shift);
    // Put |energy_expanded| in a domain 14 higher, so that
    // energy_expanded / energy_input is in Q14.
    energy_expanded = WEBRTC_SPL_SHIFT_W32(energy_expanded, temp_shift + 14);
    // Calculate sqrt(energy_expanded / energy_input) in Q14.
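    // (The ratio is in Q14; shifting it left by another 14 bits gives Q28,
    // and the square root halves the Q domain back to Q14.)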
    mute_factor = WebRtcSpl_SqrtFloor((energy_expanded / energy_input) << 14);
  } else {
    // Set to 1 (in Q14) when |expanded| has higher energy than |input|.
    mute_factor = 16384;
  }

  return mute_factor;
}

// TODO(hlundin): There are some parameter values in this method that seem
// strange. Compare with Expand::Correlation.
void Merge::Downsample(const int16_t* input, int input_length,
                       const int16_t* expanded_signal, int expanded_length) {
  const int16_t* filter_coefficients;
  int num_coefficients;
  int decimation_factor = fs_hz_ / 4000;
  static const int kCompensateDelay = 0;
  int length_limit = fs_hz_ / 100;  // 10 ms in samples.
  if (fs_hz_ == 8000) {
    filter_coefficients = DspHelper::kDownsample8kHzTbl;
    num_coefficients = 3;
  } else if (fs_hz_ == 16000) {
    filter_coefficients = DspHelper::kDownsample16kHzTbl;
    num_coefficients = 5;
  } else if (fs_hz_ == 32000) {
    filter_coefficients = DspHelper::kDownsample32kHzTbl;
    num_coefficients = 7;
  } else {  // fs_hz_ == 48000
    filter_coefficients = DspHelper::kDownsample48kHzTbl;
    num_coefficients = 7;
  }
  int signal_offset = num_coefficients - 1;
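  // Skip the first |num_coefficients - 1| samples, leaving them as filter
  // history for the downsampling below.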
  WebRtcSpl_DownsampleFast(&expanded_signal[signal_offset],
                           expanded_length - signal_offset,
                           expanded_downsampled_, kExpandDownsampLength,
                           filter_coefficients, num_coefficients,
                           decimation_factor, kCompensateDelay);
  if (input_length <= length_limit) {
    // Not quite long enough, so we have to cheat a bit.
    int16_t temp_len = input_length - signal_offset;
    // TODO(hlundin): Should |downsamp_temp_len| be corrected for round-off
    // errors? I.e., (temp_len + decimation_factor - 1) / decimation_factor?
    int16_t downsamp_temp_len = temp_len / decimation_factor;
    WebRtcSpl_DownsampleFast(&input[signal_offset], temp_len,
                             input_downsampled_, downsamp_temp_len,
                             filter_coefficients, num_coefficients,
                             decimation_factor, kCompensateDelay);
    memset(&input_downsampled_[downsamp_temp_len], 0,
           sizeof(int16_t) * (kInputDownsampLength - downsamp_temp_len));
  } else {
    WebRtcSpl_DownsampleFast(&input[signal_offset],
                             input_length - signal_offset, input_downsampled_,
                             kInputDownsampLength, filter_coefficients,
                             num_coefficients, decimation_factor,
                             kCompensateDelay);
  }
}

int16_t Merge::CorrelateAndPeakSearch(int16_t expanded_max, int16_t input_max,
                                      int start_position, int input_length,
                                      int expand_period) const {
  // Calculate correlation without any normalization.
  const int max_corr_length = kMaxCorrelationLength;
  int stop_position_downsamp = std::min(
      max_corr_length, expand_->max_lag() / (fs_mult_ * 2) + 1);
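  // expand_->max_lag() is given at the full sample rate; dividing by
  // fs_mult_ * 2 (== fs_hz_ / 4000) converts it to the 4 kHz domain of the
  // downsampled signals.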
  int16_t correlation_shift = 0;
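  // For strong signals, the correlation is scaled down (by
  // |correlation_shift|) to reduce the risk of overflowing the 32-bit
  // accumulation below.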
  if (expanded_max * input_max > 26843546) {
    correlation_shift = 3;
  }

  int32_t correlation[kMaxCorrelationLength];
  WebRtcSpl_CrossCorrelation(correlation, input_downsampled_,
                             expanded_downsampled_, kInputDownsampLength,
                             stop_position_downsamp, correlation_shift, 1);

  // Normalize correlation to 14 bits and copy to a 16-bit array.
  const int pad_length = static_cast<int>(expand_->overlap_length() - 1);
  const int correlation_buffer_size = 2 * pad_length + kMaxCorrelationLength;
  scoped_ptr<int16_t[]> correlation16(new int16_t[correlation_buffer_size]);
  memset(correlation16.get(), 0, correlation_buffer_size * sizeof(int16_t));
  int16_t* correlation_ptr = &correlation16[pad_length];
  int32_t max_correlation = WebRtcSpl_MaxAbsValueW32(correlation,
                                                     stop_position_downsamp);
  int16_t norm_shift = std::max(0, 17 - WebRtcSpl_NormW32(max_correlation));
  WebRtcSpl_VectorBitShiftW32ToW16(correlation_ptr, stop_position_downsamp,
                                   correlation, norm_shift);
  // Calculate allowed starting point for peak finding.
  // The peak location best_correlation_index must fulfill two criteria:
  // (1) best_correlation_index + input_length >=
  //     timestamps_per_call_ + expand_->overlap_length();
  // (2) best_correlation_index + input_length >= start_position.
  int start_index = timestamps_per_call_ +
      static_cast<int>(expand_->overlap_length());
  start_index = std::max(start_position, start_index);
  start_index = std::max(start_index - input_length, 0);
  // Downscale starting index to 4kHz domain. (fs_mult_ * 2 = fs_hz_ / 4000.)
  int start_index_downsamp = start_index / (fs_mult_ * 2);

  // Calculate a modified |stop_position_downsamp| to account for the increased
  // start index |start_index_downsamp| and the effective array length.
  int modified_stop_pos =
      std::min(stop_position_downsamp,
               kMaxCorrelationLength + pad_length - start_index_downsamp);
  int best_correlation_index;
  int16_t best_correlation;
  static const int kNumCorrelationCandidates = 1;
  DspHelper::PeakDetection(&correlation_ptr[start_index_downsamp],
                           modified_stop_pos, kNumCorrelationCandidates,
                           fs_mult_, &best_correlation_index,
                           &best_correlation);
  // Compensate for modified start index.
  best_correlation_index += start_index;
  // Ensure that underrun does not occur for the 10 ms case, i.e., we have to
  // get at least 10 ms + overlap. (This should never happen thanks to the
  // above modification of the peak-finding starting point.)
  while ((best_correlation_index + input_length) <
      static_cast<int>(timestamps_per_call_ + expand_->overlap_length()) ||
      best_correlation_index + input_length < start_position) {
    assert(false);  // Should never happen.
    best_correlation_index += expand_period;  // Jump one lag ahead.
  }
  return best_correlation_index;
}

int Merge::RequiredFutureSamples() {
  return static_cast<int>(fs_hz_ / 100 * num_channels_);  // 10 ms.
}

}  // namespace webrtc