audio_processing_impl.cc revision 13b96ba90f72164134019cbfc07d4a47cf1fd091
/*
 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "webrtc/modules/audio_processing/audio_processing_impl.h"

#include <assert.h>
#include <algorithm>

#include "webrtc/base/checks.h"
#include "webrtc/base/platform_file.h"
#include "webrtc/common_audio/audio_converter.h"
#include "webrtc/common_audio/channel_buffer.h"
#include "webrtc/common_audio/include/audio_util.h"
#include "webrtc/common_audio/signal_processing/include/signal_processing_library.h"
extern "C" {
#include "webrtc/modules/audio_processing/aec/aec_core.h"
}
#include "webrtc/modules/audio_processing/agc/agc_manager_direct.h"
#include "webrtc/modules/audio_processing/audio_buffer.h"
#include "webrtc/modules/audio_processing/beamformer/nonlinear_beamformer.h"
#include "webrtc/modules/audio_processing/common.h"
#include "webrtc/modules/audio_processing/echo_cancellation_impl.h"
#include "webrtc/modules/audio_processing/echo_control_mobile_impl.h"
#include "webrtc/modules/audio_processing/gain_control_impl.h"
#include "webrtc/modules/audio_processing/high_pass_filter_impl.h"
#include "webrtc/modules/audio_processing/intelligibility/intelligibility_enhancer.h"
#include "webrtc/modules/audio_processing/level_estimator_impl.h"
#include "webrtc/modules/audio_processing/noise_suppression_impl.h"
#include "webrtc/modules/audio_processing/processing_component.h"
#include "webrtc/modules/audio_processing/transient/transient_suppressor.h"
#include "webrtc/modules/audio_processing/voice_detection_impl.h"
#include "webrtc/modules/interface/module_common_types.h"
#include "webrtc/system_wrappers/interface/critical_section_wrapper.h"
#include "webrtc/system_wrappers/interface/file_wrapper.h"
#include "webrtc/system_wrappers/interface/logging.h"
#include "webrtc/system_wrappers/interface/metrics.h"

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
// Files generated at build-time by the protobuf compiler.
#ifdef WEBRTC_ANDROID_PLATFORM_BUILD
#include "external/webrtc/webrtc/modules/audio_processing/debug.pb.h"
#else
#include "webrtc/audio_processing/debug.pb.h"
#endif
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP

#define RETURN_ON_ERR(expr) \
  do {                      \
    int err = (expr);       \
    if (err != kNoError) {  \
      return err;           \
    }                       \
  } while (0)
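
// Example usage: wrap any call that returns an AudioProcessing error code so
// that a failure is propagated to the caller immediately, e.g.
//   RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
// as done in ProcessStream() and AnalyzeReverseStream() below.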

namespace webrtc {
namespace {

static bool LayoutHasKeyboard(AudioProcessing::ChannelLayout layout) {
  switch (layout) {
    case AudioProcessing::kMono:
    case AudioProcessing::kStereo:
      return false;
    case AudioProcessing::kMonoAndKeyboard:
    case AudioProcessing::kStereoAndKeyboard:
      return true;
  }

  assert(false);
  return false;
}

}  // namespace

// Throughout webrtc, it's assumed that success is represented by zero.
static_assert(AudioProcessing::kNoError == 0, "kNoError must be zero");

// This class has two main functionalities:
//
// 1) It is returned instead of the real GainControl after the new AGC has been
//    enabled in order to prevent an outside user from overriding compression
//    settings. It doesn't do anything in its implementation, except for
//    delegating the const methods and Enable calls to the real GainControl, so
//    AGC can still be disabled.
//
// 2) It is injected into AgcManagerDirect and implements volume callbacks for
//    getting and setting the volume level. It just caches this value to be used
//    in VoiceEngine later.
class GainControlForNewAgc : public GainControl, public VolumeCallbacks {
 public:
  explicit GainControlForNewAgc(GainControlImpl* gain_control)
      : real_gain_control_(gain_control), volume_(0) {}

  // GainControl implementation.
  int Enable(bool enable) override {
    return real_gain_control_->Enable(enable);
  }
  bool is_enabled() const override { return real_gain_control_->is_enabled(); }
  int set_stream_analog_level(int level) override {
    volume_ = level;
    return AudioProcessing::kNoError;
  }
  int stream_analog_level() override { return volume_; }
  int set_mode(Mode mode) override { return AudioProcessing::kNoError; }
  Mode mode() const override { return GainControl::kAdaptiveAnalog; }
  int set_target_level_dbfs(int level) override {
    return AudioProcessing::kNoError;
  }
  int target_level_dbfs() const override {
    return real_gain_control_->target_level_dbfs();
  }
  int set_compression_gain_db(int gain) override {
    return AudioProcessing::kNoError;
  }
  int compression_gain_db() const override {
    return real_gain_control_->compression_gain_db();
  }
  int enable_limiter(bool enable) override { return AudioProcessing::kNoError; }
  bool is_limiter_enabled() const override {
    return real_gain_control_->is_limiter_enabled();
  }
  int set_analog_level_limits(int minimum, int maximum) override {
    return AudioProcessing::kNoError;
  }
  int analog_level_minimum() const override {
    return real_gain_control_->analog_level_minimum();
  }
  int analog_level_maximum() const override {
    return real_gain_control_->analog_level_maximum();
  }
  bool stream_is_saturated() const override {
    return real_gain_control_->stream_is_saturated();
  }

  // VolumeCallbacks implementation.
  void SetMicVolume(int volume) override { volume_ = volume; }
  int GetMicVolume() override { return volume_; }

 private:
  GainControl* real_gain_control_;
  int volume_;
};
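
// When the new AGC is in use, InitializeExperimentalAgc() below injects this
// proxy into AgcManagerDirect as the VolumeCallbacks implementation:
//   agc_manager_.reset(new AgcManagerDirect(gain_control_,
//                                           gain_control_for_new_agc_.get(),
//                                           agc_startup_min_volume_));
// and gain_control() hands the proxy (rather than gain_control_) to external
// callers.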

const int AudioProcessing::kNativeSampleRatesHz[] = {
    AudioProcessing::kSampleRate8kHz,
    AudioProcessing::kSampleRate16kHz,
    AudioProcessing::kSampleRate32kHz,
    AudioProcessing::kSampleRate48kHz};
const size_t AudioProcessing::kNumNativeSampleRates =
    arraysize(AudioProcessing::kNativeSampleRatesHz);
const int AudioProcessing::kMaxNativeSampleRateHz = AudioProcessing::
    kNativeSampleRatesHz[AudioProcessing::kNumNativeSampleRates - 1];
const int AudioProcessing::kMaxAECMSampleRateHz = kSampleRate16kHz;

AudioProcessing* AudioProcessing::Create() {
  Config config;
  return Create(config, nullptr);
}

AudioProcessing* AudioProcessing::Create(const Config& config) {
  return Create(config, nullptr);
}

AudioProcessing* AudioProcessing::Create(const Config& config,
                                         Beamformer<float>* beamformer) {
  AudioProcessingImpl* apm = new AudioProcessingImpl(config, beamformer);
  if (apm->Initialize() != kNoError) {
    delete apm;
    apm = NULL;
  }

  return apm;
}
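
// Illustrative caller-side sketch (not part of this file; |far_frame|,
// |near_frame| and |delay_ms| are hypothetical caller-owned values):
//
//   AudioProcessing* apm = AudioProcessing::Create();
//   apm->echo_cancellation()->Enable(true);
//   apm->ProcessReverseStream(far_frame);
//   apm->set_stream_delay_ms(delay_ms);
//   apm->ProcessStream(near_frame);
//   ...
//   delete apm;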

AudioProcessingImpl::AudioProcessingImpl(const Config& config)
    : AudioProcessingImpl(config, nullptr) {}

AudioProcessingImpl::AudioProcessingImpl(const Config& config,
                                         Beamformer<float>* beamformer)
    : echo_cancellation_(NULL),
      echo_control_mobile_(NULL),
      gain_control_(NULL),
      high_pass_filter_(NULL),
      level_estimator_(NULL),
      noise_suppression_(NULL),
      voice_detection_(NULL),
      crit_(CriticalSectionWrapper::CreateCriticalSection()),
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
      debug_file_(FileWrapper::Create()),
      event_msg_(new audioproc::Event()),
#endif
      api_format_({{{kSampleRate16kHz, 1, false},
                    {kSampleRate16kHz, 1, false},
                    {kSampleRate16kHz, 1, false},
                    {kSampleRate16kHz, 1, false}}}),
      fwd_proc_format_(kSampleRate16kHz),
      rev_proc_format_(kSampleRate16kHz, 1),
      split_rate_(kSampleRate16kHz),
      stream_delay_ms_(0),
      delay_offset_ms_(0),
      was_stream_delay_set_(false),
      last_stream_delay_ms_(0),
      last_aec_system_delay_ms_(0),
      stream_delay_jumps_(-1),
      aec_system_delay_jumps_(-1),
      output_will_be_muted_(false),
      key_pressed_(false),
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
      use_new_agc_(false),
#else
      use_new_agc_(config.Get<ExperimentalAgc>().enabled),
#endif
      agc_startup_min_volume_(config.Get<ExperimentalAgc>().startup_min_volume),
#if defined(WEBRTC_ANDROID) || defined(WEBRTC_IOS)
      transient_suppressor_enabled_(false),
#else
      transient_suppressor_enabled_(config.Get<ExperimentalNs>().enabled),
#endif
      beamformer_enabled_(config.Get<Beamforming>().enabled),
      beamformer_(beamformer),
      array_geometry_(config.Get<Beamforming>().array_geometry),
      intelligibility_enabled_(config.Get<Intelligibility>().enabled) {
  echo_cancellation_ = new EchoCancellationImpl(this, crit_);
  component_list_.push_back(echo_cancellation_);

  echo_control_mobile_ = new EchoControlMobileImpl(this, crit_);
  component_list_.push_back(echo_control_mobile_);

  gain_control_ = new GainControlImpl(this, crit_);
  component_list_.push_back(gain_control_);

  high_pass_filter_ = new HighPassFilterImpl(this, crit_);
  component_list_.push_back(high_pass_filter_);

  level_estimator_ = new LevelEstimatorImpl(this, crit_);
  component_list_.push_back(level_estimator_);

  noise_suppression_ = new NoiseSuppressionImpl(this, crit_);
  component_list_.push_back(noise_suppression_);

  voice_detection_ = new VoiceDetectionImpl(this, crit_);
  component_list_.push_back(voice_detection_);

  gain_control_for_new_agc_.reset(new GainControlForNewAgc(gain_control_));

  SetExtraOptions(config);
}

AudioProcessingImpl::~AudioProcessingImpl() {
  {
    CriticalSectionScoped crit_scoped(crit_);
    // Depends on gain_control_ and gain_control_for_new_agc_.
    agc_manager_.reset();
    // Depends on gain_control_.
    gain_control_for_new_agc_.reset();
    while (!component_list_.empty()) {
      ProcessingComponent* component = component_list_.front();
      component->Destroy();
      delete component;
      component_list_.pop_front();
    }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
    if (debug_file_->Open()) {
      debug_file_->CloseFile();
    }
#endif
  }
  delete crit_;
  crit_ = NULL;
}

int AudioProcessingImpl::Initialize() {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked();
}

int AudioProcessingImpl::Initialize(int input_sample_rate_hz,
                                    int output_sample_rate_hz,
                                    int reverse_sample_rate_hz,
                                    ChannelLayout input_layout,
                                    ChannelLayout output_layout,
                                    ChannelLayout reverse_layout) {
  const ProcessingConfig processing_config = {
      {{input_sample_rate_hz,
        ChannelsFromLayout(input_layout),
        LayoutHasKeyboard(input_layout)},
       {output_sample_rate_hz,
        ChannelsFromLayout(output_layout),
        LayoutHasKeyboard(output_layout)},
       {reverse_sample_rate_hz,
        ChannelsFromLayout(reverse_layout),
        LayoutHasKeyboard(reverse_layout)},
       {reverse_sample_rate_hz,
        ChannelsFromLayout(reverse_layout),
        LayoutHasKeyboard(reverse_layout)}}};

  return Initialize(processing_config);
}

int AudioProcessingImpl::Initialize(const ProcessingConfig& processing_config) {
  CriticalSectionScoped crit_scoped(crit_);
  return InitializeLocked(processing_config);
}

int AudioProcessingImpl::InitializeLocked() {
  const int fwd_audio_buffer_channels =
      beamformer_enabled_ ? api_format_.input_stream().num_channels()
                          : api_format_.output_stream().num_channels();
  const int rev_audio_buffer_out_num_frames =
      api_format_.reverse_output_stream().num_frames() == 0
          ? rev_proc_format_.num_frames()
          : api_format_.reverse_output_stream().num_frames();
  if (api_format_.reverse_input_stream().num_channels() > 0) {
    render_audio_.reset(new AudioBuffer(
        api_format_.reverse_input_stream().num_frames(),
        api_format_.reverse_input_stream().num_channels(),
        rev_proc_format_.num_frames(), rev_proc_format_.num_channels(),
        rev_audio_buffer_out_num_frames));
    if (rev_conversion_needed()) {
      render_converter_ = AudioConverter::Create(
          api_format_.reverse_input_stream().num_channels(),
          api_format_.reverse_input_stream().num_frames(),
          api_format_.reverse_output_stream().num_channels(),
          api_format_.reverse_output_stream().num_frames());
    } else {
      render_converter_.reset(nullptr);
    }
  } else {
    render_audio_.reset(nullptr);
    render_converter_.reset(nullptr);
  }
  capture_audio_.reset(new AudioBuffer(
      api_format_.input_stream().num_frames(),
      api_format_.input_stream().num_channels(), fwd_proc_format_.num_frames(),
      fwd_audio_buffer_channels, api_format_.output_stream().num_frames()));

  // Initialize all components.
  for (auto item : component_list_) {
    int err = item->Initialize();
    if (err != kNoError) {
      return err;
    }
  }

  InitializeExperimentalAgc();

  InitializeTransient();

  InitializeBeamformer();

  InitializeIntelligibility();

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    int err = WriteInitMessage();
    if (err != kNoError) {
      return err;
    }
  }
#endif

  return kNoError;
}

int AudioProcessingImpl::InitializeLocked(const ProcessingConfig& config) {
  for (const auto& stream : config.streams) {
    if (stream.num_channels() < 0) {
      return kBadNumberChannelsError;
    }
    if (stream.num_channels() > 0 && stream.sample_rate_hz() <= 0) {
      return kBadSampleRateError;
    }
  }

  const int num_in_channels = config.input_stream().num_channels();
  const int num_out_channels = config.output_stream().num_channels();

  // Need at least one input channel.
  // Need either one output channel or as many outputs as there are inputs.
  if (num_in_channels == 0 ||
      !(num_out_channels == 1 || num_out_channels == num_in_channels)) {
    return kBadNumberChannelsError;
  }

  if (beamformer_enabled_ &&
      (static_cast<size_t>(num_in_channels) != array_geometry_.size() ||
       num_out_channels > 1)) {
    return kBadNumberChannelsError;
  }

  api_format_ = config;

  // We process at the closest native rate >= min(input rate, output rate)...
  const int min_proc_rate =
      std::min(api_format_.input_stream().sample_rate_hz(),
               api_format_.output_stream().sample_rate_hz());
  int fwd_proc_rate;
  for (size_t i = 0; i < kNumNativeSampleRates; ++i) {
    fwd_proc_rate = kNativeSampleRatesHz[i];
    if (fwd_proc_rate >= min_proc_rate) {
      break;
    }
  }
  // ...with one exception.
  if (echo_control_mobile_->is_enabled() &&
      min_proc_rate > kMaxAECMSampleRateHz) {
    fwd_proc_rate = kMaxAECMSampleRateHz;
  }
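  // Example: a 44100 Hz input with a 48000 Hz output gives min_proc_rate =
  // 44100, so the loop above selects 48 kHz; with AECM enabled the rate is
  // instead capped at kMaxAECMSampleRateHz (16 kHz).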

  fwd_proc_format_ = StreamConfig(fwd_proc_rate);

  // We normally process the reverse stream at 16 kHz. Unless...
  int rev_proc_rate = kSampleRate16kHz;
  if (fwd_proc_format_.sample_rate_hz() == kSampleRate8kHz) {
    // ...the forward stream is at 8 kHz.
    rev_proc_rate = kSampleRate8kHz;
  } else {
    if (api_format_.reverse_input_stream().sample_rate_hz() ==
        kSampleRate32kHz) {
      // ...or the input is at 32 kHz, in which case we use the splitting
      // filter rather than the resampler.
      rev_proc_rate = kSampleRate32kHz;
    }
  }
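  // Example: with a 16 kHz forward path and a 32 kHz reverse input, the
  // reverse stream is analyzed at 32 kHz using the splitting filter; other
  // wideband reverse inputs are resampled to 16 kHz.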

  // Always downmix the reverse stream to mono for analysis. This has been
  // demonstrated to work well for AEC in most practical scenarios.
  rev_proc_format_ = StreamConfig(rev_proc_rate, 1);

  if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
      fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
    split_rate_ = kSampleRate16kHz;
  } else {
    split_rate_ = fwd_proc_format_.sample_rate_hz();
  }

  return InitializeLocked();
}

// Calls InitializeLocked() if any of the audio parameters have changed from
// their current values.
int AudioProcessingImpl::MaybeInitializeLocked(
    const ProcessingConfig& processing_config) {
  if (processing_config == api_format_) {
    return kNoError;
  }
  return InitializeLocked(processing_config);
}

void AudioProcessingImpl::SetExtraOptions(const Config& config) {
  CriticalSectionScoped crit_scoped(crit_);
  for (auto item : component_list_) {
    item->SetExtraOptions(config);
  }

  if (transient_suppressor_enabled_ != config.Get<ExperimentalNs>().enabled) {
    transient_suppressor_enabled_ = config.Get<ExperimentalNs>().enabled;
    InitializeTransient();
  }
}

int AudioProcessingImpl::proc_sample_rate_hz() const {
  return fwd_proc_format_.sample_rate_hz();
}

int AudioProcessingImpl::proc_split_sample_rate_hz() const {
  return split_rate_;
}

int AudioProcessingImpl::num_reverse_channels() const {
  return rev_proc_format_.num_channels();
}

int AudioProcessingImpl::num_input_channels() const {
  return api_format_.input_stream().num_channels();
}

int AudioProcessingImpl::num_output_channels() const {
  return api_format_.output_stream().num_channels();
}

void AudioProcessingImpl::set_output_will_be_muted(bool muted) {
  CriticalSectionScoped lock(crit_);
  output_will_be_muted_ = muted;
  if (agc_manager_.get()) {
    agc_manager_->SetCaptureMuted(output_will_be_muted_);
  }
}

int AudioProcessingImpl::ProcessStream(const float* const* src,
                                       size_t samples_per_channel,
                                       int input_sample_rate_hz,
                                       ChannelLayout input_layout,
                                       int output_sample_rate_hz,
                                       ChannelLayout output_layout,
                                       float* const* dest) {
  CriticalSectionScoped crit_scoped(crit_);
  StreamConfig input_stream = api_format_.input_stream();
  input_stream.set_sample_rate_hz(input_sample_rate_hz);
  input_stream.set_num_channels(ChannelsFromLayout(input_layout));
  input_stream.set_has_keyboard(LayoutHasKeyboard(input_layout));

  StreamConfig output_stream = api_format_.output_stream();
  output_stream.set_sample_rate_hz(output_sample_rate_hz);
  output_stream.set_num_channels(ChannelsFromLayout(output_layout));
  output_stream.set_has_keyboard(LayoutHasKeyboard(output_layout));

  if (samples_per_channel != input_stream.num_frames()) {
    return kBadDataLengthError;
  }
  return ProcessStream(src, input_stream, output_stream, dest);
}

int AudioProcessingImpl::ProcessStream(const float* const* src,
                                       const StreamConfig& input_config,
                                       const StreamConfig& output_config,
                                       float* const* dest) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!src || !dest) {
    return kNullPointerError;
  }

  ProcessingConfig processing_config = api_format_;
  processing_config.input_stream() = input_config;
  processing_config.output_stream() = output_config;

  RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
  assert(processing_config.input_stream().num_frames() ==
         api_format_.input_stream().num_frames());

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    RETURN_ON_ERR(WriteConfigMessage(false));

    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size =
        sizeof(float) * api_format_.input_stream().num_frames();
    for (int i = 0; i < api_format_.input_stream().num_channels(); ++i)
      msg->add_input_channel(src[i], channel_size);
  }
#endif

  capture_audio_->CopyFrom(src, api_format_.input_stream());
  RETURN_ON_ERR(ProcessStreamLocked());
  capture_audio_->CopyTo(api_format_.output_stream(), dest);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t channel_size =
        sizeof(float) * api_format_.output_stream().num_frames();
    for (int i = 0; i < api_format_.output_stream().num_channels(); ++i)
      msg->add_output_channel(dest[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}

int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (!frame) {
    return kNullPointerError;
  }
  // Must be a native rate.
  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
      frame->sample_rate_hz_ != kSampleRate16kHz &&
      frame->sample_rate_hz_ != kSampleRate32kHz &&
      frame->sample_rate_hz_ != kSampleRate48kHz) {
    return kBadSampleRateError;
  }
  if (echo_control_mobile_->is_enabled() &&
      frame->sample_rate_hz_ > kMaxAECMSampleRateHz) {
    LOG(LS_ERROR) << "AECM only supports 16 or 8 kHz sample rates";
    return kUnsupportedComponentError;
  }

  // TODO(ajm): The input and output rates and channels are currently
  // constrained to be identical in the int16 interface.
  ProcessingConfig processing_config = api_format_;
  processing_config.input_stream().set_sample_rate_hz(frame->sample_rate_hz_);
  processing_config.input_stream().set_num_channels(frame->num_channels_);
  processing_config.output_stream().set_sample_rate_hz(frame->sample_rate_hz_);
  processing_config.output_stream().set_num_channels(frame->num_channels_);

  RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
  if (frame->samples_per_channel_ != api_format_.input_stream().num_frames()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::STREAM);
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size =
        sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
    msg->set_input_data(frame->data_, data_size);
  }
#endif

  capture_audio_->DeinterleaveFrom(frame);
  RETURN_ON_ERR(ProcessStreamLocked());
  capture_audio_->InterleaveTo(frame, output_copy_needed(is_data_processed()));

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    const size_t data_size =
        sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
    msg->set_output_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  return kNoError;
}

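// Runs the capture-side (near-end) chain on |capture_audio_|: band splitting
// when needed, intelligibility analysis, beamforming, high-pass filtering,
// gain control and noise suppression analysis, echo cancellation (AEC), noise
// suppression, mobile echo control (AECM), voice detection, gain control,
// band merging, transient suppression and finally level estimation.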
int AudioProcessingImpl::ProcessStreamLocked() {
#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    audioproc::Stream* msg = event_msg_->mutable_stream();
    msg->set_delay(stream_delay_ms_);
    msg->set_drift(echo_cancellation_->stream_drift_samples());
    msg->set_level(gain_control()->stream_analog_level());
    msg->set_keypress(key_pressed_);
  }
#endif

  MaybeUpdateHistograms();

  AudioBuffer* ca = capture_audio_.get();  // For brevity.

  if (use_new_agc_ && gain_control_->is_enabled()) {
    agc_manager_->AnalyzePreProcess(ca->channels()[0], ca->num_channels(),
                                    fwd_proc_format_.num_frames());
  }

  bool data_processed = is_data_processed();
  if (analysis_needed(data_processed)) {
    ca->SplitIntoFrequencyBands();
  }

  if (intelligibility_enabled_) {
    intelligibility_enhancer_->AnalyzeCaptureAudio(
        ca->split_channels_f(kBand0To8kHz), split_rate_, ca->num_channels());
  }

  if (beamformer_enabled_) {
    beamformer_->ProcessChunk(*ca->split_data_f(), ca->split_data_f());
    ca->set_num_channels(1);
  }

  RETURN_ON_ERR(high_pass_filter_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(gain_control_->AnalyzeCaptureAudio(ca));
  RETURN_ON_ERR(noise_suppression_->AnalyzeCaptureAudio(ca));
  RETURN_ON_ERR(echo_cancellation_->ProcessCaptureAudio(ca));

  if (echo_control_mobile_->is_enabled() && noise_suppression_->is_enabled()) {
    ca->CopyLowPassToReference();
  }
  RETURN_ON_ERR(noise_suppression_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(echo_control_mobile_->ProcessCaptureAudio(ca));
  RETURN_ON_ERR(voice_detection_->ProcessCaptureAudio(ca));

  if (use_new_agc_ && gain_control_->is_enabled() &&
      (!beamformer_enabled_ || beamformer_->is_target_present())) {
    agc_manager_->Process(ca->split_bands_const(0)[kBand0To8kHz],
                          ca->num_frames_per_band(), split_rate_);
  }
  RETURN_ON_ERR(gain_control_->ProcessCaptureAudio(ca));

  if (synthesis_needed(data_processed)) {
    ca->MergeFrequencyBands();
  }

  // TODO(aluebs): Investigate if the transient suppression placement should be
  // before or after the AGC.
  if (transient_suppressor_enabled_) {
    float voice_probability =
        agc_manager_.get() ? agc_manager_->voice_probability() : 1.f;

    transient_suppressor_->Suppress(
        ca->channels_f()[0], ca->num_frames(), ca->num_channels(),
        ca->split_bands_const_f(0)[kBand0To8kHz], ca->num_frames_per_band(),
        ca->keyboard_data(), ca->num_keyboard_frames(), voice_probability,
        key_pressed_);
  }

  // The level estimator operates on the recombined data.
  RETURN_ON_ERR(level_estimator_->ProcessStream(ca));

  was_stream_delay_set_ = false;
  return kNoError;
}

int AudioProcessingImpl::AnalyzeReverseStream(const float* const* data,
                                              size_t samples_per_channel,
                                              int rev_sample_rate_hz,
                                              ChannelLayout layout) {
  const StreamConfig reverse_config = {
      rev_sample_rate_hz, ChannelsFromLayout(layout), LayoutHasKeyboard(layout),
  };
  if (samples_per_channel != reverse_config.num_frames()) {
    return kBadDataLengthError;
  }
  return AnalyzeReverseStream(data, reverse_config, reverse_config);
}

int AudioProcessingImpl::ProcessReverseStream(
    const float* const* src,
    const StreamConfig& reverse_input_config,
    const StreamConfig& reverse_output_config,
    float* const* dest) {
  RETURN_ON_ERR(
      AnalyzeReverseStream(src, reverse_input_config, reverse_output_config));
  if (is_rev_processed()) {
    render_audio_->CopyTo(api_format_.reverse_output_stream(), dest);
  } else if (rev_conversion_needed()) {
    render_converter_->Convert(src, reverse_input_config.num_samples(), dest,
                               reverse_output_config.num_samples());
  } else {
    CopyAudioIfNeeded(src, reverse_input_config.num_frames(),
                      reverse_input_config.num_channels(), dest);
  }

  return kNoError;
}

int AudioProcessingImpl::AnalyzeReverseStream(
    const float* const* src,
    const StreamConfig& reverse_input_config,
    const StreamConfig& reverse_output_config) {
  CriticalSectionScoped crit_scoped(crit_);
  if (src == NULL) {
    return kNullPointerError;
  }

  if (reverse_input_config.num_channels() <= 0) {
    return kBadNumberChannelsError;
  }

  ProcessingConfig processing_config = api_format_;
  processing_config.reverse_input_stream() = reverse_input_config;
  processing_config.reverse_output_stream() = reverse_output_config;

  RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
  assert(reverse_input_config.num_frames() ==
         api_format_.reverse_input_stream().num_frames());

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t channel_size =
        sizeof(float) * api_format_.reverse_input_stream().num_frames();
    for (int i = 0; i < api_format_.reverse_input_stream().num_channels(); ++i)
      msg->add_channel(src[i], channel_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif

  render_audio_->CopyFrom(src, api_format_.reverse_input_stream());
  return ProcessReverseStreamLocked();
}

int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
  RETURN_ON_ERR(AnalyzeReverseStream(frame));
  if (is_rev_processed()) {
    render_audio_->InterleaveTo(frame, true);
  }

  return kNoError;
}

int AudioProcessingImpl::AnalyzeReverseStream(AudioFrame* frame) {
  CriticalSectionScoped crit_scoped(crit_);
  if (frame == NULL) {
    return kNullPointerError;
  }
  // Must be a native rate.
  if (frame->sample_rate_hz_ != kSampleRate8kHz &&
      frame->sample_rate_hz_ != kSampleRate16kHz &&
      frame->sample_rate_hz_ != kSampleRate32kHz &&
      frame->sample_rate_hz_ != kSampleRate48kHz) {
    return kBadSampleRateError;
  }
  // This interface does not tolerate different forward and reverse rates.
  if (frame->sample_rate_hz_ != api_format_.input_stream().sample_rate_hz()) {
    return kBadSampleRateError;
  }

  if (frame->num_channels_ <= 0) {
    return kBadNumberChannelsError;
  }

  ProcessingConfig processing_config = api_format_;
  processing_config.reverse_input_stream().set_sample_rate_hz(
      frame->sample_rate_hz_);
  processing_config.reverse_input_stream().set_num_channels(
      frame->num_channels_);
  processing_config.reverse_output_stream().set_sample_rate_hz(
      frame->sample_rate_hz_);
  processing_config.reverse_output_stream().set_num_channels(
      frame->num_channels_);

  RETURN_ON_ERR(MaybeInitializeLocked(processing_config));
  if (frame->samples_per_channel_ !=
      api_format_.reverse_input_stream().num_frames()) {
    return kBadDataLengthError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  if (debug_file_->Open()) {
    event_msg_->set_type(audioproc::Event::REVERSE_STREAM);
    audioproc::ReverseStream* msg = event_msg_->mutable_reverse_stream();
    const size_t data_size =
        sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
    msg->set_data(frame->data_, data_size);
    RETURN_ON_ERR(WriteMessageToDebugFile());
  }
#endif
  render_audio_->DeinterleaveFrom(frame);
  return ProcessReverseStreamLocked();
}

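// Runs the render-side (far-end) chain on |render_audio_|: band splitting at
// 32 kHz, intelligibility enhancement, and render-signal analysis for AEC,
// AECM and (when the new AGC is not used) gain control.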
int AudioProcessingImpl::ProcessReverseStreamLocked() {
  AudioBuffer* ra = render_audio_.get();  // For brevity.
  if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz) {
    ra->SplitIntoFrequencyBands();
  }

  if (intelligibility_enabled_) {
    intelligibility_enhancer_->ProcessRenderAudio(
        ra->split_channels_f(kBand0To8kHz), split_rate_, ra->num_channels());
  }

  RETURN_ON_ERR(echo_cancellation_->ProcessRenderAudio(ra));
  RETURN_ON_ERR(echo_control_mobile_->ProcessRenderAudio(ra));
  if (!use_new_agc_) {
    RETURN_ON_ERR(gain_control_->ProcessRenderAudio(ra));
  }

  if (rev_proc_format_.sample_rate_hz() == kSampleRate32kHz &&
      is_rev_processed()) {
    ra->MergeFrequencyBands();
  }

  return kNoError;
}

int AudioProcessingImpl::set_stream_delay_ms(int delay) {
  Error retval = kNoError;
  was_stream_delay_set_ = true;
  delay += delay_offset_ms_;

  if (delay < 0) {
    delay = 0;
    retval = kBadStreamParameterWarning;
  }

  // TODO(ajm): the max is rather arbitrarily chosen; investigate.
  if (delay > 500) {
    delay = 500;
    retval = kBadStreamParameterWarning;
  }

  stream_delay_ms_ = delay;
  return retval;
}

int AudioProcessingImpl::stream_delay_ms() const {
  return stream_delay_ms_;
}

bool AudioProcessingImpl::was_stream_delay_set() const {
  return was_stream_delay_set_;
}

void AudioProcessingImpl::set_stream_key_pressed(bool key_pressed) {
  key_pressed_ = key_pressed;
}

void AudioProcessingImpl::set_delay_offset_ms(int offset) {
  CriticalSectionScoped crit_scoped(crit_);
  delay_offset_ms_ = offset;
}

int AudioProcessingImpl::delay_offset_ms() const {
  return delay_offset_ms_;
}

int AudioProcessingImpl::StartDebugRecording(
    const char filename[AudioProcessing::kMaxFilenameSize]) {
  CriticalSectionScoped crit_scoped(crit_);
  static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, "");

  if (filename == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFile(filename, false) == -1) {
    debug_file_->CloseFile();
    return kFileError;
  }

  RETURN_ON_ERR(WriteConfigMessage(true));
  RETURN_ON_ERR(WriteInitMessage());
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
  CriticalSectionScoped crit_scoped(crit_);

  if (handle == NULL) {
    return kNullPointerError;
  }

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // Stop any ongoing recording.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }

  if (debug_file_->OpenFromFileHandle(handle, true, false) == -1) {
    return kFileError;
  }

  RETURN_ON_ERR(WriteConfigMessage(true));
  RETURN_ON_ERR(WriteInitMessage());
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

int AudioProcessingImpl::StartDebugRecordingForPlatformFile(
    rtc::PlatformFile handle) {
  FILE* stream = rtc::FdopenPlatformFileForWriting(handle);
  return StartDebugRecording(stream);
}

int AudioProcessingImpl::StopDebugRecording() {
  CriticalSectionScoped crit_scoped(crit_);

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
  // We just return if recording hasn't started.
  if (debug_file_->Open()) {
    if (debug_file_->CloseFile() == -1) {
      return kFileError;
    }
  }
  return kNoError;
#else
  return kUnsupportedFunctionError;
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
}

EchoCancellation* AudioProcessingImpl::echo_cancellation() const {
  return echo_cancellation_;
}

EchoControlMobile* AudioProcessingImpl::echo_control_mobile() const {
  return echo_control_mobile_;
}

GainControl* AudioProcessingImpl::gain_control() const {
  if (use_new_agc_) {
    return gain_control_for_new_agc_.get();
  }
  return gain_control_;
}

HighPassFilter* AudioProcessingImpl::high_pass_filter() const {
  return high_pass_filter_;
}

LevelEstimator* AudioProcessingImpl::level_estimator() const {
  return level_estimator_;
}

NoiseSuppression* AudioProcessingImpl::noise_suppression() const {
  return noise_suppression_;
}

VoiceDetection* AudioProcessingImpl::voice_detection() const {
  return voice_detection_;
}

bool AudioProcessingImpl::is_data_processed() const {
  if (beamformer_enabled_) {
    return true;
  }

  int enabled_count = 0;
  for (auto item : component_list_) {
    if (item->is_component_enabled()) {
      enabled_count++;
    }
  }

  // Data is unchanged if no components are enabled, or if the only enabled
  // components are level_estimator_ and/or voice_detection_.
  if (enabled_count == 0) {
    return false;
  } else if (enabled_count == 1) {
    if (level_estimator_->is_enabled() || voice_detection_->is_enabled()) {
      return false;
    }
  } else if (enabled_count == 2) {
    if (level_estimator_->is_enabled() && voice_detection_->is_enabled()) {
      return false;
    }
  }
  return true;
}

bool AudioProcessingImpl::output_copy_needed(bool is_data_processed) const {
  // Check if we've upmixed or downmixed the audio.
  return ((api_format_.output_stream().num_channels() !=
           api_format_.input_stream().num_channels()) ||
          is_data_processed || transient_suppressor_enabled_);
}

bool AudioProcessingImpl::synthesis_needed(bool is_data_processed) const {
  return (is_data_processed &&
          (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
           fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz));
}

bool AudioProcessingImpl::analysis_needed(bool is_data_processed) const {
  if (!is_data_processed && !voice_detection_->is_enabled() &&
      !transient_suppressor_enabled_) {
    // Only level_estimator_ is enabled.
    return false;
  } else if (fwd_proc_format_.sample_rate_hz() == kSampleRate32kHz ||
             fwd_proc_format_.sample_rate_hz() == kSampleRate48kHz) {
    // Something besides level_estimator_ is enabled, and we have super-wb.
    return true;
  }
  return false;
}

bool AudioProcessingImpl::is_rev_processed() const {
  return intelligibility_enabled_ && intelligibility_enhancer_->active();
}

bool AudioProcessingImpl::rev_conversion_needed() const {
  return (api_format_.reverse_input_stream() !=
          api_format_.reverse_output_stream());
}

void AudioProcessingImpl::InitializeExperimentalAgc() {
  if (use_new_agc_) {
    if (!agc_manager_.get()) {
      agc_manager_.reset(new AgcManagerDirect(gain_control_,
                                              gain_control_for_new_agc_.get(),
                                              agc_startup_min_volume_));
    }
    agc_manager_->Initialize();
    agc_manager_->SetCaptureMuted(output_will_be_muted_);
  }
}

void AudioProcessingImpl::InitializeTransient() {
  if (transient_suppressor_enabled_) {
    if (!transient_suppressor_.get()) {
      transient_suppressor_.reset(new TransientSuppressor());
    }
    transient_suppressor_->Initialize(
        fwd_proc_format_.sample_rate_hz(), split_rate_,
        api_format_.output_stream().num_channels());
  }
}

void AudioProcessingImpl::InitializeBeamformer() {
  if (beamformer_enabled_) {
    if (!beamformer_) {
      beamformer_.reset(new NonlinearBeamformer(array_geometry_));
    }
    beamformer_->Initialize(kChunkSizeMs, split_rate_);
  }
}

void AudioProcessingImpl::InitializeIntelligibility() {
  if (intelligibility_enabled_) {
    IntelligibilityEnhancer::Config config;
    config.sample_rate_hz = split_rate_;
    config.num_capture_channels = capture_audio_->num_channels();
    config.num_render_channels = render_audio_->num_channels();
    intelligibility_enhancer_.reset(new IntelligibilityEnhancer(config));
  }
}

void AudioProcessingImpl::MaybeUpdateHistograms() {
  static const int kMinDiffDelayMs = 60;

  if (echo_cancellation()->is_enabled()) {
    // Activate the delay-jump counters once we know echo cancellation is
    // running: if the stream has echo, the AEC must be processing data.
    if (stream_delay_jumps_ == -1 && echo_cancellation()->stream_has_echo()) {
      stream_delay_jumps_ = 0;
    }
    if (aec_system_delay_jumps_ == -1 &&
        echo_cancellation()->stream_has_echo()) {
      aec_system_delay_jumps_ = 0;
    }

    // Detect a jump in platform reported system delay and log the difference.
    const int diff_stream_delay_ms = stream_delay_ms_ - last_stream_delay_ms_;
    if (diff_stream_delay_ms > kMinDiffDelayMs && last_stream_delay_ms_ != 0) {
      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.PlatformReportedStreamDelayJump",
                           diff_stream_delay_ms, kMinDiffDelayMs, 1000, 100);
      if (stream_delay_jumps_ == -1) {
        stream_delay_jumps_ = 0;  // Activate counter if needed.
      }
      stream_delay_jumps_++;
    }
    last_stream_delay_ms_ = stream_delay_ms_;

    // Detect a jump in AEC system delay and log the difference.
    const int frames_per_ms = rtc::CheckedDivExact(split_rate_, 1000);
    const int aec_system_delay_ms =
        WebRtcAec_system_delay(echo_cancellation()->aec_core()) / frames_per_ms;
    const int diff_aec_system_delay_ms =
        aec_system_delay_ms - last_aec_system_delay_ms_;
    if (diff_aec_system_delay_ms > kMinDiffDelayMs &&
        last_aec_system_delay_ms_ != 0) {
      RTC_HISTOGRAM_COUNTS("WebRTC.Audio.AecSystemDelayJump",
                           diff_aec_system_delay_ms, kMinDiffDelayMs, 1000,
                           100);
      if (aec_system_delay_jumps_ == -1) {
        aec_system_delay_jumps_ = 0;  // Activate counter if needed.
      }
      aec_system_delay_jumps_++;
    }
    last_aec_system_delay_ms_ = aec_system_delay_ms;
  }
}

void AudioProcessingImpl::UpdateHistogramsOnCallEnd() {
  CriticalSectionScoped crit_scoped(crit_);
  if (stream_delay_jumps_ > -1) {
    RTC_HISTOGRAM_ENUMERATION(
        "WebRTC.Audio.NumOfPlatformReportedStreamDelayJumps",
        stream_delay_jumps_, 51);
  }
  stream_delay_jumps_ = -1;
  last_stream_delay_ms_ = 0;

  if (aec_system_delay_jumps_ > -1) {
    RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps",
                              aec_system_delay_jumps_, 51);
  }
  aec_system_delay_jumps_ = -1;
  last_aec_system_delay_ms_ = 0;
}

#ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
int AudioProcessingImpl::WriteMessageToDebugFile() {
  int32_t size = event_msg_->ByteSize();
  if (size <= 0) {
    return kUnspecifiedError;
  }
#if defined(WEBRTC_ARCH_BIG_ENDIAN)
// TODO(ajm): Use little-endian "on the wire". For the moment, we can be
//            pretty safe in assuming little-endian.
#endif

  if (!event_msg_->SerializeToString(&event_str_)) {
    return kUnspecifiedError;
  }

  // Write message preceded by its size.
  if (!debug_file_->Write(&size, sizeof(int32_t))) {
    return kFileError;
  }
  if (!debug_file_->Write(event_str_.data(), event_str_.length())) {
    return kFileError;
  }

  event_msg_->Clear();

  return kNoError;
}

int AudioProcessingImpl::WriteInitMessage() {
  event_msg_->set_type(audioproc::Event::INIT);
  audioproc::Init* msg = event_msg_->mutable_init();
  msg->set_sample_rate(api_format_.input_stream().sample_rate_hz());
  msg->set_num_input_channels(api_format_.input_stream().num_channels());
  msg->set_num_output_channels(api_format_.output_stream().num_channels());
  msg->set_num_reverse_channels(
      api_format_.reverse_input_stream().num_channels());
  msg->set_reverse_sample_rate(
      api_format_.reverse_input_stream().sample_rate_hz());
  msg->set_output_sample_rate(api_format_.output_stream().sample_rate_hz());
  // TODO(ekmeyerson): Add reverse output fields to event_msg_.

  RETURN_ON_ERR(WriteMessageToDebugFile());
  return kNoError;
}

int AudioProcessingImpl::WriteConfigMessage(bool forced) {
  audioproc::Config config;

  config.set_aec_enabled(echo_cancellation_->is_enabled());
  config.set_aec_delay_agnostic_enabled(
      echo_cancellation_->is_delay_agnostic_enabled());
  config.set_aec_drift_compensation_enabled(
      echo_cancellation_->is_drift_compensation_enabled());
  config.set_aec_extended_filter_enabled(
      echo_cancellation_->is_extended_filter_enabled());
  config.set_aec_suppression_level(
      static_cast<int>(echo_cancellation_->suppression_level()));

  config.set_aecm_enabled(echo_control_mobile_->is_enabled());
  config.set_aecm_comfort_noise_enabled(
      echo_control_mobile_->is_comfort_noise_enabled());
  config.set_aecm_routing_mode(
      static_cast<int>(echo_control_mobile_->routing_mode()));

  config.set_agc_enabled(gain_control_->is_enabled());
  config.set_agc_mode(static_cast<int>(gain_control_->mode()));
  config.set_agc_limiter_enabled(gain_control_->is_limiter_enabled());
  config.set_noise_robust_agc_enabled(use_new_agc_);

  config.set_hpf_enabled(high_pass_filter_->is_enabled());

  config.set_ns_enabled(noise_suppression_->is_enabled());
  config.set_ns_level(static_cast<int>(noise_suppression_->level()));

  config.set_transient_suppression_enabled(transient_suppressor_enabled_);

  std::string serialized_config = config.SerializeAsString();
  if (!forced && last_serialized_config_ == serialized_config) {
    return kNoError;
  }

  last_serialized_config_ = serialized_config;

  event_msg_->set_type(audioproc::Event::CONFIG);
  event_msg_->mutable_config()->CopyFrom(config);

  RETURN_ON_ERR(WriteMessageToDebugFile());
  return kNoError;
}
#endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP

}  // namespace webrtc