1// Copyright (c) 2012 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
#include <algorithm>
#include <vector>

#include "base/environment.h"
#include "base/file_util.h"
#include "base/files/file_path.h"
#include "base/path_service.h"
#include "base/strings/stringprintf.h"
#include "base/test/test_timeouts.h"
#include "content/renderer/media/webrtc_audio_capturer.h"
#include "content/renderer/media/webrtc_audio_device_impl.h"
#include "content/renderer/media/webrtc_audio_renderer.h"
#include "content/renderer/media/webrtc_local_audio_track.h"
#include "content/renderer/render_thread_impl.h"
#include "content/test/webrtc_audio_device_test.h"
#include "media/audio/audio_manager_base.h"
#include "media/base/audio_hardware_config.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "third_party/webrtc/voice_engine/include/voe_audio_processing.h"
#include "third_party/webrtc/voice_engine/include/voe_base.h"
#include "third_party/webrtc/voice_engine/include/voe_codec.h"
#include "third_party/webrtc/voice_engine/include/voe_external_media.h"
#include "third_party/webrtc/voice_engine/include/voe_file.h"
#include "third_party/webrtc/voice_engine/include/voe_network.h"
28
29#if defined(OS_WIN)
30#include "base/win/windows_version.h"
31#endif
32
33using media::AudioParameters;
34using media::CHANNEL_LAYOUT_STEREO;
35using testing::_;
36using testing::AnyNumber;
37using testing::InvokeWithoutArgs;
38using testing::Return;
39using testing::StrEq;
40
41namespace content {
42
43namespace {
44
45const int kRenderViewId = 1;
46
// The number of packets that RunWebRtcLoopbackTimeTest() uses for measurement.
48const int kNumberOfPacketsForLoopbackTest = 100;
49
50// The hardware latency we feed to WebRtc.
51const int kHardwareLatencyInMs = 50;
52
53scoped_ptr<media::AudioHardwareConfig> CreateRealHardwareConfig(
54    media::AudioManager* manager) {
55  const AudioParameters output_parameters =
56      manager->GetDefaultOutputStreamParameters();
57  const AudioParameters input_parameters =
58      manager->GetInputStreamParameters(
59          media::AudioManagerBase::kDefaultDeviceId);
60
61  return make_scoped_ptr(new media::AudioHardwareConfig(
62      input_parameters, output_parameters));
63}
64
// Returns true if at least one of the first |size| elements of |array|
// equals |value|.
bool FindElementInArray(const int* array, int size, int value) {
  // Use plain pointer arithmetic for the end iterator. The previous form,
  // |&array[size]|, is |&*(array + size)| and thus (formally) dereferences a
  // one-past-the-end pointer before taking its address.
  return std::find(array, array + size, value) != array + size;
}
69
70// This method returns false if a non-supported rate is detected on the
71// input or output side.
72// TODO(henrika): add support for automatic fallback to Windows Wave audio
73// if a non-supported rate is detected. It is probably better to detect
74// invalid audio settings by actually trying to open the audio streams instead
75// of relying on hard coded conditions.
76bool HardwareSampleRatesAreValid() {
77  // These are the currently supported hardware sample rates in both directions.
78  // The actual WebRTC client can limit these ranges further depending on
79  // platform but this is the maximum range we support today.
80  int valid_input_rates[] = {16000, 32000, 44100, 48000, 96000};
81  int valid_output_rates[] = {16000, 32000, 44100, 48000, 96000};
82
83  media::AudioHardwareConfig* hardware_config =
84      RenderThreadImpl::current()->GetAudioHardwareConfig();
85
86  // Verify the input sample rate.
87  int input_sample_rate = hardware_config->GetInputSampleRate();
88
89  if (!FindElementInArray(valid_input_rates, arraysize(valid_input_rates),
90                          input_sample_rate)) {
91    LOG(WARNING) << "Non-supported input sample rate detected.";
92    return false;
93  }
94
95  // Given that the input rate was OK, verify the output rate as well.
96  int output_sample_rate = hardware_config->GetOutputSampleRate();
97  if (!FindElementInArray(valid_output_rates, arraysize(valid_output_rates),
98                          output_sample_rate)) {
99    LOG(WARNING) << "Non-supported output sample rate detected.";
100    return false;
101  }
102
103  return true;
104}
105
106// Utility method which creates and initializes the audio capturer and adds it
107// to WebRTC audio device. This method should be used in tests where
108// HardwareSampleRatesAreValid() has been called and returned true.
109bool CreateAndInitializeCapturer(WebRtcAudioDeviceImpl* webrtc_audio_device) {
110  DCHECK(webrtc_audio_device);
111  scoped_refptr<WebRtcAudioCapturer> capturer(
112      WebRtcAudioCapturer::CreateCapturer());
113
114  media::AudioHardwareConfig* hardware_config =
115      RenderThreadImpl::current()->GetAudioHardwareConfig();
116
117  // Use native capture sample rate and channel configuration to get some
118  // action in this test.
119  int sample_rate = hardware_config->GetInputSampleRate();
120  media::ChannelLayout channel_layout =
121      hardware_config->GetInputChannelLayout();
122  if (!capturer->Initialize(kRenderViewId, channel_layout, sample_rate, 1,
123                            media::AudioManagerBase::kDefaultDeviceId)) {
124    return false;
125  }
126
127  // Add the capturer to the WebRtcAudioDeviceImpl.
128  webrtc_audio_device->AddAudioCapturer(capturer);
129
130  return true;
131}
132
// Create and start a local audio track. Starting the audio track will connect
// the audio track to the capturer and also start the source of the capturer.
// Also, connect the sink to the audio track.
scoped_refptr<WebRtcLocalAudioTrack>
CreateAndStartLocalAudioTrack(WebRtcAudioCapturer* capturer,
                              WebRtcAudioCapturerSink* sink) {
  // Empty label and NULL track constraints.
  scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
      WebRtcLocalAudioTrack::Create(std::string(), capturer, NULL));
  // The sink is attached before Start() -- NOTE(review): presumably so the
  // very first capture callbacks are delivered to |sink|; confirm.
  local_audio_track->AddSink(sink);
  local_audio_track->Start();
  return local_audio_track;
}
145
// webrtc::VoEMediaProcess implementation that records the parameters of the
// most recent Process() callback and signals |event| (if non-NULL) each time
// a callback arrives, letting tests block until audio actually flows.
// All state is guarded by |lock_| since Process() runs on a WebRTC thread
// while the accessors are read from the test thread.
class WebRTCMediaProcessImpl : public webrtc::VoEMediaProcess {
 public:
  explicit WebRTCMediaProcessImpl(base::WaitableEvent* event)
      : event_(event),
        channel_id_(-1),
        type_(webrtc::kPlaybackPerChannel),
        packet_size_(0),
        sample_rate_(0),
        channels_(0) {
  }
  virtual ~WebRTCMediaProcessImpl() {}

  // TODO(henrika): Refactor in WebRTC and convert to Chrome coding style.
  // Invoked by the VoiceEngine for every 10 ms chunk of audio on the channel
  // this object is registered for; snapshots the callback parameters.
  virtual void Process(int channel,
                       webrtc::ProcessingTypes type,
                       int16_t audio_10ms[],
                       int length,
                       int sampling_freq,
                       bool is_stereo) OVERRIDE {
    base::AutoLock auto_lock(lock_);
    channel_id_ = channel;
    type_ = type;
    packet_size_ = length;
    sample_rate_ = sampling_freq;
    channels_ = (is_stereo ? 2 : 1);
    if (event_) {
      // Signal that a new callback has been received.
      event_->Signal();
    }
  }

  // Channel id seen in the last Process() callback (-1 if none yet).
  int channel_id() const {
    base::AutoLock auto_lock(lock_);
    return channel_id_;
  }

  // Processing type of the last callback (as int for EXPECT_EQ comparisons).
  int type() const {
    base::AutoLock auto_lock(lock_);
    return type_;
  }

  // Number of samples in the last callback.
  int packet_size() const {
    base::AutoLock auto_lock(lock_);
    return packet_size_;
  }

  // Sample rate (Hz) of the last callback.
  int sample_rate() const {
    base::AutoLock auto_lock(lock_);
    return sample_rate_;
  }

 private:
  base::WaitableEvent* event_;
  int channel_id_;
  webrtc::ProcessingTypes type_;
  int packet_size_;
  int sample_rate_;
  // Recorded in Process() but currently has no accessor.
  int channels_;
  mutable base::Lock lock_;
  DISALLOW_COPY_AND_ASSIGN(WebRTCMediaProcessImpl);
};
207
208class MockWebRtcAudioCapturerSink : public WebRtcAudioCapturerSink {
209 public:
210  explicit MockWebRtcAudioCapturerSink(base::WaitableEvent* event)
211      : event_(event) {
212    DCHECK(event_);
213  }
214  virtual ~MockWebRtcAudioCapturerSink() {}
215
216  // WebRtcAudioCapturerSink implementation.
217  virtual int CaptureData(const std::vector<int>& channels,
218                          const int16* audio_data,
219                          int sample_rate,
220                          int number_of_channels,
221                          int number_of_frames,
222                          int audio_delay_milliseconds,
223                          int current_volume,
224                          bool need_audio_processing) OVERRIDE {
225    // Signal that a callback has been received.
226    event_->Signal();
227    return 0;
228  }
229
230  // Set the format for the capture audio parameters.
231  virtual void SetCaptureFormat(
232      const media::AudioParameters& params) OVERRIDE {}
233
234 private:
235   base::WaitableEvent* event_;
236
237   DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioCapturerSink);
238};
239
240class MockWebRtcAudioRendererSource : public WebRtcAudioRendererSource {
241 public:
242  explicit MockWebRtcAudioRendererSource(base::WaitableEvent* event)
243      : event_(event) {
244    DCHECK(event_);
245  }
246  virtual ~MockWebRtcAudioRendererSource() {}
247
248  // WebRtcAudioRendererSource implementation.
249  virtual void RenderData(uint8* audio_data,
250                          int number_of_channels,
251                          int number_of_frames,
252                          int audio_delay_milliseconds) OVERRIDE {
253    // Signal that a callback has been received.
254    // Initialize the memory to zero to avoid uninitialized warning from
255    // Valgrind.
256    memset(audio_data, 0,
257           sizeof(int16) * number_of_channels * number_of_frames);
258    event_->Signal();
259  }
260
261  virtual void SetRenderFormat(const media::AudioParameters& params) OVERRIDE {
262  }
263
264  virtual void RemoveAudioRenderer(WebRtcAudioRenderer* renderer) OVERRIDE {};
265
266 private:
267   base::WaitableEvent* event_;
268
269   DISALLOW_COPY_AND_ASSIGN(MockWebRtcAudioRendererSource);
270};
271
// Prints numerical information to stdout in a controlled format so we can plot
// the result.
void PrintPerfResultMs(const char* graph, const char* trace, float time_ms) {
  std::string times;
  base::StringAppendF(&times, "%.2f,", time_ms);
  // The "*RESULT graph: trace= [values] ms" line is assembled from separate
  // pieces -- NOTE(review): presumably so log scanners that grep for the
  // literal RESULT marker do not match this source line; confirm against the
  // perf log parser before changing.
  std::string result = base::StringPrintf(
      "%sRESULT %s%s: %s= %s%s%s %s\n", "*", graph, "",
      trace,  "[", times.c_str(), "]", "ms");

  // Flush around the write so the result line is not interleaved with other
  // buffered output.
  fflush(stdout);
  printf("%s", result.c_str());
  fflush(stdout);
}
285
286void ReadDataFromSpeechFile(char* data, int length) {
287  base::FilePath data_file;
288  CHECK(PathService::Get(base::DIR_SOURCE_ROOT, &data_file));
289  data_file =
290      data_file.Append(FILE_PATH_LITERAL("media"))
291               .Append(FILE_PATH_LITERAL("test"))
292               .Append(FILE_PATH_LITERAL("data"))
293               .Append(FILE_PATH_LITERAL("speech_16b_stereo_48kHz.raw"));
294  DCHECK(base::PathExists(data_file));
295  int64 data_file_size64 = 0;
296  DCHECK(file_util::GetFileSize(data_file, &data_file_size64));
297  EXPECT_EQ(length, file_util::ReadFile(data_file, data, length));
298  DCHECK(data_file_size64 > length);
299}
300
// Configures iSAC (payload type 104, 32 kHz wideband, mono) as both the
// receive payload type and the send codec on |channel|.
void SetChannelCodec(webrtc::VoiceEngine* engine, int channel) {
  // TODO(xians): move the codec as an input param to this function, and add
  // tests for different codecs, also add support to Android and IOS.
#if !defined(OS_ANDROID) && !defined(OS_IOS)
  webrtc::CodecInst isac;
  strcpy(isac.plname, "ISAC");
  isac.pltype = 104;
  isac.pacsize = 960;  // 960 samples at 32 kHz == 30 ms packets.
  isac.plfreq = 32000;
  isac.channels = 1;
  isac.rate = -1;  // NOTE(review): -1 presumably selects the codec's
                   // default/adaptive rate -- confirm in the VoE docs.
  ScopedWebRTCPtr<webrtc::VoECodec> codec(engine);
  EXPECT_EQ(0, codec->SetRecPayloadType(channel, isac));
  EXPECT_EQ(0, codec->SetSendCodec(channel, isac));
#endif
}
317
// Returns the time in millisecond for sending packets to WebRtc for encoding,
// signal processing, decoding and receiving them back.
int RunWebRtcLoopbackTimeTest(media::AudioManager* manager,
                              bool enable_apm) {
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  EXPECT_TRUE(engine.valid());
  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  EXPECT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  EXPECT_EQ(0, err);

  // We use SetCaptureFormat() and SetRenderFormat() to configure the audio
  // parameters so that this test can run on machine without hardware device.
  // 48 kHz stereo, 480 frames per buffer == 10 ms packets.
  // NOTE(review): only SetRenderFormat() is called below; the capture-side
  // format appears to be conveyed via the CaptureData() arguments in the
  // loop -- confirm.
  const media::AudioParameters params = media::AudioParameters(
      media::AudioParameters::AUDIO_PCM_LOW_LATENCY, CHANNEL_LAYOUT_STEREO,
      48000, 2, 480);
  WebRtcAudioCapturerSink* capturer_sink =
      static_cast<WebRtcAudioCapturerSink*>(webrtc_audio_device.get());
  WebRtcAudioRendererSource* renderer_source =
      static_cast<WebRtcAudioRendererSource*>(webrtc_audio_device.get());
  renderer_source->SetRenderFormat(params);

  // Turn on/off all the signal processing components like AGC, AEC and NS.
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  EXPECT_TRUE(audio_processing.valid());
  audio_processing->SetAgcStatus(enable_apm);
  audio_processing->SetNsStatus(enable_apm);
  audio_processing->SetEcStatus(enable_apm);

  // Create a voice channel for the WebRtc.
  int channel = base->CreateChannel();
  EXPECT_NE(-1, channel);
  SetChannelCodec(engine.get(), channel);

  // Use our fake network transmission and start playout and recording.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  EXPECT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(channel, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(channel));
  EXPECT_EQ(0, base->StartSend(channel));

  // Read speech data from a speech test file.
  // Packet sizes are in bytes: 2 bytes per 16-bit sample.
  const int input_packet_size =
      params.frames_per_buffer() * 2 * params.channels();
  const int num_output_channels = webrtc_audio_device->output_channels();
  const int output_packet_size = webrtc_audio_device->output_buffer_size() * 2 *
      num_output_channels;
  const size_t length = input_packet_size * kNumberOfPacketsForLoopbackTest;
  scoped_ptr<char[]> capture_data(new char[length]);
  ReadDataFromSpeechFile(capture_data.get(), length);

  // Start the timer.
  // NOTE(review): base::Time is wall-clock time and can jump; TimeTicks
  // would be a steadier stopwatch for latency measurement -- confirm before
  // relying on the numbers.
  scoped_ptr<uint8[]> buffer(new uint8[output_packet_size]);
  base::Time start_time = base::Time::Now();
  int delay = 0;
  std::vector<int> voe_channels;
  voe_channels.push_back(channel);
  for (int j = 0; j < kNumberOfPacketsForLoopbackTest; ++j) {
    // Sending fake capture data to WebRtc.
    // NOTE(review): the volume argument is an int parameter but 1.0 (a
    // double) is passed; the implicit conversion yields 1 -- confirm intent.
    capturer_sink->CaptureData(
        voe_channels,
        reinterpret_cast<int16*>(capture_data.get() + input_packet_size * j),
        params.sample_rate(), params.channels(), params.frames_per_buffer(),
        kHardwareLatencyInMs, 1.0, enable_apm);

    // Receiving data from WebRtc.
    renderer_source->RenderData(
        reinterpret_cast<uint8*>(buffer.get()),
        num_output_channels, webrtc_audio_device->output_buffer_size(),
        kHardwareLatencyInMs + delay);
    delay = (base::Time::Now() - start_time).InMilliseconds();
  }

  // Total elapsed time for pushing all packets through the loopback.
  int latency = (base::Time::Now() - start_time).InMilliseconds();

  EXPECT_EQ(0, base->StopSend(channel));
  EXPECT_EQ(0, base->StopPlayout(channel));
  EXPECT_EQ(0, base->DeleteChannel(channel));
  EXPECT_EQ(0, base->Terminate());

  return latency;
}
404
405}  // namespace
406
407// Trivial test which verifies that one part of the test harness
408// (HardwareSampleRatesAreValid()) works as intended for all supported
409// hardware input sample rates.
410TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidInputRates) {
411  int valid_rates[] = {16000, 32000, 44100, 48000, 96000};
412
413  // Verify that we will approve all rates listed in |valid_rates|.
414  for (size_t i = 0; i < arraysize(valid_rates); ++i) {
415    EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
416        valid_rates[i]));
417  }
418
419  // Verify that any value outside the valid range results in negative
420  // find results.
421  int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 192000};
422  for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
423    EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
424        invalid_rates[i]));
425  }
426}
427
428// Trivial test which verifies that one part of the test harness
429// (HardwareSampleRatesAreValid()) works as intended for all supported
430// hardware output sample rates.
431TEST_F(MAYBE_WebRTCAudioDeviceTest, TestValidOutputRates) {
432  int valid_rates[] = {44100, 48000, 96000};
433
434  // Verify that we will approve all rates listed in |valid_rates|.
435  for (size_t i = 0; i < arraysize(valid_rates); ++i) {
436    EXPECT_TRUE(FindElementInArray(valid_rates, arraysize(valid_rates),
437        valid_rates[i]));
438  }
439
440  // Verify that any value outside the valid range results in negative
441  // find results.
442  int invalid_rates[] = {-1, 0, 8000, 11025, 22050, 32000, 192000};
443  for (size_t i = 0; i < arraysize(invalid_rates); ++i) {
444    EXPECT_FALSE(FindElementInArray(valid_rates, arraysize(valid_rates),
445        invalid_rates[i]));
446  }
447}
448
449// Basic test that instantiates and initializes an instance of
450// WebRtcAudioDeviceImpl.
TEST_F(MAYBE_WebRTCAudioDeviceTest, Construct) {
#if defined(OS_WIN)
  // This test crashes on Win XP bots.
  if (base::win::GetVersion() <= base::win::VERSION_XP)
    return;
#endif

  // Fixed mono-in / stereo-out, 48 kHz, 16-bit, 480-frame (10 ms)
  // configuration so the test does not depend on real hardware.
  AudioParameters input_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_MONO,
      48000,
      16,
      480);

  AudioParameters output_params(
      AudioParameters::AUDIO_PCM_LOW_LATENCY,
      media::CHANNEL_LAYOUT_STEREO,
      48000,
      16,
      480);

  media::AudioHardwareConfig audio_config(input_params, output_params);
  SetAudioHardwareConfig(&audio_config);

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  int err = base->Init(webrtc_audio_device.get());
  // NOTE(review): |err| is only verified after the capturer is created, so a
  // failed Init() would not stop the capturer call -- confirm the ordering
  // is intentional.
  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
  EXPECT_EQ(0, err);
  EXPECT_EQ(0, base->Terminate());
}
487
488// Verify that a call to webrtc::VoEBase::StartPlayout() starts audio output
489// with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
490// be utilized to implement the actual audio path. The test registers a
491// webrtc::VoEExternalMedia implementation to hijack the output audio and
492// verify that streaming starts correctly.
493// Disabled when running headless since the bots don't have the required config.
494// Flaky, http://crbug.com/167299 .
TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_StartPlayout) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  // Use the real hardware parameters; bail out if they are unsupported.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // The media observer should see exactly one output stream go through the
  // created -> playing -> closed lifecycle.
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("created"))).Times(1);
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamPlaying(_, 1, true)).Times(1);
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("closed"))).Times(1);
  EXPECT_CALL(media_observer(),
      OnDeleteAudioStream(_, 1)).Times(AnyNumber());

  scoped_refptr<WebRtcAudioRenderer> renderer =
      new WebRtcAudioRenderer(kRenderViewId);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // Hijack the playout path so we can observe the audio callbacks.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());

  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel, *media_process.get()));

  EXPECT_EQ(0, base->StartPlayout(ch));
  renderer->Play();

  // Block until the first Process() callback proves that playout started.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_TRUE(webrtc_audio_device->Playing());
  EXPECT_FALSE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kPlaybackPerChannel, media_process->type());
  // NOTE(review): 80 samples / 8000 Hz imply 10 ms packets at 8 kHz in the
  // per-channel playback hook -- presumably VoE internals; confirm.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kPlaybackPerChannel));
  EXPECT_EQ(0, base->StopPlayout(ch));
  renderer->Stop();

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
564
565// Verify that a call to webrtc::VoEBase::StartRecording() starts audio input
566// with the correct set of parameters. A WebRtcAudioDeviceImpl instance will
567// be utilized to implement the actual audio path. The test registers a
568// webrtc::VoEExternalMedia implementation to hijack the input audio and
569// verify that streaming starts correctly. An external transport implementation
570// is also required to ensure that "sending" can start without actually trying
571// to send encoded packets to the network. Our main interest here is to ensure
572// that the audio capturing starts as it should.
573// Disabled when running headless since the bots don't have the required config.
574
575// TODO(leozwang): Because ExternalMediaProcessing is disabled in webrtc,
576// disable this unit test on Android for now.
577#if defined(OS_ANDROID)
578#define MAYBE_StartRecording DISABLED_StartRecording
579#elif defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
580// This test is failing on ARM linux: http://crbug.com/238490
581#define MAYBE_StartRecording DISABLED_StartRecording
582#else
583// Flakily hangs on all other platforms as well: crbug.com/268376.
584// When the flakiness has been fixed, you probably want to leave it disabled
585// on the above platforms.
586#define MAYBE_StartRecording DISABLED_StartRecording
587#endif
588
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_StartRecording) {
  if (!has_input_devices_ || !has_output_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  // Use the real hardware parameters; bail out if they are unsupported.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // TODO(tommi): extend MediaObserver and MockMediaObserver with support
  // for new interfaces, like OnSetAudioStreamRecording(). When done, add
  // EXPECT_CALL() macros here.
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // Hijack the recording path so we can observe the audio callbacks.
  ScopedWebRTCPtr<webrtc::VoEExternalMedia> external_media(engine.get());
  ASSERT_TRUE(external_media.valid());

  base::WaitableEvent event(false, false);
  scoped_ptr<WebRTCMediaProcessImpl> media_process(
      new WebRTCMediaProcessImpl(&event));
  EXPECT_EQ(0, external_media->RegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel, *media_process.get()));

  // We must add an external transport implementation to be able to start
  // recording without actually sending encoded packets to the network. All
  // we want to do here is to verify that audio capturing starts as it should.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartSend(ch));

  // Create and initialize the capturer which starts the source of the data
  // flow.
  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));

  // Create and start a local audio track which is bridging the data flow
  // between the capturer and WebRtcAudioDeviceImpl.
  scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
                                    webrtc_audio_device));
  // Connect the VoE voice channel to the audio track.
  static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
      GetRenderer()->AddChannel(ch);

  // Verify we get the data flow.
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  WaitForIOThreadCompletion();

  EXPECT_FALSE(webrtc_audio_device->Playing());
  EXPECT_TRUE(webrtc_audio_device->Recording());
  EXPECT_EQ(ch, media_process->channel_id());
  EXPECT_EQ(webrtc::kRecordingPerChannel, media_process->type());
  // NOTE(review): 80 samples / 8000 Hz imply 10 ms packets at 8 kHz in the
  // per-channel recording hook, mirroring the playout test -- confirm.
  EXPECT_EQ(80, media_process->packet_size());
  EXPECT_EQ(8000, media_process->sample_rate());

  EXPECT_EQ(0, external_media->DeRegisterExternalMediaProcessing(
      ch, webrtc::kRecordingPerChannel));
  EXPECT_EQ(0, base->StopSend(ch));

  local_audio_track->Stop();
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
669
670// Uses WebRtcAudioDeviceImpl to play a local wave file.
671// Disabled when running headless since the bots don't have the required config.
672// Flaky, http://crbug.com/167298 .
TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_PlayLocalFile) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  std::string file_path(
      GetTestDataPath(FILE_PATH_LITERAL("speechmusic_mono_16kHz.pcm")));

  // Use the real hardware parameters; bail out if they are unsupported.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // The media observer should see exactly one output stream go through the
  // created -> playing -> closed lifecycle.
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("created"))).Times(1);
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamPlaying(_, 1, true)).Times(1);
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("closed"))).Times(1);
  EXPECT_CALL(media_observer(),
      OnDeleteAudioStream(_, 1)).Times(AnyNumber());

  scoped_refptr<WebRtcAudioRenderer> renderer =
      new WebRtcAudioRenderer(kRenderViewId);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);
  EXPECT_EQ(0, base->StartPlayout(ch));
  renderer->Play();

  // Sanity-check the file before playing it.
  ScopedWebRTCPtr<webrtc::VoEFile> file(engine.get());
  ASSERT_TRUE(file.valid());
  int duration = 0;
  EXPECT_EQ(0, file->GetFileDuration(file_path.c_str(), duration,
                                     webrtc::kFileFormatPcm16kHzFile));
  EXPECT_NE(0, duration);

  EXPECT_EQ(0, file->StartPlayingFileLocally(ch, file_path.c_str(), false,
                                             webrtc::kFileFormatPcm16kHzFile));

  // Play six seconds worth of audio and then quit (the delayed-quit task
  // below fires after 6 seconds).
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(6));
  message_loop_.Run();

  renderer->Stop();
  // NOTE(review): StopSend() is called although StartSend() never was in
  // this test -- confirm the call is intentionally tolerated here.
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
739
740// Uses WebRtcAudioDeviceImpl to play out recorded audio in loopback.
741// An external transport implementation is utilized to feed back RTP packets
742// which are recorded, encoded, packetized into RTP packets and finally
743// "transmitted". The RTP packets are then fed back into the VoiceEngine
744// where they are decoded and played out on the default audio output device.
745// Disabled when running headless since the bots don't have the required config.
746// TODO(henrika): improve quality by using a wideband codec, enabling noise-
747// suppressions etc.
748// FullDuplexAudioWithAGC is flaky on Android, disable it for now.
749// Also flakily hangs on Windows: crbug.com/269348.
750#if defined(OS_ANDROID) || defined(OS_WIN)
751#define MAYBE_FullDuplexAudioWithAGC DISABLED_FullDuplexAudioWithAGC
752#else
753#define MAYBE_FullDuplexAudioWithAGC FullDuplexAudioWithAGC
754#endif
TEST_F(MAYBE_WebRTCAudioDeviceTest, MAYBE_FullDuplexAudioWithAGC) {
  if (!has_output_devices_ || !has_input_devices_) {
    LOG(WARNING) << "Missing audio devices.";
    return;
  }

  // Use the real hardware parameters; bail out if they are unsupported.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  if (!HardwareSampleRatesAreValid())
    return;

  // One output stream goes through created -> playing -> closed.
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("created")));
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamPlaying(_, 1, true));
  EXPECT_CALL(media_observer(),
      OnSetAudioStreamStatus(_, 1, StrEq("closed")));
  EXPECT_CALL(media_observer(),
      OnDeleteAudioStream(_, 1)).Times(AnyNumber());

  scoped_refptr<WebRtcAudioRenderer> renderer =
      new WebRtcAudioRenderer(kRenderViewId);
  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());
  EXPECT_TRUE(webrtc_audio_device->SetAudioRenderer(renderer.get()));

  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  // Verify the platform-dependent default AGC state.
  ScopedWebRTCPtr<webrtc::VoEAudioProcessing> audio_processing(engine.get());
  ASSERT_TRUE(audio_processing.valid());
#if defined(OS_ANDROID)
  // On Android, by default AGC is off.
  bool enabled = true;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_FALSE(enabled);
#else
  // On other platforms AGC defaults to on, in adaptive-analog mode.
  bool enabled = false;
  webrtc::AgcModes agc_mode = webrtc::kAgcDefault;
  EXPECT_EQ(0, audio_processing->GetAgcStatus(enabled, agc_mode));
  EXPECT_TRUE(enabled);
  EXPECT_EQ(agc_mode, webrtc::kAgcAdaptiveAnalog);
#endif

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // Wire up capture: capturer -> local audio track -> VoE channel.
  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
  scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(webrtc_audio_device->GetDefaultCapturer(),
                                    webrtc_audio_device));
  // Connect the VoE voice channel to the audio track.
  static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
      GetRenderer()->AddChannel(ch);

  // Fake network transport loops "sent" RTP packets straight back in.
  ScopedWebRTCPtr<webrtc::VoENetwork> network(engine.get());
  ASSERT_TRUE(network.valid());
  scoped_ptr<WebRTCTransportImpl> transport(
      new WebRTCTransportImpl(network.get()));
  EXPECT_EQ(0, network->RegisterExternalTransport(ch, *transport.get()));
  EXPECT_EQ(0, base->StartPlayout(ch));
  EXPECT_EQ(0, base->StartSend(ch));
  renderer->Play();

  // Let the loopback run for two seconds so a human can hear it.
  LOG(INFO) << ">> You should now be able to hear yourself in loopback...";
  message_loop_.PostDelayedTask(FROM_HERE,
                                base::MessageLoop::QuitClosure(),
                                base::TimeDelta::FromSeconds(2));
  message_loop_.Run();

  // Tear down in reverse order of setup.
  local_audio_track->Stop();
  renderer->Stop();
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->StopPlayout(ch));

  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
841
// Test times out on bots, see http://crbug.com/247447
// Measures the elapsed wall-clock time from StartSend() on a voice channel
// until the capture-side sink is signaled, and reports it as a perf result
// ("webrtc_recording_setup_c").
TEST_F(MAYBE_WebRTCAudioDeviceTest, DISABLED_WebRtcRecordingSetupTime) {
  if (!has_input_devices_) {
    LOG(WARNING) << "Missing audio capture devices.";
    return;
  }

  // Feed the real hardware parameters into the test fixture so WebRTC sees
  // the actual device sample rates.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  // Skip the test when the hardware rates are outside the supported range.
  if (!HardwareSampleRatesAreValid())
    return;

  scoped_refptr<WebRtcAudioDeviceImpl> webrtc_audio_device(
      new WebRtcAudioDeviceImpl());

  // Bring up a VoiceEngine instance backed by our audio device.
  WebRTCAutoDelete<webrtc::VoiceEngine> engine(webrtc::VoiceEngine::Create());
  ASSERT_TRUE(engine.valid());

  ScopedWebRTCPtr<webrtc::VoEBase> base(engine.get());
  ASSERT_TRUE(base.valid());
  int err = base->Init(webrtc_audio_device.get());
  ASSERT_EQ(0, err);

  int ch = base->CreateChannel();
  EXPECT_NE(-1, ch);

  // The mock sink presumably signals |event| when capture data first
  // arrives — confirm against MockWebRtcAudioCapturerSink.
  EXPECT_TRUE(CreateAndInitializeCapturer(webrtc_audio_device.get()));
  base::WaitableEvent event(false, false);
  scoped_ptr<MockWebRtcAudioCapturerSink> sink(
      new MockWebRtcAudioCapturerSink(&event));

  // Create and start a local audio track. Starting the audio track will connect
  // the audio track to the capturer and also start the source of the capturer.
  scoped_refptr<WebRtcLocalAudioTrack> local_audio_track(
      CreateAndStartLocalAudioTrack(
          webrtc_audio_device->GetDefaultCapturer().get(), sink.get()));

  // connect the VoE voice channel to the audio track.
  static_cast<webrtc::AudioTrackInterface*>(local_audio_track.get())->
      GetRenderer()->AddChannel(ch);

  // Start the clock just before sending begins; the measured interval ends
  // when the sink fires the event.
  base::Time start_time = base::Time::Now();
  EXPECT_EQ(0, base->StartSend(ch));

  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  int delay = (base::Time::Now() - start_time).InMilliseconds();
  PrintPerfResultMs("webrtc_recording_setup_c", "t", delay);

  // Tear down: detach the sink before stopping the track and channel.
  local_audio_track->RemoveSink(sink.get());
  local_audio_track->Stop();
  EXPECT_EQ(0, base->StopSend(ch));
  EXPECT_EQ(0, base->DeleteChannel(ch));
  EXPECT_EQ(0, base->Terminate());
}
898
// Measures the elapsed wall-clock time from renderer->Play() until the
// renderer source is signaled, and reports it as a perf result
// ("webrtc_playout_setup_c").
TEST_F(MAYBE_WebRTCAudioDeviceTest, WebRtcPlayoutSetupTime) {
  if (!has_output_devices_) {
    LOG(WARNING) << "No output device detected.";
    return;
  }

  // Feed the real hardware parameters into the test fixture so WebRTC sees
  // the actual device sample rates.
  scoped_ptr<media::AudioHardwareConfig> config =
      CreateRealHardwareConfig(audio_manager_.get());
  SetAudioHardwareConfig(config.get());

  // Skip the test when the hardware rates are outside the supported range.
  if (!HardwareSampleRatesAreValid())
    return;

  // Media-observer expectations: exactly one "playing" notification; status
  // and deletion notifications may occur any number of times.
  EXPECT_CALL(media_observer(),
              OnSetAudioStreamStatus(_, 1, _)).Times(AnyNumber());
  EXPECT_CALL(media_observer(),
              OnSetAudioStreamPlaying(_, 1, true));
  EXPECT_CALL(media_observer(),
              OnDeleteAudioStream(_, 1)).Times(AnyNumber());

  // The mock source presumably signals |event| on its first render
  // callback — confirm against MockWebRtcAudioRendererSource.
  base::WaitableEvent event(false, false);
  scoped_ptr<MockWebRtcAudioRendererSource> renderer_source(
      new MockWebRtcAudioRendererSource(&event));
  scoped_refptr<WebRtcAudioRenderer> renderer =
      new WebRtcAudioRenderer(kRenderViewId);
  renderer->Initialize(renderer_source.get());

  // Start the timer and playout.
  base::Time start_time = base::Time::Now();
  renderer->Play();
  EXPECT_TRUE(event.TimedWait(TestTimeouts::action_timeout()));
  int delay = (base::Time::Now() - start_time).InMilliseconds();
  PrintPerfResultMs("webrtc_playout_setup_c", "t", delay);

  renderer->Stop();
}
935
936#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
937// Timing out on ARM linux bot: http://crbug.com/238490
938#define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
939        DISABLED_WebRtcLoopbackTimeWithoutSignalProcessing
940#else
941#define MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing \
942        WebRtcLoopbackTimeWithoutSignalProcessing
943#endif
944
945TEST_F(MAYBE_WebRTCAudioDeviceTest,
946       MAYBE_WebRtcLoopbackTimeWithoutSignalProcessing) {
947  int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), false);
948  PrintPerfResultMs("webrtc_loopback_without_sigal_processing (100 packets)",
949                    "t", latency);
950}
951
952#if defined(OS_LINUX) && !defined(OS_CHROMEOS) && defined(ARCH_CPU_ARM_FAMILY)
953// Timing out on ARM linux bot: http://crbug.com/238490
954#define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
955        DISABLED_WebRtcLoopbackTimeWithSignalProcessing
956#else
957#define MAYBE_WebRtcLoopbackTimeWithSignalProcessing \
958        WebRtcLoopbackTimeWithSignalProcessing
959#endif
960
961TEST_F(MAYBE_WebRTCAudioDeviceTest,
962       MAYBE_WebRtcLoopbackTimeWithSignalProcessing) {
963  int latency = RunWebRtcLoopbackTimeTest(audio_manager_.get(), true);
964  PrintPerfResultMs("webrtc_loopback_with_signal_processing (100 packets)",
965                    "t", latency);
966}
967
968}  // namespace content
969