1/*
2 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 *  Use of this source code is governed by a BSD-style license
5 *  that can be found in the LICENSE file in the root of the source
6 *  tree. An additional intellectual property rights grant can be found
7 *  in the file PATENTS.  All contributing project authors may
8 *  be found in the AUTHORS file in the root of the source tree.
9 */
10
11#if !defined(__has_feature) || !__has_feature(objc_arc)
12#error "This file requires ARC support."
13#endif
14
15#import <AVFoundation/AVFoundation.h>
16#import <Foundation/Foundation.h>
17
18#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
19
20#include "webrtc/base/atomicops.h"
21#include "webrtc/base/checks.h"
22#include "webrtc/base/criticalsection.h"
23#include "webrtc/base/logging.h"
24#include "webrtc/base/thread_annotations.h"
25#include "webrtc/modules/audio_device/fine_audio_buffer.h"
26#include "webrtc/modules/utility/include/helpers_ios.h"
27
28namespace webrtc {
29
// Protects |g_audio_session_users|.
static rtc::GlobalLockPod g_lock;

// Counts the number of users (=instances of this object) who need an active
// audio session. This variable is used to ensure that we only activate an
// audio session for the first user and deactivate it for the last.
// Member is static to ensure that the value is counted for all instances
// and not per instance.
static int g_audio_session_users GUARDED_BY(g_lock) = 0;
39
40#define LOGI() LOG(LS_INFO) << "AudioDeviceIOS::"
41
42#define LOG_AND_RETURN_IF_ERROR(error, message) \
43  do {                                          \
44    OSStatus err = error;                       \
45    if (err) {                                  \
46      LOG(LS_ERROR) << message << ": " << err;  \
47      return false;                             \
48    }                                           \
49  } while (0)
50
51#define LOG_IF_ERROR(error, message)           \
52  do {                                         \
53    OSStatus err = error;                      \
54    if (err) {                                 \
55      LOG(LS_ERROR) << message << ": " << err; \
56    }                                          \
57  } while (0)
58
// Preferred hardware sample rate (unit is in Hertz). The client sample rate
// will be set to this value as well to avoid resampling in the audio unit's
// format converter. Note that some devices, e.g. BT headsets, only support
// 8000Hz as native sample rate.
const double kPreferredSampleRate = 48000.0;
// Use a hardware I/O buffer size (unit is in seconds) that matches the 10ms
// size used by WebRTC. The exact actual size will differ between devices.
// Example: using 48kHz on iPhone 6 results in a native buffer size of
// ~10.6667ms or 512 audio frames per buffer. The FineAudioBuffer instance will
// take care of any buffering required to convert between native buffers and
// buffers used by WebRTC. It is beneficial for the performance if the native
// size is as close to 10ms as possible since it results in "clean" callback
// sequence without bursts of callbacks back to back.
const double kPreferredIOBufferDuration = 0.01;
// Try to use mono to save resources. Also avoids channel format conversion
// in the I/O audio unit. Initial tests have shown that it is possible to use
// mono natively for built-in microphones and for BT headsets but not for
// wired headsets. Wired headsets only support stereo as native channel format
// but it is a low cost operation to do a format conversion to mono in the
// audio unit. Hence, we will not hit a RTC_CHECK in
// VerifyAudioParametersForActiveAudioSession() for a mismatch between the
// preferred number of channels and the actual number of channels.
const int kPreferredNumberOfChannels = 1;
// Number of bytes per audio sample for 16-bit signed integer representation.
const UInt32 kBytesPerSample = 2;
// Hardcoded delay estimates based on real measurements.
// TODO(henrika): these values are not used in combination with built-in AEC.
// Can most likely be removed.
const UInt16 kFixedPlayoutDelayEstimate = 30;
const UInt16 kFixedRecordDelayEstimate = 30;
// Calls to AudioUnitInitialize() can fail if called back-to-back on different
// ADM instances. A fall-back solution is to allow multiple sequential calls
// with as small delay between each. This factor sets the max number of allowed
// initialization attempts.
const int kMaxNumberOfAudioUnitInitializeAttempts = 5;
94
95
96using ios::CheckAndLogError;
97
98// Verifies that the current audio session supports input audio and that the
99// required category and mode are enabled.
// Returns true if the audio session currently offers an input path and has
// the required category (PlayAndRecord) and mode (VoiceChat) in effect.
static bool VerifyAudioSession(AVAudioSession* session) {
  LOG(LS_INFO) << "VerifyAudioSession";
  // Recording is impossible without an available input path (e.g. the user
  // may have denied microphone access).
  if (!session.isInputAvailable) {
    LOG(LS_ERROR) << "No audio input path is available!";
    return false;
  }

  // Confirm that the requested category actually took effect.
  const bool category_ok =
      [session.category isEqualToString:AVAudioSessionCategoryPlayAndRecord];
  if (!category_ok) {
    LOG(LS_ERROR)
        << "Failed to set category to AVAudioSessionCategoryPlayAndRecord";
    return false;
  }
  // Confirm that the requested mode actually took effect.
  const bool mode_ok =
      [session.mode isEqualToString:AVAudioSessionModeVoiceChat];
  if (!mode_ok) {
    LOG(LS_ERROR) << "Failed to set mode to AVAudioSessionModeVoiceChat";
    return false;
  }
  return true;
}
120
121// Activates an audio session suitable for full duplex VoIP sessions when
122// |activate| is true. Also sets the preferred sample rate and IO buffer
123// duration. Deactivates an active audio session if |activate| is set to false.
// Activates an audio session suitable for full duplex VoIP sessions when
// |activate| is true. Also sets the preferred sample rate and IO buffer
// duration. Deactivates an active audio session if |activate| is set to false.
// Returns false if activation/deactivation or verification fails. Must be
// called with |g_lock| held since it manipulates global audio session state.
static bool ActivateAudioSession(AVAudioSession* session, bool activate)
    EXCLUSIVE_LOCKS_REQUIRED(g_lock) {
  LOG(LS_INFO) << "ActivateAudioSession(" << activate << ")";
  @autoreleasepool {
    NSError* error = nil;
    BOOL success = NO;

    if (!activate) {
      // Deactivate the audio session using an extra option and then return.
      // AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation is used to
      // ensure that other audio sessions that were interrupted by our session
      // can return to their active state. It is recommended for VoIP apps to
      // use this option.
      success = [session
            setActive:NO
          withOptions:AVAudioSessionSetActiveOptionNotifyOthersOnDeactivation
                error:&error];
      return CheckAndLogError(success, error);
    }

    // Go ahead and activate our own audio session since |activate| is true.
    // Use a category which supports simultaneous recording and playback.
    // By default, using this category implies that our app’s audio is
    // nonmixable, hence activating the session will interrupt any other
    // audio sessions which are also nonmixable.
    // NOTE: NSString values must be compared with isEqualToString:; operator
    // == only compares object identity and matches the verification done in
    // VerifyAudioSession().
    if (![session.category
            isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
      error = nil;
      success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
                         withOptions:AVAudioSessionCategoryOptionAllowBluetooth
                               error:&error];
      RTC_DCHECK(CheckAndLogError(success, error));
    }

    // Specify mode for two-way voice communication (e.g. VoIP).
    if (![session.mode isEqualToString:AVAudioSessionModeVoiceChat]) {
      error = nil;
      success = [session setMode:AVAudioSessionModeVoiceChat error:&error];
      RTC_DCHECK(CheckAndLogError(success, error));
    }

    // Set the preferred hardware sample rate of the session.
    // It is essential that we use the same sample rate as stream format
    // to ensure that the I/O unit does not have to do sample rate conversion.
    error = nil;
    success =
        [session setPreferredSampleRate:kPreferredSampleRate error:&error];
    RTC_DCHECK(CheckAndLogError(success, error));

    // Set the preferred audio I/O buffer duration, in seconds.
    error = nil;
    success = [session setPreferredIOBufferDuration:kPreferredIOBufferDuration
                                              error:&error];
    RTC_DCHECK(CheckAndLogError(success, error));

    // Activate the audio session. Activation can fail if another active audio
    // session (e.g. phone call) has higher priority than ours.
    error = nil;
    success = [session setActive:YES error:&error];
    if (!CheckAndLogError(success, error)) {
      return false;
    }

    // Ensure that the active audio session has the correct category and mode.
    if (!VerifyAudioSession(session)) {
      LOG(LS_ERROR) << "Failed to verify audio session category and mode";
      return false;
    }

    // Try to set the preferred number of hardware audio channels. These calls
    // must be done after setting the audio session’s category and mode and
    // activating the session.
    // We try to use mono in both directions to save resources and format
    // conversions in the audio unit. Some devices does only support stereo;
    // e.g. wired headset on iPhone 6.
    // TODO(henrika): add support for stereo if needed.
    error = nil;
    success =
        [session setPreferredInputNumberOfChannels:kPreferredNumberOfChannels
                                             error:&error];
    RTC_DCHECK(CheckAndLogError(success, error));
    error = nil;
    success =
        [session setPreferredOutputNumberOfChannels:kPreferredNumberOfChannels
                                              error:&error];
    RTC_DCHECK(CheckAndLogError(success, error));
    return true;
  }
}
212
213// An application can create more than one ADM and start audio streaming
214// for all of them. It is essential that we only activate the app's audio
215// session once (for the first one) and deactivate it once (for the last).
// An application can create more than one ADM and start audio streaming for
// all of them. The app-wide audio session must only be activated once: for
// the first user. A global counter keeps track of the number of users.
static bool ActivateAudioSession() {
  LOGI() << "ActivateAudioSession";
  rtc::GlobalLockScope ls(&g_lock);
  const bool is_first_user = (g_audio_session_users == 0);
  if (is_first_user) {
    // The system provides an audio session object upon launch of an
    // application. Obtaining the shared instance implicitly initializes the
    // session, which is required for interruption handling to work.
    AVAudioSession* session = [AVAudioSession sharedInstance];
    // Try to activate the audio session and ask for a set of preferred audio
    // parameters.
    if (!ActivateAudioSession(session, true)) {
      LOG(LS_ERROR) << "Failed to activate the audio session";
      return false;
    }
    LOG(LS_INFO) << "The audio session is now activated";
  }
  ++g_audio_session_users;
  LOG(LS_INFO) << "Number of audio session users: " << g_audio_session_users;
  return true;
}
237
238// If more than one object is using the audio session, ensure that only the
239// last object deactivates. Apple recommends: "activate your audio session
240// only as needed and deactivate it when you are not using audio".
// Mirror of ActivateAudioSession(): only the last remaining user actually
// deactivates the shared session. Apple recommends: "activate your audio
// session only as needed and deactivate it when you are not using audio".
static bool DeactivateAudioSession() {
  LOGI() << "DeactivateAudioSession";
  rtc::GlobalLockScope ls(&g_lock);
  const bool is_last_user = (g_audio_session_users == 1);
  if (is_last_user) {
    AVAudioSession* session = [AVAudioSession sharedInstance];
    if (!ActivateAudioSession(session, false)) {
      LOG(LS_ERROR) << "Failed to deactivate the audio session";
      return false;
    }
    LOG(LS_INFO) << "Our audio session is now deactivated";
  }
  --g_audio_session_users;
  LOG(LS_INFO) << "Number of audio session users: " << g_audio_session_users;
  return true;
}
256
257#if !defined(NDEBUG)
258// Helper method for printing out an AudioStreamBasicDescription structure.
// Helper method for printing out an AudioStreamBasicDescription structure.
// The four-char format ID is byte-swapped to big-endian so it reads as text
// (e.g. 'lpcm').
static void LogABSD(AudioStreamBasicDescription absd) {
  char formatIDString[5];
  UInt32 formatID = CFSwapInt32HostToBig(absd.mFormatID);
  bcopy(&formatID, formatIDString, 4);
  formatIDString[4] = '\0';
  LOG(LS_INFO) << "LogABSD";
  LOG(LS_INFO) << " sample rate: " << absd.mSampleRate;
  LOG(LS_INFO) << " format ID: " << formatIDString;
  LOG(LS_INFO) << " format flags: " << std::hex << absd.mFormatFlags;
  LOG(LS_INFO) << " bytes per packet: " << absd.mBytesPerPacket;
  LOG(LS_INFO) << " frames per packet: " << absd.mFramesPerPacket;
  LOG(LS_INFO) << " bytes per frame: " << absd.mBytesPerFrame;
  // Label fixed: the field logged here is mChannelsPerFrame, not a
  // per-packet channel count.
  LOG(LS_INFO) << " channels per frame: " << absd.mChannelsPerFrame;
  LOG(LS_INFO) << " bits per channel: " << absd.mBitsPerChannel;
  LOG(LS_INFO) << " reserved: " << absd.mReserved;
}
275
276// Helper method that logs essential device information strings.
// Helper method that logs essential device information strings.
// Debug-only (guarded by !defined(NDEBUG) at file scope). The autorelease
// pool drains the temporary NSStrings created by the ios:: helpers.
static void LogDeviceInfo() {
  LOG(LS_INFO) << "LogDeviceInfo";
  @autoreleasepool {
    LOG(LS_INFO) << " system name: " << ios::GetSystemName();
    LOG(LS_INFO) << " system version: " << ios::GetSystemVersion();
    LOG(LS_INFO) << " device type: " << ios::GetDeviceType();
    LOG(LS_INFO) << " device name: " << ios::GetDeviceName();
  }
}
286#endif  // !defined(NDEBUG)
287
// Constructs an uninitialized ADM. All state flags start cleared; the
// |recording_| and |playing_| members are plain ints (0/1) because they are
// accessed atomically via rtc::AtomicOps from the audio callbacks.
// NOTE(review): initializer order must match the member declaration order in
// the header (not visible here) to avoid -Wreorder warnings.
AudioDeviceIOS::AudioDeviceIOS()
    : audio_device_buffer_(nullptr),
      vpio_unit_(nullptr),
      recording_(0),
      playing_(0),
      initialized_(false),
      rec_is_initialized_(false),
      play_is_initialized_(false),
      audio_interruption_observer_(nullptr),
      route_change_observer_(nullptr) {
  LOGI() << "ctor" << ios::GetCurrentThreadDescription();
}
300
// Destructor stops any active audio and releases resources via Terminate().
// Must run on the same thread that constructed the object.
AudioDeviceIOS::~AudioDeviceIOS() {
  LOGI() << "~dtor" << ios::GetCurrentThreadDescription();
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  Terminate();
}
306
// Stores the audio device buffer (ADB) pointer used to exchange audio with
// the WebRTC core. The buffer is owned by the caller and must outlive this
// object.
void AudioDeviceIOS::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer) {
  LOGI() << "AttachAudioBuffer";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(audioBuffer);
  audio_device_buffer_ = audioBuffer;
}
313
// One-time initialization. Records the preferred audio parameters and
// configures the audio device buffer. Idempotent: repeated calls are no-ops.
int32_t AudioDeviceIOS::Init() {
  LOGI() << "Init";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (initialized_) {
    return 0;  // Already initialized; nothing more to do.
  }
#if !defined(NDEBUG)
  LogDeviceInfo();
#endif
  // Seed both directions with the preferred sample rate and channel count.
  // These values have not been confirmed by the hardware yet since
  // ActivateAudioSession() is not called until audio is about to start, but
  // storing them now allows early configuration and later verification.
  playout_parameters_.reset(kPreferredSampleRate, kPreferredNumberOfChannels);
  record_parameters_.reset(kPreferredSampleRate, kPreferredNumberOfChannels);
  // Propagate the parameters to the audio device buffer (ADB). Even if a
  // mono audio session cannot be obtained, the I/O audio unit will be told
  // to perform a channel format conversion so mono is guaranteed on the
  // "input side" of the audio unit.
  UpdateAudioDeviceBuffer();
  initialized_ = true;
  return 0;
}
337
// Tears down the object: stops any ongoing playout/recording and clears the
// initialized state. Warns if the global audio session is still active,
// which indicates an unbalanced activate/deactivate somewhere.
int32_t AudioDeviceIOS::Terminate() {
  LOGI() << "Terminate";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  if (!initialized_) {
    return 0;
  }
  StopPlayout();
  StopRecording();
  initialized_ = false;
  {
    // Scoped lock: only needed to inspect the shared session-user counter.
    rtc::GlobalLockScope ls(&g_lock);
    if (g_audio_session_users != 0) {
      LOG(LS_WARNING) << "Object is destructed with an active audio session";
    }
    RTC_DCHECK_GE(g_audio_session_users, 0);
  }
  return 0;
}
356
// Prepares the playout side. The underlying voice-processing audio unit is
// shared with recording, so setup is skipped if the recording side already
// performed it.
int32_t AudioDeviceIOS::InitPlayout() {
  LOGI() << "InitPlayout";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!play_is_initialized_);
  RTC_DCHECK(!playing_);
  const bool needs_setup = !rec_is_initialized_;
  if (needs_setup && !InitPlayOrRecord()) {
    LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitPlayout!";
    return -1;
  }
  play_is_initialized_ = true;
  return 0;
}
372
// Prepares the recording side. Mirrors InitPlayout(): the shared audio unit
// is only set up once, by whichever side initializes first.
int32_t AudioDeviceIOS::InitRecording() {
  LOGI() << "InitRecording";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(initialized_);
  RTC_DCHECK(!rec_is_initialized_);
  RTC_DCHECK(!recording_);
  const bool needs_setup = !play_is_initialized_;
  if (needs_setup && !InitPlayOrRecord()) {
    LOG_F(LS_ERROR) << "InitPlayOrRecord failed for InitRecording!";
    return -1;
  }
  rec_is_initialized_ = true;
  return 0;
}
388
// Starts playout. Only starts the I/O audio unit if recording has not done
// so already; the |playing_| flag is published atomically for the audio
// callback threads.
int32_t AudioDeviceIOS::StartPlayout() {
  LOGI() << "StartPlayout";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(play_is_initialized_);
  RTC_DCHECK(!playing_);
  fine_audio_buffer_->ResetPlayout();
  if (!recording_) {
    const OSStatus status = AudioOutputUnitStart(vpio_unit_);
    if (status != noErr) {
      LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartPlayout: "
                      << status;
      return -1;
    }
    LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  rtc::AtomicOps::ReleaseStore(&playing_, 1);
  return 0;
}
407
// Stops playout. The shared audio unit is kept alive while recording still
// needs it; otherwise it is shut down completely.
int32_t AudioDeviceIOS::StopPlayout() {
  LOGI() << "StopPlayout";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  const bool can_stop = play_is_initialized_ && playing_;
  if (!can_stop) {
    return 0;
  }
  if (!recording_) {
    ShutdownPlayOrRecord();
  }
  play_is_initialized_ = false;
  rtc::AtomicOps::ReleaseStore(&playing_, 0);
  return 0;
}
421
// Starts recording. Mirrors StartPlayout(): the I/O audio unit is started
// only if playout has not started it already, and |recording_| is published
// atomically for the audio callback threads.
int32_t AudioDeviceIOS::StartRecording() {
  LOGI() << "StartRecording";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(rec_is_initialized_);
  RTC_DCHECK(!recording_);
  fine_audio_buffer_->ResetRecord();
  if (!playing_) {
    const OSStatus status = AudioOutputUnitStart(vpio_unit_);
    if (status != noErr) {
      LOG_F(LS_ERROR) << "AudioOutputUnitStart failed for StartRecording: "
                      << status;
      return -1;
    }
    LOG(LS_INFO) << "Voice-Processing I/O audio unit is now started";
  }
  rtc::AtomicOps::ReleaseStore(&recording_, 1);
  return 0;
}
440
// Stops recording. The shared audio unit is kept alive while playout still
// needs it; otherwise it is shut down completely.
int32_t AudioDeviceIOS::StopRecording() {
  LOGI() << "StopRecording";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  const bool can_stop = rec_is_initialized_ && recording_;
  if (!can_stop) {
    return 0;
  }
  if (!playing_) {
    ShutdownPlayOrRecord();
  }
  rec_is_initialized_ = false;
  rtc::AtomicOps::ReleaseStore(&recording_, 0);
  return 0;
}
454
455// Change the default receiver playout route to speaker.
// Change the default receiver playout route to speaker.
// Returns 0 on success, -1 if the session category could not be updated.
int32_t AudioDeviceIOS::SetLoudspeakerStatus(bool enable) {
  LOGI() << "SetLoudspeakerStatus(" << enable << ")";

  AVAudioSession* session = [AVAudioSession sharedInstance];
  NSString* category = session.category;
  AVAudioSessionCategoryOptions options = session.categoryOptions;
  // Respect old category options if category is
  // AVAudioSessionCategoryPlayAndRecord. Otherwise reset it since old options
  // might not be valid for this category.
  if ([category isEqualToString:AVAudioSessionCategoryPlayAndRecord]) {
    if (enable) {
      options |= AVAudioSessionCategoryOptionDefaultToSpeaker;
    } else {
      options &= ~AVAudioSessionCategoryOptionDefaultToSpeaker;
    }
  } else {
    options = AVAudioSessionCategoryOptionDefaultToSpeaker;
  }
  NSError* error = nil;
  BOOL success = [session setCategory:AVAudioSessionCategoryPlayAndRecord
                          withOptions:options
                                error:&error];
  CheckAndLogError(success, error);
  // Base the result on the method's BOOL return value rather than |error|;
  // Cocoa only guarantees that the error out-parameter is meaningful when
  // the call itself reports failure.
  return success ? 0 : -1;
}
481
// Reports whether the session currently defaults playout to the speaker.
// Always succeeds (returns 0); the result is written to |enabled|.
int32_t AudioDeviceIOS::GetLoudspeakerStatus(bool& enabled) const {
  LOGI() << "GetLoudspeakerStatus";
  AVAudioSession* session = [AVAudioSession sharedInstance];
  const AVAudioSessionCategoryOptions options = session.categoryOptions;
  enabled = (options & AVAudioSessionCategoryOptionDefaultToSpeaker) != 0;
  return 0;
}
489
// Reports a fixed, hardcoded playout delay estimate (in milliseconds); no
// dynamic delay measurement is performed on iOS.
int32_t AudioDeviceIOS::PlayoutDelay(uint16_t& delayMS) const {
  delayMS = kFixedPlayoutDelayEstimate;
  return 0;
}
494
// Reports a fixed, hardcoded recording delay estimate (in milliseconds); no
// dynamic delay measurement is performed on iOS.
int32_t AudioDeviceIOS::RecordingDelay(uint16_t& delayMS) const {
  delayMS = kFixedRecordDelayEstimate;
  return 0;
}
499
// Copies the current playout audio parameters into |params|.
// The parameters must already be valid, i.e. Init() must have been called.
int AudioDeviceIOS::GetPlayoutAudioParameters(AudioParameters* params) const {
  LOGI() << "GetPlayoutAudioParameters";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(playout_parameters_.is_valid());
  *params = playout_parameters_;
  return 0;
}
507
// Copies the current recording audio parameters into |params|.
// The parameters must already be valid, i.e. Init() must have been called.
int AudioDeviceIOS::GetRecordAudioParameters(AudioParameters* params) const {
  LOGI() << "GetRecordAudioParameters";
  RTC_DCHECK(thread_checker_.CalledOnValidThread());
  RTC_DCHECK(record_parameters_.is_valid());
  *params = record_parameters_;
  return 0;
}
515
// Pushes the current playout/record sample rates and channel counts to the
// audio device buffer (ADB).
void AudioDeviceIOS::UpdateAudioDeviceBuffer() {
  // Log message fixed to match the actual method name (was
  // "UpdateAudioDevicebuffer").
  LOGI() << "UpdateAudioDeviceBuffer";
  // AttachAudioBuffer() is called at construction by the main class but check
  // just in case.
  RTC_DCHECK(audio_device_buffer_) << "AttachAudioBuffer must be called first";
  // Inform the audio device buffer (ADB) about the new audio format.
  audio_device_buffer_->SetPlayoutSampleRate(playout_parameters_.sample_rate());
  audio_device_buffer_->SetPlayoutChannels(playout_parameters_.channels());
  audio_device_buffer_->SetRecordingSampleRate(
      record_parameters_.sample_rate());
  audio_device_buffer_->SetRecordingChannels(record_parameters_.channels());
}
528
// Installs observers for audio session interruptions and route changes.
// The observer tokens are retained via __bridge_retained because the ivars
// are void* (the header is included by pure C++ files); ownership is handed
// back to ARC in UnregisterNotificationObservers().
void AudioDeviceIOS::RegisterNotificationObservers() {
  LOGI() << "RegisterNotificationObservers";
  // This code block will be called when AVAudioSessionInterruptionNotification
  // is observed.
  void (^interrupt_block)(NSNotification*) = ^(NSNotification* notification) {
    NSNumber* type_number =
        notification.userInfo[AVAudioSessionInterruptionTypeKey];
    AVAudioSessionInterruptionType type =
        (AVAudioSessionInterruptionType)type_number.unsignedIntegerValue;
    LOG(LS_INFO) << "Audio session interruption:";
    switch (type) {
      case AVAudioSessionInterruptionTypeBegan:
        // The system has deactivated our audio session.
        // Stop the active audio unit.
        LOG(LS_INFO) << " Began => stopping the audio unit";
        LOG_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
                     "Failed to stop the Voice-Processing I/O unit");
        break;
      case AVAudioSessionInterruptionTypeEnded:
        // The interruption has ended. Restart the audio session and start the
        // initialized audio unit again.
        LOG(LS_INFO) << " Ended => restarting audio session and audio unit";
        NSError* error = nil;
        BOOL success = NO;
        AVAudioSession* session = [AVAudioSession sharedInstance];
        success = [session setActive:YES error:&error];
        if (CheckAndLogError(success, error)) {
          LOG_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
                       "Failed to start the Voice-Processing I/O unit");
        }
        break;
    }
  };

  // This code block will be called when AVAudioSessionRouteChangeNotification
  // is observed.
  void (^route_change_block)(NSNotification*) =
      ^(NSNotification* notification) {
        // Get reason for current route change.
        NSNumber* reason_number =
            notification.userInfo[AVAudioSessionRouteChangeReasonKey];
        AVAudioSessionRouteChangeReason reason =
            (AVAudioSessionRouteChangeReason)reason_number.unsignedIntegerValue;
        bool valid_route_change = true;
        LOG(LS_INFO) << "Route change:";
        switch (reason) {
          case AVAudioSessionRouteChangeReasonUnknown:
            LOG(LS_INFO) << " ReasonUnknown";
            break;
          case AVAudioSessionRouteChangeReasonNewDeviceAvailable:
            LOG(LS_INFO) << " NewDeviceAvailable";
            break;
          case AVAudioSessionRouteChangeReasonOldDeviceUnavailable:
            LOG(LS_INFO) << " OldDeviceUnavailable";
            break;
          case AVAudioSessionRouteChangeReasonCategoryChange:
            // It turns out that we see this notification (at least in iOS 9.2)
            // when making a switch from a BT device to e.g. Speaker using the
            // iOS Control Center and that we therefore must check if the sample
            // rate has changed. And if so is the case, restart the audio unit.
            LOG(LS_INFO) << " CategoryChange";
            LOG(LS_INFO) << " New category: " << ios::GetAudioSessionCategory();
            break;
          case AVAudioSessionRouteChangeReasonOverride:
            LOG(LS_INFO) << " Override";
            break;
          case AVAudioSessionRouteChangeReasonWakeFromSleep:
            LOG(LS_INFO) << " WakeFromSleep";
            break;
          case AVAudioSessionRouteChangeReasonNoSuitableRouteForCategory:
            LOG(LS_INFO) << " NoSuitableRouteForCategory";
            break;
          case AVAudioSessionRouteChangeReasonRouteConfigurationChange:
            // The set of input and output ports has not changed, but their
            // configuration has, e.g., a port’s selected data source has
            // changed. Ignore this type of route change since we are focusing
            // on detecting headset changes.
            LOG(LS_INFO) << " RouteConfigurationChange (ignored)";
            valid_route_change = false;
            break;
        }

        if (valid_route_change) {
          // Log previous route configuration.
          AVAudioSessionRouteDescription* prev_route =
              notification.userInfo[AVAudioSessionRouteChangePreviousRouteKey];
          LOG(LS_INFO) << "Previous route:";
          LOG(LS_INFO) << ios::StdStringFromNSString(
              [NSString stringWithFormat:@"%@", prev_route]);

          // Only restart audio for a valid route change and if the
          // session sample rate has changed.
          AVAudioSession* session = [AVAudioSession sharedInstance];
          const double session_sample_rate = session.sampleRate;
          LOG(LS_INFO) << "session sample rate: " << session_sample_rate;
          if (playout_parameters_.sample_rate() != session_sample_rate) {
            if (!RestartAudioUnitWithNewFormat(session_sample_rate)) {
              LOG(LS_ERROR) << "Audio restart failed";
            }
          }
        }
      };

  // Get the default notification center of the current process.
  NSNotificationCenter* center = [NSNotificationCenter defaultCenter];

  // Add AVAudioSessionInterruptionNotification observer.
  id interruption_observer =
      [center addObserverForName:AVAudioSessionInterruptionNotification
                          object:nil
                           queue:[NSOperationQueue mainQueue]
                      usingBlock:interrupt_block];
  // Add AVAudioSessionRouteChangeNotification observer.
  id route_change_observer =
      [center addObserverForName:AVAudioSessionRouteChangeNotification
                          object:nil
                           queue:[NSOperationQueue mainQueue]
                      usingBlock:route_change_block];

  // Increment refcount on observers using ARC bridge. Instance variable is a
  // void* instead of an id because header is included in other pure C++
  // files.
  audio_interruption_observer_ = (__bridge_retained void*)interruption_observer;
  route_change_observer_ = (__bridge_retained void*)route_change_observer;
}
654
// Removes both notification observers. Ownership of each token is handed
// back to ARC via __bridge_transfer, so the observer object is released
// automatically when the local variable leaves scope.
void AudioDeviceIOS::UnregisterNotificationObservers() {
  LOGI() << "UnregisterNotificationObservers";
  NSNotificationCenter* center = [NSNotificationCenter defaultCenter];
  if (audio_interruption_observer_) {
    id observer = (__bridge_transfer id)audio_interruption_observer_;
    [center removeObserver:observer];
    audio_interruption_observer_ = nullptr;
  }
  if (route_change_observer_) {
    id observer = (__bridge_transfer id)route_change_observer_;
    [center removeObserver:observer];
    route_change_observer_ = nullptr;
  }
}
671
// Reads back the hardware parameters from the now-active audio session,
// updates the playout/record AudioParameters accordingly, and (re)allocates
// all intermediate audio buffers. Must only be called after the audio
// session has been successfully activated, since the session values queried
// here are not final until then.
void AudioDeviceIOS::SetupAudioBuffersForActiveAudioSession() {
  LOGI() << "SetupAudioBuffersForActiveAudioSession";
  // Verify the current values once the audio session has been activated.
  AVAudioSession* session = [AVAudioSession sharedInstance];
  LOG(LS_INFO) << " sample rate: " << session.sampleRate;
  LOG(LS_INFO) << " IO buffer duration: " << session.IOBufferDuration;
  LOG(LS_INFO) << " output channels: " << session.outputNumberOfChannels;
  LOG(LS_INFO) << " input channels: " << session.inputNumberOfChannels;
  LOG(LS_INFO) << " output latency: " << session.outputLatency;
  LOG(LS_INFO) << " input latency: " << session.inputLatency;

  // Log a warning message for the case when we are unable to set the preferred
  // hardware sample rate but continue and use the non-ideal sample rate after
  // reinitializing the audio parameters. Most BT headsets only support 8kHz or
  // 16kHz.
  if (session.sampleRate != kPreferredSampleRate) {
    LOG(LS_WARNING) << "Unable to set the preferred sample rate";
  }

  // At this stage, we also know the exact IO buffer duration and can add
  // that info to the existing audio parameters where it is converted into
  // number of audio frames.
  // Example: IO buffer size = 0.008 seconds <=> 128 audio frames at 16kHz.
  // Hence, 128 is the size we expect to see in upcoming render callbacks.
  playout_parameters_.reset(session.sampleRate, playout_parameters_.channels(),
                            session.IOBufferDuration);
  RTC_DCHECK(playout_parameters_.is_complete());
  record_parameters_.reset(session.sampleRate, record_parameters_.channels(),
                           session.IOBufferDuration);
  RTC_DCHECK(record_parameters_.is_complete());
  LOG(LS_INFO) << " frames per I/O buffer: "
               << playout_parameters_.frames_per_buffer();
  LOG(LS_INFO) << " bytes per I/O buffer: "
               << playout_parameters_.GetBytesPerBuffer();
  // Playout and record sides are expected to use identical buffer sizes
  // since they share sample rate and channel configuration.
  RTC_DCHECK_EQ(playout_parameters_.GetBytesPerBuffer(),
                record_parameters_.GetBytesPerBuffer());

  // Update the ADB parameters since the sample rate might have changed.
  UpdateAudioDeviceBuffer();

  // Create a modified audio buffer class which allows us to ask for,
  // or deliver, any number of samples (and not only multiple of 10ms) to match
  // the native audio unit buffer size.
  RTC_DCHECK(audio_device_buffer_);
  fine_audio_buffer_.reset(new FineAudioBuffer(
      audio_device_buffer_, playout_parameters_.GetBytesPerBuffer(),
      playout_parameters_.sample_rate()));

  // The extra/temporary playoutbuffer must be of this size to avoid
  // unnecessary memcpy while caching data between successive callbacks.
  const int required_playout_buffer_size =
      fine_audio_buffer_->RequiredPlayoutBufferSizeBytes();
  LOG(LS_INFO) << " required playout buffer size: "
               << required_playout_buffer_size;
  playout_audio_buffer_.reset(new SInt8[required_playout_buffer_size]);

  // Allocate AudioBuffers to be used as storage for the received audio.
  // The AudioBufferList structure works as a placeholder for the
  // AudioBuffer structure, which holds a pointer to the actual data buffer
  // in |record_audio_buffer_|. Recorded audio will be rendered into this memory
  // at each input callback when calling AudioUnitRender().
  const int data_byte_size = record_parameters_.GetBytesPerBuffer();
  record_audio_buffer_.reset(new SInt8[data_byte_size]);
  audio_record_buffer_list_.mNumberBuffers = 1;
  AudioBuffer* audio_buffer = &audio_record_buffer_list_.mBuffers[0];
  audio_buffer->mNumberChannels = record_parameters_.channels();
  audio_buffer->mDataByteSize = data_byte_size;
  audio_buffer->mData = record_audio_buffer_.get();
}
741
// Creates, configures and initializes the Voice-Processing I/O (VPIO) audio
// unit which provides hardware-assisted echo cancellation and gain control.
// Input (microphone) is enabled on bus 1 and output (speaker) on bus 0; both
// directions use the same 16-bit linear PCM mono format at the sample rate
// negotiated with the audio session. Returns false (after disposing any
// partially configured unit) if any step fails.
bool AudioDeviceIOS::SetupAndInitializeVoiceProcessingAudioUnit() {
  LOGI() << "SetupAndInitializeVoiceProcessingAudioUnit";
  RTC_DCHECK(!vpio_unit_) << "VoiceProcessingIO audio unit already exists";
  // Create an audio component description to identify the Voice-Processing
  // I/O audio unit.
  AudioComponentDescription vpio_unit_description;
  vpio_unit_description.componentType = kAudioUnitType_Output;
  vpio_unit_description.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
  vpio_unit_description.componentManufacturer = kAudioUnitManufacturer_Apple;
  vpio_unit_description.componentFlags = 0;
  vpio_unit_description.componentFlagsMask = 0;

  // Obtain an audio unit instance given the description.
  AudioComponent found_vpio_unit_ref =
      AudioComponentFindNext(nullptr, &vpio_unit_description);

  // Create a Voice-Processing IO audio unit.
  OSStatus result = noErr;
  result = AudioComponentInstanceNew(found_vpio_unit_ref, &vpio_unit_);
  if (result != noErr) {
    vpio_unit_ = nullptr;
    LOG(LS_ERROR) << "AudioComponentInstanceNew failed: " << result;
    return false;
  }

  // A VP I/O unit's bus 1 connects to input hardware (microphone). Enable
  // input on the input scope of the input element.
  AudioUnitElement input_bus = 1;
  UInt32 enable_input = 1;
  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
                                kAudioUnitScope_Input, input_bus, &enable_input,
                                sizeof(enable_input));
  if (result != noErr) {
    DisposeAudioUnit();
    LOG(LS_ERROR) << "Failed to enable input on input scope of input element: "
                  << result;
    return false;
  }

  // A VP I/O unit's bus 0 connects to output hardware (speaker). Enable
  // output on the output scope of the output element.
  AudioUnitElement output_bus = 0;
  UInt32 enable_output = 1;
  result = AudioUnitSetProperty(vpio_unit_, kAudioOutputUnitProperty_EnableIO,
                                kAudioUnitScope_Output, output_bus,
                                &enable_output, sizeof(enable_output));
  if (result != noErr) {
    DisposeAudioUnit();
    LOG(LS_ERROR)
        << "Failed to enable output on output scope of output element: "
        << result;
    return false;
  }

  // Set the application formats for input and output:
  // - use same format in both directions
  // - avoid resampling in the I/O unit by using the hardware sample rate
  // - linear PCM => noncompressed audio data format with one frame per packet
  // - no need to specify interleaving since only mono is supported
  AudioStreamBasicDescription application_format = {0};
  UInt32 size = sizeof(application_format);
  RTC_DCHECK_EQ(playout_parameters_.sample_rate(),
                record_parameters_.sample_rate());
  RTC_DCHECK_EQ(1, kPreferredNumberOfChannels);
  application_format.mSampleRate = playout_parameters_.sample_rate();
  application_format.mFormatID = kAudioFormatLinearPCM;
  application_format.mFormatFlags =
      kLinearPCMFormatFlagIsSignedInteger | kLinearPCMFormatFlagIsPacked;
  application_format.mBytesPerPacket = kBytesPerSample;
  application_format.mFramesPerPacket = 1;  // uncompressed
  application_format.mBytesPerFrame = kBytesPerSample;
  application_format.mChannelsPerFrame = kPreferredNumberOfChannels;
  application_format.mBitsPerChannel = 8 * kBytesPerSample;
  // Store the new format so it can be reapplied in
  // RestartAudioUnitWithNewFormat() if the sample rate changes later.
  application_format_ = application_format;
#if !defined(NDEBUG)
  LogABSD(application_format_);
#endif

  // Set the application format on the output scope of the input element/bus.
  result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Output, input_bus,
                                &application_format, size);
  if (result != noErr) {
    DisposeAudioUnit();
    LOG(LS_ERROR)
        << "Failed to set application format on output scope of input bus: "
        << result;
    return false;
  }

  // Set the application format on the input scope of the output element/bus.
  result = AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                                kAudioUnitScope_Input, output_bus,
                                &application_format, size);
  if (result != noErr) {
    DisposeAudioUnit();
    LOG(LS_ERROR)
        << "Failed to set application format on input scope of output bus: "
        << result;
    return false;
  }

  // Specify the callback function that provides audio samples to the audio
  // unit.
  AURenderCallbackStruct render_callback;
  render_callback.inputProc = GetPlayoutData;
  render_callback.inputProcRefCon = this;
  result = AudioUnitSetProperty(
      vpio_unit_, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Input,
      output_bus, &render_callback, sizeof(render_callback));
  if (result != noErr) {
    DisposeAudioUnit();
    LOG(LS_ERROR) << "Failed to specify the render callback on the output bus: "
                  << result;
    return false;
  }

  // Disable AU buffer allocation for the recorder, we allocate our own.
  // TODO(henrika): not sure that it actually saves resource to make this call.
  UInt32 flag = 0;
  result = AudioUnitSetProperty(
      vpio_unit_, kAudioUnitProperty_ShouldAllocateBuffer,
      kAudioUnitScope_Output, input_bus, &flag, sizeof(flag));
  if (result != noErr) {
    DisposeAudioUnit();
    LOG(LS_ERROR) << "Failed to disable buffer allocation on the input bus: "
                  << result;
    // The unit has been disposed above; returning is required to avoid
    // calling AudioUnitInitialize() on a null audio unit below.
    return false;
  }

  // Specify the callback to be called by the I/O thread to us when input audio
  // is available. The recorded samples can then be obtained by calling the
  // AudioUnitRender() method.
  AURenderCallbackStruct input_callback;
  input_callback.inputProc = RecordedDataIsAvailable;
  input_callback.inputProcRefCon = this;
  result = AudioUnitSetProperty(vpio_unit_,
                                kAudioOutputUnitProperty_SetInputCallback,
                                kAudioUnitScope_Global, input_bus,
                                &input_callback, sizeof(input_callback));
  if (result != noErr) {
    DisposeAudioUnit();
    LOG(LS_ERROR) << "Failed to specify the input callback on the input bus: "
                  << result;
    // The unit has been disposed above; returning is required to avoid
    // calling AudioUnitInitialize() on a null audio unit below.
    return false;
  }

  // Initialize the Voice-Processing I/O unit instance.
  // Calls to AudioUnitInitialize() can fail if called back-to-back on
  // different ADM instances. The error message in this case is -66635 which is
  // undocumented. Tests have shown that calling AudioUnitInitialize a second
  // time, after a short sleep, avoids this issue.
  // See webrtc:5166 for details.
  int failed_initialize_attempts = 0;
  result = AudioUnitInitialize(vpio_unit_);
  while (result != noErr) {
    LOG(LS_ERROR) << "Failed to initialize the Voice-Processing I/O unit: "
                  << result;
    ++failed_initialize_attempts;
    if (failed_initialize_attempts == kMaxNumberOfAudioUnitInitializeAttempts) {
      // Max number of initialization attempts exceeded, hence abort.
      LOG(LS_WARNING) << "Too many initialization attempts";
      DisposeAudioUnit();
      return false;
    }
    LOG(LS_INFO) << "pause 100ms and try audio unit initialization again...";
    [NSThread sleepForTimeInterval:0.1f];
    result = AudioUnitInitialize(vpio_unit_);
  }
  LOG(LS_INFO) << "Voice-Processing I/O unit is now initialized";
  return true;
}
913
// Stops and uninitializes the existing VPIO audio unit, updates the stream
// format and the internal audio buffers to match |sample_rate|, and then
// reinitializes and restarts the unit. Used when the hardware sample rate
// differs from the one the audio session originally gave us.
// Returns false if any stop/uninitialize/initialize/start step fails.
bool AudioDeviceIOS::RestartAudioUnitWithNewFormat(float sample_rate) {
  LOGI() << "RestartAudioUnitWithNewFormat(sample_rate=" << sample_rate << ")";
  // Stop the active audio unit.
  LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStop(vpio_unit_),
                          "Failed to stop the Voice-Processing I/O unit");

  // The stream format is about to be changed and it requires that we first
  // uninitialize it to deallocate its resources.
  LOG_AND_RETURN_IF_ERROR(
      AudioUnitUninitialize(vpio_unit_),
      "Failed to uninitialize the Voice-Processing I/O unit");

  // Allocate new buffers given the new stream format.
  SetupAudioBuffersForActiveAudioSession();

  // Update the existing application format using the new sample rate and
  // reapply it on both elements (output scope of input bus 1 and input scope
  // of output bus 0). Failures are logged but not fatal here; a unit left in
  // a bad state will fail in AudioUnitInitialize() below.
  application_format_.mSampleRate = playout_parameters_.sample_rate();
  UInt32 size = sizeof(application_format_);
  LOG_IF_ERROR(
      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Output, 1, &application_format_,
                           size),
      "Failed to set application format on output scope of input bus");
  LOG_IF_ERROR(
      AudioUnitSetProperty(vpio_unit_, kAudioUnitProperty_StreamFormat,
                           kAudioUnitScope_Input, 0, &application_format_,
                           size),
      "Failed to set application format on input scope of output bus");

  // Prepare the audio unit to render audio again.
  LOG_AND_RETURN_IF_ERROR(AudioUnitInitialize(vpio_unit_),
                          "Failed to initialize the Voice-Processing I/O unit");
  LOG(LS_INFO) << "Voice-Processing I/O unit is now reinitialized";

  // Start rendering audio using the new format.
  LOG_AND_RETURN_IF_ERROR(AudioOutputUnitStart(vpio_unit_),
                          "Failed to start the Voice-Processing I/O unit");
  LOG(LS_INFO) << "Voice-Processing I/O unit is now restarted";
  return true;
}
948
// Performs the common initialization shared by playout and recording:
// activates the shared audio session, verifies its configuration, installs
// notification observers, sizes the audio buffers and creates the VPIO audio
// unit. Returns false (with the session usage count rolled back) on failure.
bool AudioDeviceIOS::InitPlayOrRecord() {
  LOGI() << "InitPlayOrRecord";
  // This object needs an active audio session; increases the usage count and
  // activates the session if we are its first user.
  if (!ActivateAudioSession()) {
    return false;
  }

  // Verify that the now-active session carries the category and mode that
  // this ADM requires before doing anything else with it.
  AVAudioSession* session = [AVAudioSession sharedInstance];
  if (!VerifyAudioSession(session)) {
    LOG(LS_ERROR) << "Failed to verify audio session category and mode";
    DeactivateAudioSession();
    return false;
  }

  // Begin listening for audio session interruptions and route changes.
  RegisterNotificationObservers();

  // Derive audio parameters and allocate buffers based on what the active
  // session actually granted us.
  SetupAudioBuffersForActiveAudioSession();

  // Finally, create, set up and initialize a new Voice-Processing I/O unit.
  if (SetupAndInitializeVoiceProcessingAudioUnit()) {
    return true;
  }
  // Creation failed: roll back the session usage count, possibly
  // deactivating the session if this object was its only user.
  DeactivateAudioSession();
  return false;
}
979
// Tears down what InitPlayOrRecord() built up: stops, uninitializes and
// disposes the VPIO audio unit, removes the notification observers and
// finally releases this object's claim on the shared audio session.
void AudioDeviceIOS::ShutdownPlayOrRecord() {
  LOGI() << "ShutdownPlayOrRecord";
  // Stop, uninitialize and dispose the voice-processing I/O unit, if any.
  // Errors are logged but teardown continues regardless.
  if (vpio_unit_ != nullptr) {
    OSStatus result = AudioOutputUnitStop(vpio_unit_);
    if (result != noErr) {
      LOG_F(LS_ERROR) << "AudioOutputUnitStop failed: " << result;
    }
    result = AudioUnitUninitialize(vpio_unit_);
    if (result != noErr) {
      LOG_F(LS_ERROR) << "AudioUnitUninitialize failed: " << result;
    }
    DisposeAudioUnit();
  }

  // Stop listening for audio session interruptions and route changes.
  UnregisterNotificationObservers();

  // Deactivate the session last: all I/O must already be stopped or paused
  // before the audio session is given up.
  DeactivateAudioSession();
}
1003
// Disposes the VPIO audio unit (if one exists) and clears |vpio_unit_| so
// the object can be safely set up again later. Safe to call repeatedly.
void AudioDeviceIOS::DisposeAudioUnit() {
  if (nullptr == vpio_unit_)
    return;
  OSStatus result = AudioComponentInstanceDispose(vpio_unit_);
  if (result != noErr) {
    // Note: space after the colon for consistency with the other error logs.
    LOG(LS_ERROR) << "AudioComponentInstanceDispose failed: " << result;
  }
  vpio_unit_ = nullptr;
}
1013
// Static C trampoline installed as the VPIO unit's input callback
// (kAudioOutputUnitProperty_SetInputCallback). Recovers the AudioDeviceIOS
// instance from |in_ref_con| and forwards to the member implementation.
OSStatus AudioDeviceIOS::RecordedDataIsAvailable(
    void* in_ref_con,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* in_time_stamp,
    UInt32 in_bus_number,
    UInt32 in_number_frames,
    AudioBufferList* io_data) {
  // Input callbacks arrive on the input element (bus 1) and, since we
  // allocate our own recording buffers, no buffer list is supplied here.
  RTC_DCHECK_EQ(1u, in_bus_number);
  RTC_DCHECK(!io_data);
  AudioDeviceIOS* device = static_cast<AudioDeviceIOS*>(in_ref_con);
  return device->OnRecordedDataIsAvailable(io_action_flags, in_time_stamp,
                                           in_bus_number, in_number_frames);
}
1028
// Called on the audio unit's real-time I/O thread when new microphone data
// is available. Pulls the recorded samples into the preallocated
// |audio_record_buffer_list_| via AudioUnitRender() and hands them to the
// WebRTC audio device buffer through |fine_audio_buffer_|, which adapts
// between the native buffer size and WebRTC's 10 ms chunks.
OSStatus AudioDeviceIOS::OnRecordedDataIsAvailable(
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* in_time_stamp,
    UInt32 in_bus_number,
    UInt32 in_number_frames) {
  OSStatus result = noErr;
  // Simply return if recording is not enabled.
  if (!rtc::AtomicOps::AcquireLoad(&recording_))
    return result;
  if (in_number_frames != record_parameters_.frames_per_buffer()) {
    // We have seen short bursts (1-2 frames) where |in_number_frames| changes.
    // Add a log to keep track of longer sequences if that should ever happen.
    // Also return since calling AudioUnitRender in this state will only result
    // in kAudio_ParamError (-50) anyhow.
    LOG(LS_WARNING) << "in_number_frames (" << in_number_frames
                    << ") != " << record_parameters_.frames_per_buffer();
    return noErr;
  }
  // Obtain the recorded audio samples by initiating a rendering cycle.
  // Since it happens on the input bus, the |io_data| parameter is a reference
  // to the preallocated audio buffer list that the audio unit renders into.
  // TODO(henrika): should error handling be improved?
  AudioBufferList* io_data = &audio_record_buffer_list_;
  result = AudioUnitRender(vpio_unit_, io_action_flags, in_time_stamp,
                           in_bus_number, in_number_frames, io_data);
  if (result != noErr) {
    LOG_F(LS_ERROR) << "AudioUnitRender failed: " << result;
    return result;
  }
  // Get a pointer to the recorded audio and send it to the WebRTC ADB.
  // Use the FineAudioBuffer instance to convert between native buffer size
  // and the 10ms buffer size used by WebRTC.
  // The delay estimates are fixed constants rather than measured values;
  // presumably chosen as typical-case values for iOS — confirm if precise
  // AEC tuning is needed.
  const UInt32 data_size_in_bytes = io_data->mBuffers[0].mDataByteSize;
  RTC_CHECK_EQ(data_size_in_bytes / kBytesPerSample, in_number_frames);
  SInt8* data = static_cast<SInt8*>(io_data->mBuffers[0].mData);
  fine_audio_buffer_->DeliverRecordedData(data, data_size_in_bytes,
                                          kFixedPlayoutDelayEstimate,
                                          kFixedRecordDelayEstimate);
  return noErr;
}
1069
// Static C trampoline installed as the VPIO unit's render callback
// (kAudioUnitProperty_SetRenderCallback). Recovers the AudioDeviceIOS
// instance from |in_ref_con| and forwards to the member implementation.
OSStatus AudioDeviceIOS::GetPlayoutData(
    void* in_ref_con,
    AudioUnitRenderActionFlags* io_action_flags,
    const AudioTimeStamp* in_time_stamp,
    UInt32 in_bus_number,
    UInt32 in_number_frames,
    AudioBufferList* io_data) {
  // Render callbacks arrive on the output element (bus 0) and supply a
  // buffer list that the callee must fill with audio samples.
  RTC_DCHECK_EQ(0u, in_bus_number);
  RTC_DCHECK(io_data);
  AudioDeviceIOS* device = static_cast<AudioDeviceIOS*>(in_ref_con);
  return device->OnGetPlayoutData(io_action_flags, in_number_frames, io_data);
}
1083
// Called on the audio unit's real-time I/O thread when the speaker needs
// more audio. Fills |io_data| either with silence (when playout is not
// active, also signaling that to the unit via the render action flags) or
// with decoded samples pulled from WebRTC through |fine_audio_buffer_|.
OSStatus AudioDeviceIOS::OnGetPlayoutData(
    AudioUnitRenderActionFlags* io_action_flags,
    UInt32 in_number_frames,
    AudioBufferList* io_data) {
  // Verify 16-bit, noninterleaved mono PCM signal format.
  RTC_DCHECK_EQ(1u, io_data->mNumberBuffers);
  RTC_DCHECK_EQ(1u, io_data->mBuffers[0].mNumberChannels);
  // Get pointer to internal audio buffer to which new audio data shall be
  // written. Note: renamed from |dataSizeInBytes| to follow the snake_case
  // local-variable convention used everywhere else in this file.
  const UInt32 data_size_in_bytes = io_data->mBuffers[0].mDataByteSize;
  RTC_CHECK_EQ(data_size_in_bytes / kBytesPerSample, in_number_frames);
  SInt8* destination = static_cast<SInt8*>(io_data->mBuffers[0].mData);
  // Produce silence and give audio unit a hint about it if playout is not
  // activated.
  if (!rtc::AtomicOps::AcquireLoad(&playing_)) {
    *io_action_flags |= kAudioUnitRenderAction_OutputIsSilence;
    memset(destination, 0, data_size_in_bytes);
    return noErr;
  }
  // Read decoded 16-bit PCM samples from WebRTC (using a size that matches
  // the native I/O audio unit) to a preallocated intermediate buffer and
  // copy the result to the audio buffer in the |io_data| destination.
  SInt8* source = playout_audio_buffer_.get();
  fine_audio_buffer_->GetPlayoutData(source);
  memcpy(destination, source, data_size_in_bytes);
  return noErr;
}
1111
1112}  // namespace webrtc
1113