1/*
2 *  Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 *
4 *  Use of this source code is governed by a BSD-style license
5 *  that can be found in the LICENSE file in the root of the source
6 *  tree. An additional intellectual property rights grant can be found
7 *  in the file PATENTS.  All contributing project authors may
8 *  be found in the AUTHORS file in the root of the source tree.
9 */
10
11#pragma warning(disable: 4995)  // name was marked as #pragma deprecated
12
13#if (_MSC_VER >= 1310) && (_MSC_VER < 1400)
14// Reports the major and minor versions of the compiler.
15// For example, 1310 for Microsoft Visual C++ .NET 2003. 1310 represents version 13 and a 1.0 point release.
16// The Visual C++ 2005 compiler version is 1400.
17// Type cl /? at the command line to see the major and minor versions of your compiler along with the build number.
18#pragma message(">> INFO: Windows Core Audio is not supported in VS 2003")
19#endif
20
21#include "webrtc/modules/audio_device/audio_device_config.h"
22
23#ifdef WEBRTC_WINDOWS_CORE_AUDIO_BUILD
24
25#include "webrtc/modules/audio_device/win/audio_device_core_win.h"
26
27#include <assert.h>
28#include <string.h>
29
30#include <windows.h>
31#include <comdef.h>
32#include <dmo.h>
33#include <Functiondiscoverykeys_devpkey.h>
34#include <mmsystem.h>
35#include <strsafe.h>
36#include <uuids.h>
37
38#include "webrtc/modules/audio_device/audio_device_utility.h"
39#include "webrtc/system_wrappers/interface/sleep.h"
40#include "webrtc/system_wrappers/interface/trace.h"
41
42// Macro that calls a COM method returning HRESULT value.
43#define EXIT_ON_ERROR(hres)    do { if (FAILED(hres)) goto Exit; } while(0)
44
// Macro that jumps to the "Next" label on a COM error.
46#define CONTINUE_ON_ERROR(hres) do { if (FAILED(hres)) goto Next; } while(0)
47
48// Macro that releases a COM object if not NULL.
49#define SAFE_RELEASE(p)     do { if ((p)) { (p)->Release(); (p) = NULL; } } while(0)
50
51#define ROUND(x) ((x) >=0 ? (int)((x) + 0.5) : (int)((x) - 0.5))
52
53// REFERENCE_TIME time units per millisecond
54#define REFTIMES_PER_MILLISEC  10000
55
56typedef struct tagTHREADNAME_INFO
57{
58   DWORD dwType;        // must be 0x1000
59   LPCSTR szName;       // pointer to name (in user addr space)
60   DWORD dwThreadID;    // thread ID (-1=caller thread)
61   DWORD dwFlags;       // reserved for future use, must be zero
62} THREADNAME_INFO;
63
64namespace webrtc {
65namespace {
66
67enum { COM_THREADING_MODEL = COINIT_MULTITHREADED };
68
69enum
70{
71    kAecCaptureStreamIndex = 0,
72    kAecRenderStreamIndex = 1
73};
74
75// An implementation of IMediaBuffer, as required for
76// IMediaObject::ProcessOutput(). After consuming data provided by
77// ProcessOutput(), call SetLength() to update the buffer availability.
78//
79// Example implementation:
80// http://msdn.microsoft.com/en-us/library/dd376684(v=vs.85).aspx
81class MediaBufferImpl : public IMediaBuffer
82{
83public:
84    explicit MediaBufferImpl(DWORD maxLength)
85        : _data(new BYTE[maxLength]),
86          _length(0),
87          _maxLength(maxLength),
88          _refCount(0)
89    {}
90
91    // IMediaBuffer methods.
92    STDMETHOD(GetBufferAndLength(BYTE** ppBuffer, DWORD* pcbLength))
93    {
94        if (!ppBuffer || !pcbLength)
95        {
96            return E_POINTER;
97        }
98
99        *ppBuffer = _data;
100        *pcbLength = _length;
101
102        return S_OK;
103    }
104
105    STDMETHOD(GetMaxLength(DWORD* pcbMaxLength))
106    {
107        if (!pcbMaxLength)
108        {
109            return E_POINTER;
110        }
111
112        *pcbMaxLength = _maxLength;
113        return S_OK;
114    }
115
116    STDMETHOD(SetLength(DWORD cbLength))
117    {
118        if (cbLength > _maxLength)
119        {
120            return E_INVALIDARG;
121        }
122
123        _length = cbLength;
124        return S_OK;
125    }
126
127    // IUnknown methods.
128    STDMETHOD_(ULONG, AddRef())
129    {
130        return InterlockedIncrement(&_refCount);
131    }
132
133    STDMETHOD(QueryInterface(REFIID riid, void** ppv))
134    {
135        if (!ppv)
136        {
137            return E_POINTER;
138        }
139        else if (riid != IID_IMediaBuffer && riid != IID_IUnknown)
140        {
141            return E_NOINTERFACE;
142        }
143
144        *ppv = static_cast<IMediaBuffer*>(this);
145        AddRef();
146        return S_OK;
147    }
148
149    STDMETHOD_(ULONG, Release())
150    {
151        LONG refCount = InterlockedDecrement(&_refCount);
152        if (refCount == 0)
153        {
154            delete this;
155        }
156
157        return refCount;
158    }
159
160private:
161    ~MediaBufferImpl()
162    {
163        delete [] _data;
164    }
165
166    BYTE* _data;
167    DWORD _length;
168    const DWORD _maxLength;
169    LONG _refCount;
170};
171}  // namespace
172
173// ============================================================================
174//                              Static Methods
175// ============================================================================
176
177// ----------------------------------------------------------------------------
178//  CoreAudioIsSupported
179// ----------------------------------------------------------------------------
180
// Static probe that decides whether the Windows Core Audio (WASAPI) layer
// can be used on this machine. It (1) rejects pre-Vista-SP1 systems,
// (2) verifies MTA COM initialization, (3) verifies the MMDevice API can be
// instantiated and (4) exercises init/enumeration on every playout and
// recording device. Returns true only if all steps succeed.
bool AudioDeviceWindowsCore::CoreAudioIsSupported()
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1, "%s", __FUNCTION__);

    bool MMDeviceIsAvailable(false);
    bool coreAudioIsSupported(false);

    HRESULT hr(S_OK);
    TCHAR buf[MAXERRORLENGTH];
    TCHAR errorText[MAXERRORLENGTH];

    // 1) Check if Windows version is Vista SP1 or later.
    //
    // CoreAudio is only available on Vista SP1 and later.
    //
    OSVERSIONINFOEX osvi;
    DWORDLONG dwlConditionMask = 0;
    int op = VER_LESS_EQUAL;

    // Initialize the OSVERSIONINFOEX structure with the highest version that
    // does NOT support Core Audio: 6.0 (Vista RTM) with no service pack.
    ZeroMemory(&osvi, sizeof(OSVERSIONINFOEX));
    osvi.dwOSVersionInfoSize = sizeof(OSVERSIONINFOEX);
    osvi.dwMajorVersion = 6;
    osvi.dwMinorVersion = 0;
    osvi.wServicePackMajor = 0;
    osvi.wServicePackMinor = 0;
    osvi.wProductType = VER_NT_WORKSTATION;

    // Initialize the condition mask: version/service-pack fields use
    // VER_LESS_EQUAL, the product type must match exactly.
    VER_SET_CONDITION(dwlConditionMask, VER_MAJORVERSION, op);
    VER_SET_CONDITION(dwlConditionMask, VER_MINORVERSION, op);
    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMAJOR, op);
    VER_SET_CONDITION(dwlConditionMask, VER_SERVICEPACKMINOR, op);
    VER_SET_CONDITION(dwlConditionMask, VER_PRODUCT_TYPE, VER_EQUAL);

    DWORD dwTypeMask = VER_MAJORVERSION | VER_MINORVERSION |
                       VER_SERVICEPACKMAJOR | VER_SERVICEPACKMINOR |
                       VER_PRODUCT_TYPE;

    // Perform the test. VerifyVersionInfo() returns non-zero when the
    // running OS satisfies ALL conditions above, i.e. it is a workstation at
    // version <= 6.0 with no service pack — Vista RTM or XP — in which case
    // Core Audio is not available.
    BOOL isVistaRTMorXP = VerifyVersionInfo(&osvi, dwTypeMask,
                                            dwlConditionMask);
    if (isVistaRTMorXP != 0)
    {
        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1,
            "*** Windows Core Audio is only supported on Vista SP1 or later "
            "=> will revert to the Wave API ***");
        return false;
    }

    // 2) Initializes the COM library for use by the calling thread.

    // The COM init wrapper sets the thread's concurrency model to MTA,
    // and creates a new apartment for the thread if one is required. The
    // wrapper also ensures that each call to CoInitializeEx is balanced
    // by a corresponding call to CoUninitialize.
    //
    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
    if (!comInit.succeeded()) {
      // Things will work even if an STA thread is calling this method but we
      // want to ensure that MTA is used and therefore return false here.
      return false;
    }

    // 3) Check if the MMDevice API is available.
    //
    // The Windows Multimedia Device (MMDevice) API enables audio clients to
    // discover audio endpoint devices, determine their capabilities, and create
    // driver instances for those devices.
    // Header file Mmdeviceapi.h defines the interfaces in the MMDevice API.
    // The MMDevice API consists of several interfaces. The first of these is the
    // IMMDeviceEnumerator interface. To access the interfaces in the MMDevice API,
    // a client obtains a reference to the IMMDeviceEnumerator interface of a
    // device-enumerator object by calling the CoCreateInstance function.
    //
    // Through the IMMDeviceEnumerator interface, the client can obtain references
    // to the other interfaces in the MMDevice API. The MMDevice API implements
    // the following interfaces:
    //
    // IMMDevice            Represents an audio device.
    // IMMDeviceCollection  Represents a collection of audio devices.
    // IMMDeviceEnumerator  Provides methods for enumerating audio devices.
    // IMMEndpoint          Represents an audio endpoint device.
    //
    IMMDeviceEnumerator* pIMMD(NULL);
    const CLSID CLSID_MMDeviceEnumerator = __uuidof(MMDeviceEnumerator);
    const IID IID_IMMDeviceEnumerator = __uuidof(IMMDeviceEnumerator);

    hr = CoCreateInstance(
            CLSID_MMDeviceEnumerator,   // GUID value of MMDeviceEnumerator coclass
            NULL,
            CLSCTX_ALL,
            IID_IMMDeviceEnumerator,    // GUID value of the IMMDeviceEnumerator interface
            (void**)&pIMMD );

    if (FAILED(hr))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, -1,
            "AudioDeviceWindowsCore::CoreAudioIsSupported() Failed to create the required COM object", hr);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1,
            "AudioDeviceWindowsCore::CoreAudioIsSupported() CoCreateInstance(MMDeviceEnumerator) failed (hr=0x%x)", hr);

        const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
                              FORMAT_MESSAGE_IGNORE_INSERTS;
        const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);

        // Gets the system's human readable message string for this HRESULT.
        // All error messages are in English by default.
        // NOTE(review): FormatMessageW() fills a wide-character buffer, so
        // this assumes a UNICODE build where TCHAR is wchar_t — confirm.
        DWORD messageLength = ::FormatMessageW(dwFlags,
                                               0,
                                               hr,
                                               dwLangID,
                                               errorText,
                                               MAXERRORLENGTH,
                                               NULL);

        assert(messageLength <= MAXERRORLENGTH);

        // Trims trailing white space (FormatMessage() leaves a trailing cr-lf.).
        for (; messageLength && ::isspace(errorText[messageLength - 1]);
             --messageLength)
        {
            errorText[messageLength - 1] = '\0';
        }

        StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
        StringCchCat(buf, MAXERRORLENGTH, errorText);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1, "%S", buf);
    }
    else
    {
        MMDeviceIsAvailable = true;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, -1,
            "AudioDeviceWindowsCore::CoreAudioIsSupported() CoCreateInstance(MMDeviceEnumerator) succeeded", hr);
        SAFE_RELEASE(pIMMD);
    }

    // 4) Verify that we can create and initialize our Core Audio class.
    //
    // Also, perform a limited "API test" to ensure that Core Audio is supported for all devices.
    //
    if (MMDeviceIsAvailable)
    {
        coreAudioIsSupported = false;

        AudioDeviceWindowsCore* p = new AudioDeviceWindowsCore(-1);
        if (p == NULL)
        {
            return false;
        }

        // |ok| accumulates failures: any non-zero method result (or an
        // unavailable device) makes it non-zero and marks Core Audio as
        // unusable.
        int ok(0);
        int temp_ok(0);
        bool available(false);

        ok |= p->Init();

        int16_t numDevsRec = p->RecordingDevices();
        for (uint16_t i = 0; i < numDevsRec; i++)
        {
            ok |= p->SetRecordingDevice(i);
            temp_ok = p->RecordingIsAvailable(available);
            ok |= temp_ok;
            // An unavailable recording device also counts as a failure.
            ok |= (available == false);
            if (available)
            {
                ok |= p->InitMicrophone();
            }
            if (ok)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1,
                    "AudioDeviceWindowsCore::CoreAudioIsSupported() Failed to use Core Audio Recording for device id=%i", i);
            }
        }

        int16_t numDevsPlay = p->PlayoutDevices();
        for (uint16_t i = 0; i < numDevsPlay; i++)
        {
            ok |= p->SetPlayoutDevice(i);
            temp_ok = p->PlayoutIsAvailable(available);
            ok |= temp_ok;
            // An unavailable playout device also counts as a failure.
            ok |= (available == false);
            if (available)
            {
                ok |= p->InitSpeaker();
            }
            if (ok)
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, -1 ,
                    "AudioDeviceWindowsCore::CoreAudioIsSupported() Failed to use Core Audio Playout for device id=%i", i);
            }
        }

        ok |= p->Terminate();

        if (ok == 0)
        {
            coreAudioIsSupported = true;
        }

        delete p;
    }

    if (coreAudioIsSupported)
    {
        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, "*** Windows Core Audio is supported ***");
    }
    else
    {
        WEBRTC_TRACE(kTraceStateInfo, kTraceAudioDevice, -1, "*** Windows Core Audio is NOT supported => will revert to the Wave API ***");
    }

    return (coreAudioIsSupported);
}
395
396// ============================================================================
397//                            Construction & Destruction
398// ============================================================================
399
400// ----------------------------------------------------------------------------
401//  AudioDeviceWindowsCore() - ctor
402// ----------------------------------------------------------------------------
403
// Constructor. Initializes every member to a known default, loads the
// optional Avrt DLL (multimedia thread priorities), creates the event
// handles used by the render/capture threads, and allocates the MMDevice
// enumerator plus the built-in AEC DMO.
AudioDeviceWindowsCore::AudioDeviceWindowsCore(const int32_t id) :
    _comInit(ScopedCOMInitializer::kMTA),
    _critSect(*CriticalSectionWrapper::CreateCriticalSection()),
    _volumeMutex(*CriticalSectionWrapper::CreateCriticalSection()),
    _id(id),
    _ptrAudioBuffer(NULL),
    _ptrEnumerator(NULL),
    _ptrRenderCollection(NULL),
    _ptrCaptureCollection(NULL),
    _ptrDeviceOut(NULL),
    _ptrDeviceIn(NULL),
    _ptrClientOut(NULL),
    _ptrClientIn(NULL),
    _ptrRenderClient(NULL),
    _ptrCaptureClient(NULL),
    _ptrCaptureVolume(NULL),
    _ptrRenderSimpleVolume(NULL),
    _dmo(NULL),
    _mediaBuffer(NULL),
    _builtInAecEnabled(false),
    _playAudioFrameSize(0),
    _playSampleRate(0),
    _playBlockSize(0),
    _playChannels(2),
    _sndCardPlayDelay(0),
    _sndCardRecDelay(0),
    _writtenSamples(0),
    _readSamples(0),
    _playAcc(0),
    _recAudioFrameSize(0),
    _recSampleRate(0),
    _recBlockSize(0),
    _recChannels(2),
    _avrtLibrary(NULL),
    _winSupportAvrt(false),
    _hRenderSamplesReadyEvent(NULL),
    _hPlayThread(NULL),
    _hCaptureSamplesReadyEvent(NULL),
    _hRecThread(NULL),
    _hShutdownRenderEvent(NULL),
    _hShutdownCaptureEvent(NULL),
    _hRenderStartedEvent(NULL),
    _hCaptureStartedEvent(NULL),
    _hGetCaptureVolumeThread(NULL),
    _hSetCaptureVolumeThread(NULL),
    _hSetCaptureVolumeEvent(NULL),
    _hMmTask(NULL),
    _initialized(false),
    _recording(false),
    _playing(false),
    _recIsInitialized(false),
    _playIsInitialized(false),
    _speakerIsInitialized(false),
    _microphoneIsInitialized(false),
    _AGC(false),
    _playWarning(0),
    _playError(0),
    _recWarning(0),
    _recError(0),
    _playBufType(AudioDeviceModule::kAdaptiveBufferSize),
    _playBufDelay(80),
    _playBufDelayFixed(80),
    _usingInputDeviceIndex(false),
    _usingOutputDeviceIndex(false),
    _inputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
    _outputDevice(AudioDeviceModule::kDefaultCommunicationDevice),
    _inputDeviceIndex(0),
    _outputDeviceIndex(0),
    _newMicLevel(0)
{
    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, id, "%s created", __FUNCTION__);
    // MTA COM initialization must have succeeded for this thread.
    assert(_comInit.succeeded());

    // Try to load the Avrt DLL
    if (!_avrtLibrary)
    {
        // Get handle to the Avrt DLL module.
        _avrtLibrary = LoadLibrary(TEXT("Avrt.dll"));
        if (_avrtLibrary)
        {
            // The DLL was found and loaded. Now look up the three entry
            // points needed for multimedia thread characteristics.
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() The Avrt DLL module is now loaded");

            _PAvRevertMmThreadCharacteristics = (PAvRevertMmThreadCharacteristics)GetProcAddress(_avrtLibrary, "AvRevertMmThreadCharacteristics");
            _PAvSetMmThreadCharacteristicsA = (PAvSetMmThreadCharacteristicsA)GetProcAddress(_avrtLibrary, "AvSetMmThreadCharacteristicsA");
            _PAvSetMmThreadPriority = (PAvSetMmThreadPriority)GetProcAddress(_avrtLibrary, "AvSetMmThreadPriority");

            // Avrt support is only enabled when all three entry points
            // resolved successfully.
            if ( _PAvRevertMmThreadCharacteristics &&
                 _PAvSetMmThreadCharacteristicsA &&
                 _PAvSetMmThreadPriority)
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvRevertMmThreadCharacteristics() is OK");
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvSetMmThreadCharacteristicsA() is OK");
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::AudioDeviceWindowsCore() AvSetMmThreadPriority() is OK");
                _winSupportAvrt = true;
            }
        }
    }

    // Create our samples ready events - we want auto reset events that start in the not-signaled state.
    // The state of an auto-reset event object remains signaled until a single waiting thread is released,
    // at which time the system automatically sets the state to nonsignaled. If no threads are waiting,
    // the event object's state remains signaled.
    // (Except for _hShutdownCaptureEvent, which is used to shutdown multiple threads).
    _hRenderSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hCaptureSamplesReadyEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hShutdownRenderEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    // Manual-reset (second argument TRUE) so that several threads can
    // observe the shutdown signal.
    _hShutdownCaptureEvent = CreateEvent(NULL, TRUE, FALSE, NULL);
    _hRenderStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hCaptureStartedEvent = CreateEvent(NULL, FALSE, FALSE, NULL);
    _hSetCaptureVolumeEvent = CreateEvent(NULL, FALSE, FALSE, NULL);

    _perfCounterFreq.QuadPart = 1;
    _perfCounterFactor = 0.0;
    _avgCPULoad = 0.0;

    // list of number of channels to use on recording side
    _recChannelsPrioList[0] = 2;    // stereo is prio 1
    _recChannelsPrioList[1] = 1;    // mono is prio 2

    // list of number of channels to use on playout side
    _playChannelsPrioList[0] = 2;    // stereo is prio 1
    _playChannelsPrioList[1] = 1;    // mono is prio 2

    HRESULT hr;

    // We know that this API will work since it has already been verified in
    // CoreAudioIsSupported, hence no need to check for errors here as well.

    // Retrieve the IMMDeviceEnumerator API (should load the MMDevAPI.dll)
    // TODO(henrika): we should probably move this allocation to Init() instead
    // and deallocate in Terminate() to make the implementation more symmetric.
    CoCreateInstance(
      __uuidof(MMDeviceEnumerator),
      NULL,
      CLSCTX_ALL,
      __uuidof(IMMDeviceEnumerator),
      reinterpret_cast<void**>(&_ptrEnumerator));
    assert(NULL != _ptrEnumerator);

    // DMO initialization for built-in WASAPI AEC.
    {
        IMediaObject* ptrDMO = NULL;
        hr = CoCreateInstance(CLSID_CWMAudioAEC,
                              NULL,
                              CLSCTX_INPROC_SERVER,
                              IID_IMediaObject,
                              reinterpret_cast<void**>(&ptrDMO));
        if (FAILED(hr) || ptrDMO == NULL)
        {
            // Since we check that _dmo is non-NULL in EnableBuiltInAEC(), the
            // feature is prevented from being enabled.
            _builtInAecEnabled = false;
            _TraceCOMError(hr);
        }
        // NOTE(review): this pattern assumes _dmo is a ref-counting smart
        // pointer that AddRef()s on assignment; otherwise releasing ptrDMO
        // here would leave _dmo dangling — confirm against the header.
        _dmo = ptrDMO;
        SAFE_RELEASE(ptrDMO);
    }
}
564
565// ----------------------------------------------------------------------------
566//  AudioDeviceWindowsCore() - dtor
567// ----------------------------------------------------------------------------
568
569AudioDeviceWindowsCore::~AudioDeviceWindowsCore()
570{
571    WEBRTC_TRACE(kTraceMemory, kTraceAudioDevice, _id, "%s destroyed", __FUNCTION__);
572
573    Terminate();
574
575    // The IMMDeviceEnumerator is created during construction. Must release
576    // it here and not in Terminate() since we don't recreate it in Init().
577    SAFE_RELEASE(_ptrEnumerator);
578
579    _ptrAudioBuffer = NULL;
580
581    if (NULL != _hRenderSamplesReadyEvent)
582    {
583        CloseHandle(_hRenderSamplesReadyEvent);
584        _hRenderSamplesReadyEvent = NULL;
585    }
586
587    if (NULL != _hCaptureSamplesReadyEvent)
588    {
589        CloseHandle(_hCaptureSamplesReadyEvent);
590        _hCaptureSamplesReadyEvent = NULL;
591    }
592
593    if (NULL != _hRenderStartedEvent)
594    {
595        CloseHandle(_hRenderStartedEvent);
596        _hRenderStartedEvent = NULL;
597    }
598
599    if (NULL != _hCaptureStartedEvent)
600    {
601        CloseHandle(_hCaptureStartedEvent);
602        _hCaptureStartedEvent = NULL;
603    }
604
605    if (NULL != _hShutdownRenderEvent)
606    {
607        CloseHandle(_hShutdownRenderEvent);
608        _hShutdownRenderEvent = NULL;
609    }
610
611    if (NULL != _hShutdownCaptureEvent)
612    {
613        CloseHandle(_hShutdownCaptureEvent);
614        _hShutdownCaptureEvent = NULL;
615    }
616
617    if (NULL != _hSetCaptureVolumeEvent)
618    {
619        CloseHandle(_hSetCaptureVolumeEvent);
620        _hSetCaptureVolumeEvent = NULL;
621    }
622
623    if (_avrtLibrary)
624    {
625        BOOL freeOK = FreeLibrary(_avrtLibrary);
626        if (!freeOK)
627        {
628            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
629                "AudioDeviceWindowsCore::~AudioDeviceWindowsCore() failed to free the loaded Avrt DLL module correctly");
630        }
631        else
632        {
633            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
634                "AudioDeviceWindowsCore::~AudioDeviceWindowsCore() the Avrt DLL module is now unloaded");
635        }
636    }
637
638    delete &_critSect;
639    delete &_volumeMutex;
640}
641
642// ============================================================================
643//                                     API
644// ============================================================================
645
646// ----------------------------------------------------------------------------
647//  AttachAudioBuffer
648// ----------------------------------------------------------------------------
649
650void AudioDeviceWindowsCore::AttachAudioBuffer(AudioDeviceBuffer* audioBuffer)
651{
652
653    _ptrAudioBuffer = audioBuffer;
654
655    // Inform the AudioBuffer about default settings for this implementation.
656    // Set all values to zero here since the actual settings will be done by
657    // InitPlayout and InitRecording later.
658    _ptrAudioBuffer->SetRecordingSampleRate(0);
659    _ptrAudioBuffer->SetPlayoutSampleRate(0);
660    _ptrAudioBuffer->SetRecordingChannels(0);
661    _ptrAudioBuffer->SetPlayoutChannels(0);
662}
663
664// ----------------------------------------------------------------------------
665//  ActiveAudioLayer
666// ----------------------------------------------------------------------------
667
668int32_t AudioDeviceWindowsCore::ActiveAudioLayer(AudioDeviceModule::AudioLayer& audioLayer) const
669{
670    audioLayer = AudioDeviceModule::kWindowsCoreAudio;
671    return 0;
672}
673
674// ----------------------------------------------------------------------------
675//  Init
676// ----------------------------------------------------------------------------
677
678int32_t AudioDeviceWindowsCore::Init()
679{
680
681    CriticalSectionScoped lock(&_critSect);
682
683    if (_initialized)
684    {
685        return 0;
686    }
687
688    _playWarning = 0;
689    _playError = 0;
690    _recWarning = 0;
691    _recError = 0;
692
693    // Enumerate all audio rendering and capturing endpoint devices.
694    // Note that, some of these will not be able to select by the user.
695    // The complete collection is for internal use only.
696    //
697    _EnumerateEndpointDevicesAll(eRender);
698    _EnumerateEndpointDevicesAll(eCapture);
699
700    _initialized = true;
701
702    return 0;
703}
704
705// ----------------------------------------------------------------------------
706//  Terminate
707// ----------------------------------------------------------------------------
708
709int32_t AudioDeviceWindowsCore::Terminate()
710{
711
712    CriticalSectionScoped lock(&_critSect);
713
714    if (!_initialized) {
715        return 0;
716    }
717
718    _initialized = false;
719    _speakerIsInitialized = false;
720    _microphoneIsInitialized = false;
721    _playing = false;
722    _recording = false;
723
724    SAFE_RELEASE(_ptrRenderCollection);
725    SAFE_RELEASE(_ptrCaptureCollection);
726    SAFE_RELEASE(_ptrDeviceOut);
727    SAFE_RELEASE(_ptrDeviceIn);
728    SAFE_RELEASE(_ptrClientOut);
729    SAFE_RELEASE(_ptrClientIn);
730    SAFE_RELEASE(_ptrRenderClient);
731    SAFE_RELEASE(_ptrCaptureClient);
732    SAFE_RELEASE(_ptrCaptureVolume);
733    SAFE_RELEASE(_ptrRenderSimpleVolume);
734
735    return 0;
736}
737
738// ----------------------------------------------------------------------------
739//  Initialized
740// ----------------------------------------------------------------------------
741
742bool AudioDeviceWindowsCore::Initialized() const
743{
744    return (_initialized);
745}
746
747// ----------------------------------------------------------------------------
748//  InitSpeaker
749// ----------------------------------------------------------------------------
750
751int32_t AudioDeviceWindowsCore::InitSpeaker()
752{
753
754    CriticalSectionScoped lock(&_critSect);
755
756    if (_playing)
757    {
758        return -1;
759    }
760
761    if (_ptrDeviceOut == NULL)
762    {
763        return -1;
764    }
765
766    if (_usingOutputDeviceIndex)
767    {
768        int16_t nDevices = PlayoutDevices();
769        if (_outputDeviceIndex > (nDevices - 1))
770        {
771            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "current device selection is invalid => unable to initialize");
772            return -1;
773        }
774    }
775
776    int32_t ret(0);
777
778    SAFE_RELEASE(_ptrDeviceOut);
779    if (_usingOutputDeviceIndex)
780    {
781        // Refresh the selected rendering endpoint device using current index
782        ret = _GetListDevice(eRender, _outputDeviceIndex, &_ptrDeviceOut);
783    }
784    else
785    {
786        ERole role;
787        (_outputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
788        // Refresh the selected rendering endpoint device using role
789        ret = _GetDefaultDevice(eRender, role, &_ptrDeviceOut);
790    }
791
792    if (ret != 0 || (_ptrDeviceOut == NULL))
793    {
794        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to initialize the rendering enpoint device");
795        SAFE_RELEASE(_ptrDeviceOut);
796        return -1;
797    }
798
799    IAudioSessionManager* pManager = NULL;
800    ret = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager),
801                                  CLSCTX_ALL,
802                                  NULL,
803                                  (void**)&pManager);
804    if (ret != 0 || pManager == NULL)
805    {
806        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
807                    "  failed to initialize the render manager");
808        SAFE_RELEASE(pManager);
809        return -1;
810    }
811
812    SAFE_RELEASE(_ptrRenderSimpleVolume);
813    ret = pManager->GetSimpleAudioVolume(NULL, FALSE, &_ptrRenderSimpleVolume);
814    if (ret != 0 || _ptrRenderSimpleVolume == NULL)
815    {
816        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
817                    "  failed to initialize the render simple volume");
818        SAFE_RELEASE(pManager);
819        SAFE_RELEASE(_ptrRenderSimpleVolume);
820        return -1;
821    }
822    SAFE_RELEASE(pManager);
823
824    _speakerIsInitialized = true;
825
826    return 0;
827}
828
829// ----------------------------------------------------------------------------
830//  InitMicrophone
831// ----------------------------------------------------------------------------
832
833int32_t AudioDeviceWindowsCore::InitMicrophone()
834{
835
836    CriticalSectionScoped lock(&_critSect);
837
838    if (_recording)
839    {
840        return -1;
841    }
842
843    if (_ptrDeviceIn == NULL)
844    {
845        return -1;
846    }
847
848    if (_usingInputDeviceIndex)
849    {
850        int16_t nDevices = RecordingDevices();
851        if (_inputDeviceIndex > (nDevices - 1))
852        {
853            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "current device selection is invalid => unable to initialize");
854            return -1;
855        }
856    }
857
858    int32_t ret(0);
859
860    SAFE_RELEASE(_ptrDeviceIn);
861    if (_usingInputDeviceIndex)
862    {
863        // Refresh the selected capture endpoint device using current index
864        ret = _GetListDevice(eCapture, _inputDeviceIndex, &_ptrDeviceIn);
865    }
866    else
867    {
868        ERole role;
869        (_inputDevice == AudioDeviceModule::kDefaultDevice) ? role = eConsole : role = eCommunications;
870        // Refresh the selected capture endpoint device using role
871        ret = _GetDefaultDevice(eCapture, role, &_ptrDeviceIn);
872    }
873
874    if (ret != 0 || (_ptrDeviceIn == NULL))
875    {
876        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "failed to initialize the capturing enpoint device");
877        SAFE_RELEASE(_ptrDeviceIn);
878        return -1;
879    }
880
881    ret = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume),
882                                 CLSCTX_ALL,
883                                 NULL,
884                                 reinterpret_cast<void **>(&_ptrCaptureVolume));
885    if (ret != 0 || _ptrCaptureVolume == NULL)
886    {
887        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
888                    "  failed to initialize the capture volume");
889        SAFE_RELEASE(_ptrCaptureVolume);
890        return -1;
891    }
892
893    _microphoneIsInitialized = true;
894
895    return 0;
896}
897
898// ----------------------------------------------------------------------------
899//  SpeakerIsInitialized
900// ----------------------------------------------------------------------------
901
902bool AudioDeviceWindowsCore::SpeakerIsInitialized() const
903{
904
905    return (_speakerIsInitialized);
906}
907
908// ----------------------------------------------------------------------------
909//  MicrophoneIsInitialized
910// ----------------------------------------------------------------------------
911
912bool AudioDeviceWindowsCore::MicrophoneIsInitialized() const
913{
914
915    return (_microphoneIsInitialized);
916}
917
918// ----------------------------------------------------------------------------
919//  SpeakerVolumeIsAvailable
920// ----------------------------------------------------------------------------
921
922int32_t AudioDeviceWindowsCore::SpeakerVolumeIsAvailable(bool& available)
923{
924
925    CriticalSectionScoped lock(&_critSect);
926
927    if (_ptrDeviceOut == NULL)
928    {
929        return -1;
930    }
931
932    HRESULT hr = S_OK;
933    IAudioSessionManager* pManager = NULL;
934    ISimpleAudioVolume* pVolume = NULL;
935
936    hr = _ptrDeviceOut->Activate(__uuidof(IAudioSessionManager), CLSCTX_ALL, NULL, (void**)&pManager);
937    EXIT_ON_ERROR(hr);
938
939    hr = pManager->GetSimpleAudioVolume(NULL, FALSE, &pVolume);
940    EXIT_ON_ERROR(hr);
941
942    float volume(0.0f);
943    hr = pVolume->GetMasterVolume(&volume);
944    if (FAILED(hr))
945    {
946        available = false;
947    }
948    available = true;
949
950    SAFE_RELEASE(pManager);
951    SAFE_RELEASE(pVolume);
952
953    return 0;
954
955Exit:
956    _TraceCOMError(hr);
957    SAFE_RELEASE(pManager);
958    SAFE_RELEASE(pVolume);
959    return -1;
960}
961
962// ----------------------------------------------------------------------------
963//  SetSpeakerVolume
964// ----------------------------------------------------------------------------
965
966int32_t AudioDeviceWindowsCore::SetSpeakerVolume(uint32_t volume)
967{
968
969    {
970        CriticalSectionScoped lock(&_critSect);
971
972        if (!_speakerIsInitialized)
973        {
974        return -1;
975        }
976
977        if (_ptrDeviceOut == NULL)
978        {
979            return -1;
980        }
981    }
982
983    if (volume < (uint32_t)MIN_CORE_SPEAKER_VOLUME ||
984        volume > (uint32_t)MAX_CORE_SPEAKER_VOLUME)
985    {
986        return -1;
987    }
988
989    HRESULT hr = S_OK;
990
991    // scale input volume to valid range (0.0 to 1.0)
992    const float fLevel = (float)volume/MAX_CORE_SPEAKER_VOLUME;
993    _volumeMutex.Enter();
994    hr = _ptrRenderSimpleVolume->SetMasterVolume(fLevel,NULL);
995    _volumeMutex.Leave();
996    EXIT_ON_ERROR(hr);
997
998    return 0;
999
1000Exit:
1001    _TraceCOMError(hr);
1002    return -1;
1003}
1004
1005// ----------------------------------------------------------------------------
1006//  SpeakerVolume
1007// ----------------------------------------------------------------------------
1008
1009int32_t AudioDeviceWindowsCore::SpeakerVolume(uint32_t& volume) const
1010{
1011
1012    {
1013        CriticalSectionScoped lock(&_critSect);
1014
1015        if (!_speakerIsInitialized)
1016        {
1017            return -1;
1018        }
1019
1020        if (_ptrDeviceOut == NULL)
1021        {
1022            return -1;
1023        }
1024    }
1025
1026    HRESULT hr = S_OK;
1027    float fLevel(0.0f);
1028
1029    _volumeMutex.Enter();
1030    hr = _ptrRenderSimpleVolume->GetMasterVolume(&fLevel);
1031    _volumeMutex.Leave();
1032    EXIT_ON_ERROR(hr);
1033
1034    // scale input volume range [0.0,1.0] to valid output range
1035    volume = static_cast<uint32_t> (fLevel*MAX_CORE_SPEAKER_VOLUME);
1036
1037    return 0;
1038
1039Exit:
1040    _TraceCOMError(hr);
1041    return -1;
1042}
1043
1044// ----------------------------------------------------------------------------
1045//  SetWaveOutVolume
1046// ----------------------------------------------------------------------------
1047
int32_t AudioDeviceWindowsCore::SetWaveOutVolume(uint16_t volumeLeft, uint16_t volumeRight)
{
    // The legacy waveOut volume API is not supported by the Core Audio
    // implementation; this call always fails.
    return -1;
}
1052
1053// ----------------------------------------------------------------------------
1054//  WaveOutVolume
1055// ----------------------------------------------------------------------------
1056
int32_t AudioDeviceWindowsCore::WaveOutVolume(uint16_t& volumeLeft, uint16_t& volumeRight) const
{
    // The legacy waveOut volume API is not supported by the Core Audio
    // implementation; this call always fails and leaves the outputs untouched.
    return -1;
}
1061
1062// ----------------------------------------------------------------------------
1063//  MaxSpeakerVolume
1064//
1065//  The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
1066//  silence and 1.0 indicates full volume (no attenuation).
1067//  We add our (webrtc-internal) own max level to match the Wave API and
1068//  how it is used today in VoE.
1069// ----------------------------------------------------------------------------
1070
1071int32_t AudioDeviceWindowsCore::MaxSpeakerVolume(uint32_t& maxVolume) const
1072{
1073
1074    if (!_speakerIsInitialized)
1075    {
1076        return -1;
1077    }
1078
1079    maxVolume = static_cast<uint32_t> (MAX_CORE_SPEAKER_VOLUME);
1080
1081    return 0;
1082}
1083
1084// ----------------------------------------------------------------------------
1085//  MinSpeakerVolume
1086// ----------------------------------------------------------------------------
1087
1088int32_t AudioDeviceWindowsCore::MinSpeakerVolume(uint32_t& minVolume) const
1089{
1090
1091    if (!_speakerIsInitialized)
1092    {
1093        return -1;
1094    }
1095
1096    minVolume = static_cast<uint32_t> (MIN_CORE_SPEAKER_VOLUME);
1097
1098    return 0;
1099}
1100
1101// ----------------------------------------------------------------------------
1102//  SpeakerVolumeStepSize
1103// ----------------------------------------------------------------------------
1104
1105int32_t AudioDeviceWindowsCore::SpeakerVolumeStepSize(uint16_t& stepSize) const
1106{
1107
1108    if (!_speakerIsInitialized)
1109    {
1110        return -1;
1111    }
1112
1113    stepSize = CORE_SPEAKER_VOLUME_STEP_SIZE;
1114
1115    return 0;
1116}
1117
1118// ----------------------------------------------------------------------------
1119//  SpeakerMuteIsAvailable
1120// ----------------------------------------------------------------------------
1121
1122int32_t AudioDeviceWindowsCore::SpeakerMuteIsAvailable(bool& available)
1123{
1124
1125    CriticalSectionScoped lock(&_critSect);
1126
1127    if (_ptrDeviceOut == NULL)
1128    {
1129        return -1;
1130    }
1131
1132    HRESULT hr = S_OK;
1133    IAudioEndpointVolume* pVolume = NULL;
1134
1135    // Query the speaker system mute state.
1136    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume),
1137        CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
1138    EXIT_ON_ERROR(hr);
1139
1140    BOOL mute;
1141    hr = pVolume->GetMute(&mute);
1142    if (FAILED(hr))
1143        available = false;
1144    else
1145        available = true;
1146
1147    SAFE_RELEASE(pVolume);
1148
1149    return 0;
1150
1151Exit:
1152    _TraceCOMError(hr);
1153    SAFE_RELEASE(pVolume);
1154    return -1;
1155}
1156
1157// ----------------------------------------------------------------------------
1158//  SetSpeakerMute
1159// ----------------------------------------------------------------------------
1160
1161int32_t AudioDeviceWindowsCore::SetSpeakerMute(bool enable)
1162{
1163
1164    CriticalSectionScoped lock(&_critSect);
1165
1166    if (!_speakerIsInitialized)
1167    {
1168        return -1;
1169    }
1170
1171    if (_ptrDeviceOut == NULL)
1172    {
1173        return -1;
1174    }
1175
1176    HRESULT hr = S_OK;
1177    IAudioEndpointVolume* pVolume = NULL;
1178
1179    // Set the speaker system mute state.
1180    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
1181    EXIT_ON_ERROR(hr);
1182
1183    const BOOL mute(enable);
1184    hr = pVolume->SetMute(mute, NULL);
1185    EXIT_ON_ERROR(hr);
1186
1187    SAFE_RELEASE(pVolume);
1188
1189    return 0;
1190
1191Exit:
1192    _TraceCOMError(hr);
1193    SAFE_RELEASE(pVolume);
1194    return -1;
1195}
1196
1197// ----------------------------------------------------------------------------
1198//  SpeakerMute
1199// ----------------------------------------------------------------------------
1200
1201int32_t AudioDeviceWindowsCore::SpeakerMute(bool& enabled) const
1202{
1203
1204    if (!_speakerIsInitialized)
1205    {
1206        return -1;
1207    }
1208
1209    if (_ptrDeviceOut == NULL)
1210    {
1211        return -1;
1212    }
1213
1214    HRESULT hr = S_OK;
1215    IAudioEndpointVolume* pVolume = NULL;
1216
1217    // Query the speaker system mute state.
1218    hr = _ptrDeviceOut->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
1219    EXIT_ON_ERROR(hr);
1220
1221    BOOL mute;
1222    hr = pVolume->GetMute(&mute);
1223    EXIT_ON_ERROR(hr);
1224
1225    enabled = (mute == TRUE) ? true : false;
1226
1227    SAFE_RELEASE(pVolume);
1228
1229    return 0;
1230
1231Exit:
1232    _TraceCOMError(hr);
1233    SAFE_RELEASE(pVolume);
1234    return -1;
1235}
1236
1237// ----------------------------------------------------------------------------
1238//  MicrophoneMuteIsAvailable
1239// ----------------------------------------------------------------------------
1240
1241int32_t AudioDeviceWindowsCore::MicrophoneMuteIsAvailable(bool& available)
1242{
1243
1244    CriticalSectionScoped lock(&_critSect);
1245
1246    if (_ptrDeviceIn == NULL)
1247    {
1248        return -1;
1249    }
1250
1251    HRESULT hr = S_OK;
1252    IAudioEndpointVolume* pVolume = NULL;
1253
1254    // Query the microphone system mute state.
1255    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
1256    EXIT_ON_ERROR(hr);
1257
1258    BOOL mute;
1259    hr = pVolume->GetMute(&mute);
1260    if (FAILED(hr))
1261        available = false;
1262    else
1263        available = true;
1264
1265    SAFE_RELEASE(pVolume);
1266    return 0;
1267
1268Exit:
1269    _TraceCOMError(hr);
1270    SAFE_RELEASE(pVolume);
1271    return -1;
1272}
1273
1274// ----------------------------------------------------------------------------
1275//  SetMicrophoneMute
1276// ----------------------------------------------------------------------------
1277
1278int32_t AudioDeviceWindowsCore::SetMicrophoneMute(bool enable)
1279{
1280
1281    if (!_microphoneIsInitialized)
1282    {
1283        return -1;
1284    }
1285
1286    if (_ptrDeviceIn == NULL)
1287    {
1288        return -1;
1289    }
1290
1291    HRESULT hr = S_OK;
1292    IAudioEndpointVolume* pVolume = NULL;
1293
1294    // Set the microphone system mute state.
1295    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
1296    EXIT_ON_ERROR(hr);
1297
1298    const BOOL mute(enable);
1299    hr = pVolume->SetMute(mute, NULL);
1300    EXIT_ON_ERROR(hr);
1301
1302    SAFE_RELEASE(pVolume);
1303    return 0;
1304
1305Exit:
1306    _TraceCOMError(hr);
1307    SAFE_RELEASE(pVolume);
1308    return -1;
1309}
1310
1311// ----------------------------------------------------------------------------
1312//  MicrophoneMute
1313// ----------------------------------------------------------------------------
1314
1315int32_t AudioDeviceWindowsCore::MicrophoneMute(bool& enabled) const
1316{
1317
1318    if (!_microphoneIsInitialized)
1319    {
1320        return -1;
1321    }
1322
1323    HRESULT hr = S_OK;
1324    IAudioEndpointVolume* pVolume = NULL;
1325
1326    // Query the microphone system mute state.
1327    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL,  reinterpret_cast<void**>(&pVolume));
1328    EXIT_ON_ERROR(hr);
1329
1330    BOOL mute;
1331    hr = pVolume->GetMute(&mute);
1332    EXIT_ON_ERROR(hr);
1333
1334    enabled = (mute == TRUE) ? true : false;
1335
1336    SAFE_RELEASE(pVolume);
1337    return 0;
1338
1339Exit:
1340    _TraceCOMError(hr);
1341    SAFE_RELEASE(pVolume);
1342    return -1;
1343}
1344
1345// ----------------------------------------------------------------------------
1346//  MicrophoneBoostIsAvailable
1347// ----------------------------------------------------------------------------
1348
int32_t AudioDeviceWindowsCore::MicrophoneBoostIsAvailable(bool& available)
{
    // Microphone boost is never available in the Core Audio implementation.
    available = false;
    return 0;
}
1355
1356// ----------------------------------------------------------------------------
1357//  SetMicrophoneBoost
1358// ----------------------------------------------------------------------------
1359
int32_t AudioDeviceWindowsCore::SetMicrophoneBoost(bool enable)
{
    // Microphone boost is not supported by the Core Audio implementation;
    // always fails (whether or not the microphone is initialized).
    if (!_microphoneIsInitialized)
    {
        return -1;
    }

    return -1;
}
1370
1371// ----------------------------------------------------------------------------
1372//  MicrophoneBoost
1373// ----------------------------------------------------------------------------
1374
int32_t AudioDeviceWindowsCore::MicrophoneBoost(bool& enabled) const
{
    // Microphone boost is not supported by the Core Audio implementation;
    // always fails and leaves |enabled| untouched.
    if (!_microphoneIsInitialized)
    {
        return -1;
    }

    return -1;
}
1385
1386// ----------------------------------------------------------------------------
1387//  StereoRecordingIsAvailable
1388// ----------------------------------------------------------------------------
1389
int32_t AudioDeviceWindowsCore::StereoRecordingIsAvailable(bool& available)
{
    // Stereo recording is always reported as available; the actual channel
    // configuration is negotiated via SetStereoRecording().
    available = true;
    return 0;
}
1396
1397// ----------------------------------------------------------------------------
1398//  SetStereoRecording
1399// ----------------------------------------------------------------------------
1400
1401int32_t AudioDeviceWindowsCore::SetStereoRecording(bool enable)
1402{
1403
1404    CriticalSectionScoped lock(&_critSect);
1405
1406    if (enable)
1407    {
1408        _recChannelsPrioList[0] = 2;    // try stereo first
1409        _recChannelsPrioList[1] = 1;
1410        _recChannels = 2;
1411    }
1412    else
1413    {
1414        _recChannelsPrioList[0] = 1;    // try mono first
1415        _recChannelsPrioList[1] = 2;
1416        _recChannels = 1;
1417    }
1418
1419    return 0;
1420}
1421
1422// ----------------------------------------------------------------------------
1423//  StereoRecording
1424// ----------------------------------------------------------------------------
1425
1426int32_t AudioDeviceWindowsCore::StereoRecording(bool& enabled) const
1427{
1428
1429    if (_recChannels == 2)
1430        enabled = true;
1431    else
1432        enabled = false;
1433
1434    return 0;
1435}
1436
1437// ----------------------------------------------------------------------------
1438//  StereoPlayoutIsAvailable
1439// ----------------------------------------------------------------------------
1440
int32_t AudioDeviceWindowsCore::StereoPlayoutIsAvailable(bool& available)
{
    // Stereo playout is always reported as available; the actual channel
    // configuration is negotiated via SetStereoPlayout().
    available = true;
    return 0;
}
1447
1448// ----------------------------------------------------------------------------
1449//  SetStereoPlayout
1450// ----------------------------------------------------------------------------
1451
1452int32_t AudioDeviceWindowsCore::SetStereoPlayout(bool enable)
1453{
1454
1455    CriticalSectionScoped lock(&_critSect);
1456
1457    if (enable)
1458    {
1459        _playChannelsPrioList[0] = 2;    // try stereo first
1460        _playChannelsPrioList[1] = 1;
1461        _playChannels = 2;
1462    }
1463    else
1464    {
1465        _playChannelsPrioList[0] = 1;    // try mono first
1466        _playChannelsPrioList[1] = 2;
1467        _playChannels = 1;
1468    }
1469
1470    return 0;
1471}
1472
1473// ----------------------------------------------------------------------------
1474//  StereoPlayout
1475// ----------------------------------------------------------------------------
1476
1477int32_t AudioDeviceWindowsCore::StereoPlayout(bool& enabled) const
1478{
1479
1480    if (_playChannels == 2)
1481        enabled = true;
1482    else
1483        enabled = false;
1484
1485    return 0;
1486}
1487
1488// ----------------------------------------------------------------------------
1489//  SetAGC
1490// ----------------------------------------------------------------------------
1491
int32_t AudioDeviceWindowsCore::SetAGC(bool enable)
{
    // Stores the AGC (automatic gain control) flag; read back via AGC().
    CriticalSectionScoped lock(&_critSect);
    _AGC = enable;
    return 0;
}
1498
1499// ----------------------------------------------------------------------------
1500//  AGC
1501// ----------------------------------------------------------------------------
1502
bool AudioDeviceWindowsCore::AGC() const
{
    // Returns the AGC flag previously set via SetAGC().
    CriticalSectionScoped lock(&_critSect);
    return _AGC;
}
1508
1509// ----------------------------------------------------------------------------
1510//  MicrophoneVolumeIsAvailable
1511// ----------------------------------------------------------------------------
1512
1513int32_t AudioDeviceWindowsCore::MicrophoneVolumeIsAvailable(bool& available)
1514{
1515
1516    CriticalSectionScoped lock(&_critSect);
1517
1518    if (_ptrDeviceIn == NULL)
1519    {
1520        return -1;
1521    }
1522
1523    HRESULT hr = S_OK;
1524    IAudioEndpointVolume* pVolume = NULL;
1525
1526    hr = _ptrDeviceIn->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL, NULL, reinterpret_cast<void**>(&pVolume));
1527    EXIT_ON_ERROR(hr);
1528
1529    float volume(0.0f);
1530    hr = pVolume->GetMasterVolumeLevelScalar(&volume);
1531    if (FAILED(hr))
1532    {
1533        available = false;
1534    }
1535    available = true;
1536
1537    SAFE_RELEASE(pVolume);
1538    return 0;
1539
1540Exit:
1541    _TraceCOMError(hr);
1542    SAFE_RELEASE(pVolume);
1543    return -1;
1544}
1545
1546// ----------------------------------------------------------------------------
1547//  SetMicrophoneVolume
1548// ----------------------------------------------------------------------------
1549
1550int32_t AudioDeviceWindowsCore::SetMicrophoneVolume(uint32_t volume)
1551{
1552    WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "AudioDeviceWindowsCore::SetMicrophoneVolume(volume=%u)", volume);
1553
1554    {
1555        CriticalSectionScoped lock(&_critSect);
1556
1557        if (!_microphoneIsInitialized)
1558        {
1559            return -1;
1560        }
1561
1562        if (_ptrDeviceIn == NULL)
1563        {
1564            return -1;
1565        }
1566    }
1567
1568    if (volume < static_cast<uint32_t>(MIN_CORE_MICROPHONE_VOLUME) ||
1569        volume > static_cast<uint32_t>(MAX_CORE_MICROPHONE_VOLUME))
1570    {
1571        return -1;
1572    }
1573
1574    HRESULT hr = S_OK;
1575    // scale input volume to valid range (0.0 to 1.0)
1576    const float fLevel = static_cast<float>(volume)/MAX_CORE_MICROPHONE_VOLUME;
1577    _volumeMutex.Enter();
1578    _ptrCaptureVolume->SetMasterVolumeLevelScalar(fLevel, NULL);
1579    _volumeMutex.Leave();
1580    EXIT_ON_ERROR(hr);
1581
1582    return 0;
1583
1584Exit:
1585    _TraceCOMError(hr);
1586    return -1;
1587}
1588
1589// ----------------------------------------------------------------------------
1590//  MicrophoneVolume
1591// ----------------------------------------------------------------------------
1592
1593int32_t AudioDeviceWindowsCore::MicrophoneVolume(uint32_t& volume) const
1594{
1595    {
1596        CriticalSectionScoped lock(&_critSect);
1597
1598        if (!_microphoneIsInitialized)
1599        {
1600            return -1;
1601        }
1602
1603        if (_ptrDeviceIn == NULL)
1604        {
1605            return -1;
1606        }
1607    }
1608
1609    HRESULT hr = S_OK;
1610    float fLevel(0.0f);
1611    volume = 0;
1612    _volumeMutex.Enter();
1613    hr = _ptrCaptureVolume->GetMasterVolumeLevelScalar(&fLevel);
1614    _volumeMutex.Leave();
1615    EXIT_ON_ERROR(hr);
1616
1617    // scale input volume range [0.0,1.0] to valid output range
1618    volume = static_cast<uint32_t> (fLevel*MAX_CORE_MICROPHONE_VOLUME);
1619
1620    return 0;
1621
1622Exit:
1623    _TraceCOMError(hr);
1624    return -1;
1625}
1626
1627// ----------------------------------------------------------------------------
1628//  MaxMicrophoneVolume
1629//
1630//  The internal range for Core Audio is 0.0 to 1.0, where 0.0 indicates
1631//  silence and 1.0 indicates full volume (no attenuation).
1632//  We add our (webrtc-internal) own max level to match the Wave API and
1633//  how it is used today in VoE.
1634// ----------------------------------------------------------------------------
1635
1636int32_t AudioDeviceWindowsCore::MaxMicrophoneVolume(uint32_t& maxVolume) const
1637{
1638    WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "%s", __FUNCTION__);
1639
1640    if (!_microphoneIsInitialized)
1641    {
1642        return -1;
1643    }
1644
1645    maxVolume = static_cast<uint32_t> (MAX_CORE_MICROPHONE_VOLUME);
1646
1647    return 0;
1648}
1649
1650// ----------------------------------------------------------------------------
1651//  MinMicrophoneVolume
1652// ----------------------------------------------------------------------------
1653
1654int32_t AudioDeviceWindowsCore::MinMicrophoneVolume(uint32_t& minVolume) const
1655{
1656
1657    if (!_microphoneIsInitialized)
1658    {
1659        return -1;
1660    }
1661
1662    minVolume = static_cast<uint32_t> (MIN_CORE_MICROPHONE_VOLUME);
1663
1664    return 0;
1665}
1666
1667// ----------------------------------------------------------------------------
1668//  MicrophoneVolumeStepSize
1669// ----------------------------------------------------------------------------
1670
1671int32_t AudioDeviceWindowsCore::MicrophoneVolumeStepSize(uint16_t& stepSize) const
1672{
1673
1674    if (!_microphoneIsInitialized)
1675    {
1676        return -1;
1677    }
1678
1679    stepSize = CORE_MICROPHONE_VOLUME_STEP_SIZE;
1680
1681    return 0;
1682}
1683
1684// ----------------------------------------------------------------------------
1685//  PlayoutDevices
1686// ----------------------------------------------------------------------------
1687
1688int16_t AudioDeviceWindowsCore::PlayoutDevices()
1689{
1690
1691    CriticalSectionScoped lock(&_critSect);
1692
1693    if (_RefreshDeviceList(eRender) != -1)
1694    {
1695        return (_DeviceListCount(eRender));
1696    }
1697
1698    return -1;
1699}
1700
1701// ----------------------------------------------------------------------------
1702//  SetPlayoutDevice I (II)
1703// ----------------------------------------------------------------------------
1704
1705int32_t AudioDeviceWindowsCore::SetPlayoutDevice(uint16_t index)
1706{
1707
1708    if (_playIsInitialized)
1709    {
1710        return -1;
1711    }
1712
1713    // Get current number of available rendering endpoint devices and refresh the rendering collection.
1714    UINT nDevices = PlayoutDevices();
1715
1716    if (index < 0 || index > (nDevices-1))
1717    {
1718        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1));
1719        return -1;
1720    }
1721
1722    CriticalSectionScoped lock(&_critSect);
1723
1724    HRESULT hr(S_OK);
1725
1726    assert(_ptrRenderCollection != NULL);
1727
1728    //  Select an endpoint rendering device given the specified index
1729    SAFE_RELEASE(_ptrDeviceOut);
1730    hr = _ptrRenderCollection->Item(
1731                                 index,
1732                                 &_ptrDeviceOut);
1733    if (FAILED(hr))
1734    {
1735        _TraceCOMError(hr);
1736        SAFE_RELEASE(_ptrDeviceOut);
1737        return -1;
1738    }
1739
1740    WCHAR szDeviceName[MAX_PATH];
1741    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
1742
1743    // Get the endpoint device's friendly-name
1744    if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
1745    {
1746        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
1747    }
1748
1749    _usingOutputDeviceIndex = true;
1750    _outputDeviceIndex = index;
1751
1752    return 0;
1753}
1754
1755// ----------------------------------------------------------------------------
1756//  SetPlayoutDevice II (II)
1757// ----------------------------------------------------------------------------
1758
1759int32_t AudioDeviceWindowsCore::SetPlayoutDevice(AudioDeviceModule::WindowsDeviceType device)
1760{
1761    if (_playIsInitialized)
1762    {
1763        return -1;
1764    }
1765
1766    ERole role(eCommunications);
1767
1768    if (device == AudioDeviceModule::kDefaultDevice)
1769    {
1770        role = eConsole;
1771    }
1772    else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
1773    {
1774        role = eCommunications;
1775    }
1776
1777    CriticalSectionScoped lock(&_critSect);
1778
1779    // Refresh the list of rendering endpoint devices
1780    _RefreshDeviceList(eRender);
1781
1782    HRESULT hr(S_OK);
1783
1784    assert(_ptrEnumerator != NULL);
1785
1786    //  Select an endpoint rendering device given the specified role
1787    SAFE_RELEASE(_ptrDeviceOut);
1788    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
1789                           eRender,
1790                           role,
1791                           &_ptrDeviceOut);
1792    if (FAILED(hr))
1793    {
1794        _TraceCOMError(hr);
1795        SAFE_RELEASE(_ptrDeviceOut);
1796        return -1;
1797    }
1798
1799    WCHAR szDeviceName[MAX_PATH];
1800    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
1801
1802    // Get the endpoint device's friendly-name
1803    if (_GetDeviceName(_ptrDeviceOut, szDeviceName, bufferLen) == 0)
1804    {
1805        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
1806    }
1807
1808    _usingOutputDeviceIndex = false;
1809    _outputDevice = device;
1810
1811    return 0;
1812}
1813
1814// ----------------------------------------------------------------------------
1815//  PlayoutDeviceName
1816// ----------------------------------------------------------------------------
1817
// Returns the friendly name and endpoint ID string (both UTF-8) of the render
// device at |index|. index == (uint16_t)-1 selects the Default Communication
// render device. |guid| may be NULL when the ID string is not wanted.
int32_t AudioDeviceWindowsCore::PlayoutDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{

    bool defaultCommunicationDevice(false);
    const int16_t nDevices(PlayoutDevices());  // also updates the list of devices

    // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
    if (index == (uint16_t)(-1))
    {
        defaultCommunicationDevice = true;
        index = 0;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Default Communication endpoint device will be used");
    }

    if ((index > (nDevices-1)) || (name == NULL))
    {
        return -1;
    }

    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    CriticalSectionScoped lock(&_critSect);

    int32_t ret(-1);
    WCHAR szDeviceName[MAX_PATH];
    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];

    // Get the endpoint device's friendly-name
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceName(eRender, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceName(eRender, index, szDeviceName, bufferLen);
    }

    if (ret == 0)
    {
        // Convert the endpoint device's friendly-name to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
    // NOTE(review): szDeviceName is reused as scratch for the ID string, and
    // |ret| is overwritten here even when |guid| is NULL — a failed ID lookup
    // makes the whole call return -1 although the name was retrieved.
    // Confirm this is intended before relying on the return value.
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceID(eRender, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceID(eRender, index, szDeviceName, bufferLen);
    }

    if (guid != NULL && ret == 0)
    {
        // Convert the endpoint device's ID string to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    return ret;
}
1893
1894// ----------------------------------------------------------------------------
1895//  RecordingDeviceName
1896// ----------------------------------------------------------------------------
1897
// Returns the friendly name and endpoint ID string (both UTF-8) of the
// capture device at |index|. index == (uint16_t)-1 selects the Default
// Communication capture device. |guid| may be NULL when the ID string is
// not wanted.
int32_t AudioDeviceWindowsCore::RecordingDeviceName(
    uint16_t index,
    char name[kAdmMaxDeviceNameSize],
    char guid[kAdmMaxGuidSize])
{

    bool defaultCommunicationDevice(false);
    const int16_t nDevices(RecordingDevices());  // also updates the list of devices

    // Special fix for the case when the user selects '-1' as index (<=> Default Communication Device)
    if (index == (uint16_t)(-1))
    {
        defaultCommunicationDevice = true;
        index = 0;
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Default Communication endpoint device will be used");
    }

    if ((index > (nDevices-1)) || (name == NULL))
    {
        return -1;
    }

    memset(name, 0, kAdmMaxDeviceNameSize);

    if (guid != NULL)
    {
        memset(guid, 0, kAdmMaxGuidSize);
    }

    CriticalSectionScoped lock(&_critSect);

    int32_t ret(-1);
    WCHAR szDeviceName[MAX_PATH];
    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];

    // Get the endpoint device's friendly-name
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceName(eCapture, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceName(eCapture, index, szDeviceName, bufferLen);
    }

    if (ret == 0)
    {
        // Convert the endpoint device's friendly-name to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, name, kAdmMaxDeviceNameSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
    // NOTE(review): szDeviceName is reused as scratch for the ID string, and
    // |ret| is overwritten here even when |guid| is NULL — a failed ID lookup
    // makes the whole call return -1 although the name was retrieved.
    // Confirm this is intended before relying on the return value.
    if (defaultCommunicationDevice)
    {
        ret = _GetDefaultDeviceID(eCapture, eCommunications, szDeviceName, bufferLen);
    }
    else
    {
        ret = _GetListDeviceID(eCapture, index, szDeviceName, bufferLen);
    }

    if (guid != NULL && ret == 0)
    {
        // Convert the endpoint device's ID string to UTF-8
        if (WideCharToMultiByte(CP_UTF8, 0, szDeviceName, -1, guid, kAdmMaxGuidSize, NULL, NULL) == 0)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "WideCharToMultiByte(CP_UTF8) failed with error code %d", GetLastError());
        }
    }

    return ret;
}
1973
1974// ----------------------------------------------------------------------------
1975//  RecordingDevices
1976// ----------------------------------------------------------------------------
1977
1978int16_t AudioDeviceWindowsCore::RecordingDevices()
1979{
1980
1981    CriticalSectionScoped lock(&_critSect);
1982
1983    if (_RefreshDeviceList(eCapture) != -1)
1984    {
1985        return (_DeviceListCount(eCapture));
1986    }
1987
1988    return -1;
1989}
1990
1991// ----------------------------------------------------------------------------
1992//  SetRecordingDevice I (II)
1993// ----------------------------------------------------------------------------
1994
1995int32_t AudioDeviceWindowsCore::SetRecordingDevice(uint16_t index)
1996{
1997
1998    if (_recIsInitialized)
1999    {
2000        return -1;
2001    }
2002
2003    // Get current number of available capture endpoint devices and refresh the capture collection.
2004    UINT nDevices = RecordingDevices();
2005
2006    if (index < 0 || index > (nDevices-1))
2007    {
2008        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "device index is out of range [0,%u]", (nDevices-1));
2009        return -1;
2010    }
2011
2012    CriticalSectionScoped lock(&_critSect);
2013
2014    HRESULT hr(S_OK);
2015
2016    assert(_ptrCaptureCollection != NULL);
2017
2018    // Select an endpoint capture device given the specified index
2019    SAFE_RELEASE(_ptrDeviceIn);
2020    hr = _ptrCaptureCollection->Item(
2021                                 index,
2022                                 &_ptrDeviceIn);
2023    if (FAILED(hr))
2024    {
2025        _TraceCOMError(hr);
2026        SAFE_RELEASE(_ptrDeviceIn);
2027        return -1;
2028    }
2029
2030    WCHAR szDeviceName[MAX_PATH];
2031    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
2032
2033    // Get the endpoint device's friendly-name
2034    if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
2035    {
2036        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
2037    }
2038
2039    _usingInputDeviceIndex = true;
2040    _inputDeviceIndex = index;
2041
2042    return 0;
2043}
2044
2045// ----------------------------------------------------------------------------
2046//  SetRecordingDevice II (II)
2047// ----------------------------------------------------------------------------
2048
2049int32_t AudioDeviceWindowsCore::SetRecordingDevice(AudioDeviceModule::WindowsDeviceType device)
2050{
2051    if (_recIsInitialized)
2052    {
2053        return -1;
2054    }
2055
2056    ERole role(eCommunications);
2057
2058    if (device == AudioDeviceModule::kDefaultDevice)
2059    {
2060        role = eConsole;
2061    }
2062    else if (device == AudioDeviceModule::kDefaultCommunicationDevice)
2063    {
2064        role = eCommunications;
2065    }
2066
2067    CriticalSectionScoped lock(&_critSect);
2068
2069    // Refresh the list of capture endpoint devices
2070    _RefreshDeviceList(eCapture);
2071
2072    HRESULT hr(S_OK);
2073
2074    assert(_ptrEnumerator != NULL);
2075
2076    //  Select an endpoint capture device given the specified role
2077    SAFE_RELEASE(_ptrDeviceIn);
2078    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
2079                           eCapture,
2080                           role,
2081                           &_ptrDeviceIn);
2082    if (FAILED(hr))
2083    {
2084        _TraceCOMError(hr);
2085        SAFE_RELEASE(_ptrDeviceIn);
2086        return -1;
2087    }
2088
2089    WCHAR szDeviceName[MAX_PATH];
2090    const int bufferLen = sizeof(szDeviceName)/sizeof(szDeviceName)[0];
2091
2092    // Get the endpoint device's friendly-name
2093    if (_GetDeviceName(_ptrDeviceIn, szDeviceName, bufferLen) == 0)
2094    {
2095        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", szDeviceName);
2096    }
2097
2098    _usingInputDeviceIndex = false;
2099    _inputDevice = device;
2100
2101    return 0;
2102}
2103
2104// ----------------------------------------------------------------------------
2105//  PlayoutIsAvailable
2106// ----------------------------------------------------------------------------
2107
2108int32_t AudioDeviceWindowsCore::PlayoutIsAvailable(bool& available)
2109{
2110
2111    available = false;
2112
2113    // Try to initialize the playout side
2114    int32_t res = InitPlayout();
2115
2116    // Cancel effect of initialization
2117    StopPlayout();
2118
2119    if (res != -1)
2120    {
2121        available = true;
2122    }
2123
2124    return 0;
2125}
2126
2127// ----------------------------------------------------------------------------
2128//  RecordingIsAvailable
2129// ----------------------------------------------------------------------------
2130
2131int32_t AudioDeviceWindowsCore::RecordingIsAvailable(bool& available)
2132{
2133
2134    available = false;
2135
2136    // Try to initialize the recording side
2137    int32_t res = InitRecording();
2138
2139    // Cancel effect of initialization
2140    StopRecording();
2141
2142    if (res != -1)
2143    {
2144        available = true;
2145    }
2146
2147    return 0;
2148}
2149
2150// ----------------------------------------------------------------------------
2151//  InitPlayout
2152// ----------------------------------------------------------------------------
2153
int32_t AudioDeviceWindowsCore::InitPlayout()
{

    CriticalSectionScoped lock(&_critSect);

    // Re-initialization is not allowed while playout is active.
    if (_playing)
    {
        return -1;
    }

    // Idempotent: a second call while already initialized is a no-op success.
    if (_playIsInitialized)
    {
        return 0;
    }

    // A rendering endpoint device must have been selected beforehand.
    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    // Initialize the speaker (devices might have been added or removed)
    if (InitSpeaker() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitSpeaker() failed");
    }

    // Ensure that the updated rendering endpoint device is valid
    if (_ptrDeviceOut == NULL)
    {
        return -1;
    }

    if (_builtInAecEnabled && _recIsInitialized)
    {
        // Ensure the correct render device is configured in case
        // InitRecording() was called before InitPlayout().
        if (SetDMOProperties() == -1)
        {
            return -1;
        }
    }

    HRESULT hr = S_OK;
    // pWfxOut and pWfxClosestMatch are allocated by COM and must be freed
    // with CoTaskMemFree() on every exit path (success and Exit label).
    WAVEFORMATEX* pWfxOut = NULL;
    WAVEFORMATEX Wfx = WAVEFORMATEX();
    WAVEFORMATEX* pWfxClosestMatch = NULL;

    // Create COM object with IAudioClient interface.
    SAFE_RELEASE(_ptrClientOut);
    hr = _ptrDeviceOut->Activate(
                          __uuidof(IAudioClient),
                          CLSCTX_ALL,
                          NULL,
                          (void**)&_ptrClientOut);
    EXIT_ON_ERROR(hr);

    // Retrieve the stream format that the audio engine uses for its internal
    // processing (mixing) of shared-mode streams.
    hr = _ptrClientOut->GetMixFormat(&pWfxOut);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Audio Engine's current rendering mix format:");
        // format type
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag     : 0x%X (%u)", pWfxOut->wFormatTag, pWfxOut->wFormatTag);
        // number of channels (i.e. mono, stereo...)
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels      : %d", pWfxOut->nChannels);
        // sample rate
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec : %d", pWfxOut->nSamplesPerSec);
        // for buffer estimation
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec: %d", pWfxOut->nAvgBytesPerSec);
        // block size of data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign    : %d", pWfxOut->nBlockAlign);
        // number of bits per sample of mono data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample : %d", pWfxOut->wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize         : %d", pWfxOut->cbSize);
    }

    // Set wave format
    // (16-bit PCM; rate and channel count are negotiated in the loop below)
    Wfx.wFormatTag = WAVE_FORMAT_PCM;
    Wfx.wBitsPerSample = 16;
    Wfx.cbSize = 0;

    // Preferred sample rates, highest priority first.
    const int freqs[] = {48000, 44100, 16000, 96000, 32000, 8000};
    hr = S_FALSE;

    // Iterate over frequencies and channels, in order of priority
    for (int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
    {
        for (int chan = 0; chan < sizeof(_playChannelsPrioList)/sizeof(_playChannelsPrioList[0]); chan++)
        {
            Wfx.nChannels = _playChannelsPrioList[chan];
            Wfx.nSamplesPerSec = freqs[freq];
            Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
            Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
            // If the method succeeds and the audio endpoint device supports the specified stream format,
            // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
            // it returns S_FALSE.
            hr = _ptrClientOut->IsFormatSupported(
                                  AUDCLNT_SHAREMODE_SHARED,
                                  &Wfx,
                                  &pWfxClosestMatch);
            if (hr == S_OK)
            {
                break;
            }
            else
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels=%d, nSamplesPerSec=%d is not supported",
                    Wfx.nChannels, Wfx.nSamplesPerSec);
            }
        }
        if (hr == S_OK)
            break;
    }

    // TODO(andrew): what happens in the event of failure in the above loop?
    //   Is _ptrClientOut->Initialize expected to fail?
    //   Same in InitRecording().
    // NOTE(review): on loop failure, Initialize() below is still attempted
    // with the last-tried (unsupported) format; error handling then relies
    // on Initialize() itself failing.
    if (hr == S_OK)
    {
        // Cache the negotiated format; block sizes are per 10 ms (rate/100).
        _playAudioFrameSize = Wfx.nBlockAlign;
        _playBlockSize = Wfx.nSamplesPerSec/100;
        _playSampleRate = Wfx.nSamplesPerSec;
        _devicePlaySampleRate = Wfx.nSamplesPerSec; // The device itself continues to run at 44.1 kHz.
        _devicePlayBlockSize = Wfx.nSamplesPerSec/100;
        _playChannels = Wfx.nChannels;

        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "VoE selected this rendering format:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag         : 0x%X (%u)", Wfx.wFormatTag, Wfx.wFormatTag);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels          : %d", Wfx.nChannels);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec     : %d", Wfx.nSamplesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec    : %d", Wfx.nAvgBytesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign        : %d", Wfx.nBlockAlign);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample     : %d", Wfx.wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize             : %d", Wfx.cbSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Additional settings:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playAudioFrameSize: %d", _playAudioFrameSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playBlockSize     : %d", _playBlockSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_playChannels      : %d", _playChannels);
    }

    // Create a rendering stream.
    //
    // ****************************************************************************
    // For a shared-mode stream that uses event-driven buffering, the caller must
    // set both hnsPeriodicity and hnsBufferDuration to 0. The Initialize method
    // determines how large a buffer to allocate based on the scheduling period
    // of the audio engine. Although the client's buffer processing thread is
    // event driven, the basic buffer management process, as described previously,
    // is unaltered.
    // Each time the thread awakens, it should call IAudioClient::GetCurrentPadding
    // to determine how much data to write to a rendering buffer or read from a capture
    // buffer. In contrast to the two buffers that the Initialize method allocates
    // for an exclusive-mode stream that uses event-driven buffering, a shared-mode
    // stream requires a single buffer.
    // ****************************************************************************
    //
    REFERENCE_TIME hnsBufferDuration = 0;  // ask for minimum buffer size (default)
    if (_devicePlaySampleRate == 44100)
    {
        // Ask for a larger buffer size (30ms) when using 44.1kHz as render rate.
        // There seems to be a larger risk of underruns for 44.1 compared
        // with the default rate (48kHz). When using default, we set the requested
        // buffer duration to 0, which sets the buffer to the minimum size
        // required by the engine thread. The actual buffer size can then be
        // read by GetBufferSize() and it is 20ms on most machines.
        hnsBufferDuration = 30*10000;
    }
    hr = _ptrClientOut->Initialize(
                          AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK,    // processing of the audio buffer by the client will be event driven
                          hnsBufferDuration,                    // requested buffer capacity as a time value (in 100-nanosecond units)
                          0,                                    // periodicity
                          &Wfx,                                 // selected wave format
                          NULL);                                // session GUID

    if (FAILED(hr))
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "IAudioClient::Initialize() failed:");
        if (pWfxClosestMatch != NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "closest mix format: #channels=%d, samples/sec=%d, bits/sample=%d",
                pWfxClosestMatch->nChannels, pWfxClosestMatch->nSamplesPerSec, pWfxClosestMatch->wBitsPerSample);
        }
        else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "no format suggested");
        }
    }
    EXIT_ON_ERROR(hr);

    if (_ptrAudioBuffer)
    {
        // Update the audio buffer with the selected parameters
        _ptrAudioBuffer->SetPlayoutSampleRate(_playSampleRate);
        _ptrAudioBuffer->SetPlayoutChannels((uint8_t)_playChannels);
    }
    else
    {
        // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
        // has been created, hence the AudioDeviceBuffer does not exist.
        // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceBuffer must be attached before streaming can start");
    }

    // Get the actual size of the shared (endpoint buffer).
    // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
    UINT bufferFrameCount(0);
    hr = _ptrClientOut->GetBufferSize(
                          &bufferFrameCount);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "IAudioClient::GetBufferSize() => %u (<=> %u bytes)",
            bufferFrameCount, bufferFrameCount*_playAudioFrameSize);
    }

    // Set the event handle that the system signals when an audio buffer is ready
    // to be processed by the client.
    hr = _ptrClientOut->SetEventHandle(
                          _hRenderSamplesReadyEvent);
    EXIT_ON_ERROR(hr);

    // Get an IAudioRenderClient interface.
    SAFE_RELEASE(_ptrRenderClient);
    hr = _ptrClientOut->GetService(
                          __uuidof(IAudioRenderClient),
                          (void**)&_ptrRenderClient);
    EXIT_ON_ERROR(hr);

    // Mark playout side as initialized
    _playIsInitialized = true;

    CoTaskMemFree(pWfxOut);
    CoTaskMemFree(pWfxClosestMatch);

    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "render side is now initialized");
    return 0;

Exit:
    // Failure path: free COM-allocated formats and release any partially
    // created client interfaces before reporting the error.
    _TraceCOMError(hr);
    CoTaskMemFree(pWfxOut);
    CoTaskMemFree(pWfxClosestMatch);
    SAFE_RELEASE(_ptrClientOut);
    SAFE_RELEASE(_ptrRenderClient);
    return -1;
}
2400
2401// Capture initialization when the built-in AEC DirectX Media Object (DMO) is
2402// used. Called from InitRecording(), most of which is skipped over. The DMO
2403// handles device initialization itself.
2404// Reference: http://msdn.microsoft.com/en-us/library/ff819492(v=vs.85).aspx
int32_t AudioDeviceWindowsCore::InitRecordingDMO()
{
    // Preconditions: only reachable from InitRecording() when the built-in
    // AEC is enabled and the DMO has been created.
    assert(_builtInAecEnabled);
    assert(_dmo != NULL);

    // Point the DMO at the currently selected render/capture endpoints.
    if (SetDMOProperties() == -1)
    {
        return -1;
    }

    // Allocate a media type with room for a WAVEFORMATEX format block;
    // MoFreeMediaType() must be called on every path after this succeeds.
    DMO_MEDIA_TYPE mt = {0};
    HRESULT hr = MoInitMediaType(&mt, sizeof(WAVEFORMATEX));
    if (FAILED(hr))
    {
        MoFreeMediaType(&mt);
        _TraceCOMError(hr);
        return -1;
    }
    mt.majortype = MEDIATYPE_Audio;
    mt.subtype = MEDIASUBTYPE_PCM;
    mt.formattype = FORMAT_WaveFormatEx;

    // Supported formats
    // nChannels: 1 (in AEC-only mode)
    // nSamplesPerSec: 8000, 11025, 16000, 22050
    // wBitsPerSample: 16
    WAVEFORMATEX* ptrWav = reinterpret_cast<WAVEFORMATEX*>(mt.pbFormat);
    ptrWav->wFormatTag = WAVE_FORMAT_PCM;
    ptrWav->nChannels = 1;
    // 16000 is the highest we can support with our resampler.
    ptrWav->nSamplesPerSec = 16000;
    ptrWav->nAvgBytesPerSec = 32000;
    ptrWav->nBlockAlign = 2;
    ptrWav->wBitsPerSample = 16;
    ptrWav->cbSize = 0;

    // Set the VoE format equal to the AEC output format.
    // Block size corresponds to one 10 ms frame (rate / 100).
    _recAudioFrameSize = ptrWav->nBlockAlign;
    _recSampleRate = ptrWav->nSamplesPerSec;
    _recBlockSize = ptrWav->nSamplesPerSec / 100;
    _recChannels = ptrWav->nChannels;

    // Set the DMO output format parameters.
    hr = _dmo->SetOutputType(kAecCaptureStreamIndex, &mt, 0);
    MoFreeMediaType(&mt);
    if (FAILED(hr))
    {
        _TraceCOMError(hr);
        return -1;
    }

    if (_ptrAudioBuffer)
    {
        _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
        _ptrAudioBuffer->SetRecordingChannels(_recChannels);
    }
    else
    {
        // Refer to InitRecording() for comments.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "AudioDeviceBuffer must be attached before streaming can start");
    }

    // Scratch buffer sized for one 10 ms block of the AEC output format.
    // NOTE(review): raw `new`; presumably released during shutdown — verify
    // against the destructor/StopRecording path.
    _mediaBuffer = new MediaBufferImpl(_recBlockSize * _recAudioFrameSize);

    // Optional, but if called, must be after media types are set.
    hr = _dmo->AllocateStreamingResources();
    if (FAILED(hr))
    {
         _TraceCOMError(hr);
        return -1;
    }

    _recIsInitialized = true;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "Capture side is now initialized");

    return 0;
}
2484
2485// ----------------------------------------------------------------------------
2486//  InitRecording
2487// ----------------------------------------------------------------------------
2488
int32_t AudioDeviceWindowsCore::InitRecording()
{

    CriticalSectionScoped lock(&_critSect);

    // Re-initialization is not allowed while capturing is active.
    if (_recording)
    {
        return -1;
    }

    // Idempotent: a second call while already initialized is a no-op success.
    if (_recIsInitialized)
    {
        return 0;
    }

    // Cache the performance-counter frequency for capture timestamping.
    if (QueryPerformanceFrequency(&_perfCounterFreq) == 0)
    {
        return -1;
    }
    // Factor converting QPC ticks to 100-ns units (10^7 per second).
    _perfCounterFactor = 10000000.0 / (double)_perfCounterFreq.QuadPart;

    // A capturing endpoint device must have been selected beforehand.
    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    // Initialize the microphone (devices might have been added or removed)
    if (InitMicrophone() == -1)
    {
        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "InitMicrophone() failed");
    }

    // Ensure that the updated capturing endpoint device is valid
    if (_ptrDeviceIn == NULL)
    {
        return -1;
    }

    if (_builtInAecEnabled)
    {
        // The DMO will configure the capture device.
        return InitRecordingDMO();
    }

    HRESULT hr = S_OK;
    // pWfxIn and pWfxClosestMatch are allocated by COM and must be freed
    // with CoTaskMemFree() on every exit path (success and Exit label).
    WAVEFORMATEX* pWfxIn = NULL;
    WAVEFORMATEX Wfx = WAVEFORMATEX();
    WAVEFORMATEX* pWfxClosestMatch = NULL;

    // Create COM object with IAudioClient interface.
    SAFE_RELEASE(_ptrClientIn);
    hr = _ptrDeviceIn->Activate(
                          __uuidof(IAudioClient),
                          CLSCTX_ALL,
                          NULL,
                          (void**)&_ptrClientIn);
    EXIT_ON_ERROR(hr);

    // Retrieve the stream format that the audio engine uses for its internal
    // processing (mixing) of shared-mode streams.
    hr = _ptrClientIn->GetMixFormat(&pWfxIn);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Audio Engine's current capturing mix format:");
        // format type
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag     : 0x%X (%u)", pWfxIn->wFormatTag, pWfxIn->wFormatTag);
        // number of channels (i.e. mono, stereo...)
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels      : %d", pWfxIn->nChannels);
        // sample rate
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec : %d", pWfxIn->nSamplesPerSec);
        // for buffer estimation
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec: %d", pWfxIn->nAvgBytesPerSec);
        // block size of data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign    : %d", pWfxIn->nBlockAlign);
        // number of bits per sample of mono data
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample : %d", pWfxIn->wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize         : %d", pWfxIn->cbSize);
    }

    // Set wave format
    // (16-bit PCM; rate and channel count are negotiated in the loop below)
    Wfx.wFormatTag = WAVE_FORMAT_PCM;
    Wfx.wBitsPerSample = 16;
    Wfx.cbSize = 0;

    // Preferred sample rates, highest priority first.
    const int freqs[6] = {48000, 44100, 16000, 96000, 32000, 8000};
    hr = S_FALSE;

    // Iterate over frequencies and channels, in order of priority
    for (int freq = 0; freq < sizeof(freqs)/sizeof(freqs[0]); freq++)
    {
        for (int chan = 0; chan < sizeof(_recChannelsPrioList)/sizeof(_recChannelsPrioList[0]); chan++)
        {
            Wfx.nChannels = _recChannelsPrioList[chan];
            Wfx.nSamplesPerSec = freqs[freq];
            Wfx.nBlockAlign = Wfx.nChannels * Wfx.wBitsPerSample / 8;
            Wfx.nAvgBytesPerSec = Wfx.nSamplesPerSec * Wfx.nBlockAlign;
            // If the method succeeds and the audio endpoint device supports the specified stream format,
            // it returns S_OK. If the method succeeds and provides a closest match to the specified format,
            // it returns S_FALSE.
            hr = _ptrClientIn->IsFormatSupported(
                                  AUDCLNT_SHAREMODE_SHARED,
                                  &Wfx,
                                  &pWfxClosestMatch);
            if (hr == S_OK)
            {
                break;
            }
            else
            {
                WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels=%d, nSamplesPerSec=%d is not supported",
                    Wfx.nChannels, Wfx.nSamplesPerSec);
            }
        }
        if (hr == S_OK)
            break;
    }

    // NOTE(review): on loop failure, Initialize() below is still attempted
    // with the last-tried (unsupported) format; error handling then relies
    // on Initialize() itself failing (see TODO in InitPlayout()).
    if (hr == S_OK)
    {
        // Cache the negotiated format; block size is per 10 ms (rate/100).
        _recAudioFrameSize = Wfx.nBlockAlign;
        _recSampleRate = Wfx.nSamplesPerSec;
        _recBlockSize = Wfx.nSamplesPerSec/100;
        _recChannels = Wfx.nChannels;

        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "VoE selected this capturing format:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wFormatTag        : 0x%X (%u)", Wfx.wFormatTag, Wfx.wFormatTag);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nChannels         : %d", Wfx.nChannels);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nSamplesPerSec    : %d", Wfx.nSamplesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nAvgBytesPerSec   : %d", Wfx.nAvgBytesPerSec);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "nBlockAlign       : %d", Wfx.nBlockAlign);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "wBitsPerSample    : %d", Wfx.wBitsPerSample);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "cbSize            : %d", Wfx.cbSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Additional settings:");
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recAudioFrameSize: %d", _recAudioFrameSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recBlockSize     : %d", _recBlockSize);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_recChannels      : %d", _recChannels);
    }

    // Create a capturing stream.
    hr = _ptrClientIn->Initialize(
                          AUDCLNT_SHAREMODE_SHARED,             // share Audio Engine with other applications
                          AUDCLNT_STREAMFLAGS_EVENTCALLBACK |   // processing of the audio buffer by the client will be event driven
                          AUDCLNT_STREAMFLAGS_NOPERSIST,        // volume and mute settings for an audio session will not persist across system restarts
                          0,                                    // required for event-driven shared mode
                          0,                                    // periodicity
                          &Wfx,                                 // selected wave format
                          NULL);                                // session GUID


    if (hr != S_OK)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "IAudioClient::Initialize() failed:");
        if (pWfxClosestMatch != NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "closest mix format: #channels=%d, samples/sec=%d, bits/sample=%d",
                pWfxClosestMatch->nChannels, pWfxClosestMatch->nSamplesPerSec, pWfxClosestMatch->wBitsPerSample);
        }
        else
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "no format suggested");
        }
    }
    EXIT_ON_ERROR(hr);

    if (_ptrAudioBuffer)
    {
        // Update the audio buffer with the selected parameters
        _ptrAudioBuffer->SetRecordingSampleRate(_recSampleRate);
        _ptrAudioBuffer->SetRecordingChannels((uint8_t)_recChannels);
    }
    else
    {
        // We can enter this state during CoreAudioIsSupported() when no AudioDeviceImplementation
        // has been created, hence the AudioDeviceBuffer does not exist.
        // It is OK to end up here since we don't initiate any media in CoreAudioIsSupported().
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "AudioDeviceBuffer must be attached before streaming can start");
    }

    // Get the actual size of the shared (endpoint buffer).
    // Typical value is 960 audio frames <=> 20ms @ 48kHz sample rate.
    UINT bufferFrameCount(0);
    hr = _ptrClientIn->GetBufferSize(
                          &bufferFrameCount);
    if (SUCCEEDED(hr))
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "IAudioClient::GetBufferSize() => %u (<=> %u bytes)",
            bufferFrameCount, bufferFrameCount*_recAudioFrameSize);
    }

    // Set the event handle that the system signals when an audio buffer is ready
    // to be processed by the client.
    hr = _ptrClientIn->SetEventHandle(
                          _hCaptureSamplesReadyEvent);
    EXIT_ON_ERROR(hr);

    // Get an IAudioCaptureClient interface.
    SAFE_RELEASE(_ptrCaptureClient);
    hr = _ptrClientIn->GetService(
                          __uuidof(IAudioCaptureClient),
                          (void**)&_ptrCaptureClient);
    EXIT_ON_ERROR(hr);

    // Mark capture side as initialized
    _recIsInitialized = true;

    CoTaskMemFree(pWfxIn);
    CoTaskMemFree(pWfxClosestMatch);

    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "capture side is now initialized");
    return 0;

Exit:
    // Failure path: free COM-allocated formats and release any partially
    // created client interfaces before reporting the error.
    _TraceCOMError(hr);
    CoTaskMemFree(pWfxIn);
    CoTaskMemFree(pWfxClosestMatch);
    SAFE_RELEASE(_ptrClientIn);
    SAFE_RELEASE(_ptrCaptureClient);
    return -1;
}
2708
2709// ----------------------------------------------------------------------------
2710//  StartRecording
2711// ----------------------------------------------------------------------------
2712
int32_t AudioDeviceWindowsCore::StartRecording()
{

    // InitRecording() must have completed successfully first.
    if (!_recIsInitialized)
    {
        return -1;
    }

    // A capture thread already exists => recording already started; no-op.
    if (_hRecThread != NULL)
    {
        return 0;
    }

    if (_recording)
    {
        return 0;
    }

    {
        CriticalSectionScoped critScoped(&_critSect);

        // Create thread which will drive the capturing
        LPTHREAD_START_ROUTINE lpStartAddress = WSAPICaptureThread;
        if (_builtInAecEnabled)
        {
            // Redirect to the DMO polling method.
            lpStartAddress = WSAPICaptureThreadPollDMO;

            if (!_playing)
            {
                // The DMO won't provide us captured output data unless we
                // give it render data to process.
                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                    "Playout must be started before recording when using the "
                    "built-in AEC");
                return -1;
            }
        }

        assert(_hRecThread == NULL);
        _hRecThread = CreateThread(NULL,
                                   0,
                                   lpStartAddress,
                                   this,
                                   0,
                                   NULL);
        if (_hRecThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "failed to create the recording thread");
            return -1;
        }

        // Set thread priority to highest possible
        SetThreadPriority(_hRecThread, THREAD_PRIORITY_TIME_CRITICAL);

        // Separate helper threads poll and apply capture-volume changes so
        // the time-critical capture thread never blocks on volume APIs.
        assert(_hGetCaptureVolumeThread == NULL);
        _hGetCaptureVolumeThread = CreateThread(NULL,
                                                0,
                                                GetCaptureVolumeThread,
                                                this,
                                                0,
                                                NULL);
        if (_hGetCaptureVolumeThread == NULL)
        {
            // NOTE(review): the capture thread created above is left running
            // on this failure path — confirm it is reclaimed elsewhere
            // (e.g. StopRecording()).
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to create the volume getter thread");
            return -1;
        }

        assert(_hSetCaptureVolumeThread == NULL);
        _hSetCaptureVolumeThread = CreateThread(NULL,
                                                0,
                                                SetCaptureVolumeThread,
                                                this,
                                                0,
                                                NULL);
        if (_hSetCaptureVolumeThread == NULL)
        {
            // NOTE(review): same caveat as above for the two threads already
            // created on this failure path.
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                         "  failed to create the volume setter thread");
            return -1;
        }
    }  // critScoped

    // Wait (up to 1 second) for the capture thread to signal that streaming
    // has actually started.
    DWORD ret = WaitForSingleObject(_hCaptureStartedEvent, 1000);
    if (ret != WAIT_OBJECT_0)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "capturing did not start up properly");
        return -1;
    }
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "capture audio stream has now started...");

    // Reset load statistics and flip the recording flag last.
    _avgCPULoad = 0.0f;
    _playAcc = 0;
    _recording = true;

    return 0;
}
2814
2815// ----------------------------------------------------------------------------
2816//  StopRecording
2817// ----------------------------------------------------------------------------
2818
// Signals the shutdown event, joins the capture and volume helper threads,
// releases the capture-side COM interfaces (done by the capture thread
// itself on the normal path) and resets the recording state.
// Returns 0 on success, -1 if any thread failed to stop in time or the DMO
// could not free its streaming resources.
int32_t AudioDeviceWindowsCore::StopRecording()
{
    int32_t err = 0;

    if (!_recIsInitialized)
    {
        return 0;
    }

    _Lock();

    if (_hRecThread == NULL)
    {
        // No capture thread was ever started; just release the WASAPI
        // interfaces and clear the state flags.
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "no capturing stream is active => close down WASAPI only");
        SAFE_RELEASE(_ptrClientIn);
        SAFE_RELEASE(_ptrCaptureClient);
        _recIsInitialized = false;
        _recording = false;
        _UnLock();
        return 0;
    }

    // Stop the driving thread...
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "closing down the webrtc_core_audio_capture_thread...");
    // Manual-reset event; it will remain signalled to stop all capture threads.
    SetEvent(_hShutdownCaptureEvent);

    // Release the lock while joining: the threads may need to take it while
    // shutting down, and holding it here could deadlock.
    _UnLock();
    DWORD ret = WaitForSingleObject(_hRecThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "failed to close down webrtc_core_audio_capture_thread");
        err = -1;
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "webrtc_core_audio_capture_thread is now closed");
    }

    ret = WaitForSingleObject(_hGetCaptureVolumeThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        // the thread did not stop as it should
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to close down volume getter thread");
        err = -1;
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "  volume getter thread is now closed");
    }

    ret = WaitForSingleObject(_hSetCaptureVolumeThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        // the thread did not stop as it should
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                     "  failed to close down volume setter thread");
        err = -1;
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "  volume setter thread is now closed");
    }
    _Lock();

    ResetEvent(_hShutdownCaptureEvent); // Must be manually reset.
    // Ensure that the thread has released these interfaces properly.
    assert(err == -1 || _ptrClientIn == NULL);
    assert(err == -1 || _ptrCaptureClient == NULL);

    _recIsInitialized = false;
    _recording = false;

    // These will create thread leaks in the result of an error,
    // but we can at least resume the call.
    CloseHandle(_hRecThread);
    _hRecThread = NULL;

    CloseHandle(_hGetCaptureVolumeThread);
    _hGetCaptureVolumeThread = NULL;

    CloseHandle(_hSetCaptureVolumeThread);
    _hSetCaptureVolumeThread = NULL;

    if (_builtInAecEnabled)
    {
        assert(_dmo != NULL);
        // This is necessary. Otherwise the DMO can generate garbage render
        // audio even after rendering has stopped.
        HRESULT hr = _dmo->FreeStreamingResources();
        if (FAILED(hr))
        {
            _TraceCOMError(hr);
            err = -1;
        }
    }

    // Reset the recording delay value.
    _sndCardRecDelay = 0;

    _UnLock();

    return err;
}
2930
2931// ----------------------------------------------------------------------------
2932//  RecordingIsInitialized
2933// ----------------------------------------------------------------------------
2934
2935bool AudioDeviceWindowsCore::RecordingIsInitialized() const
2936{
2937    return (_recIsInitialized);
2938}
2939
2940// ----------------------------------------------------------------------------
2941//  Recording
2942// ----------------------------------------------------------------------------
2943
2944bool AudioDeviceWindowsCore::Recording() const
2945{
2946    return (_recording);
2947}
2948
2949// ----------------------------------------------------------------------------
2950//  PlayoutIsInitialized
2951// ----------------------------------------------------------------------------
2952
2953bool AudioDeviceWindowsCore::PlayoutIsInitialized() const
2954{
2955
2956    return (_playIsInitialized);
2957}
2958
2959// ----------------------------------------------------------------------------
2960//  StartPlayout
2961// ----------------------------------------------------------------------------
2962
// Starts the WASAPI render thread and waits for it to signal that the audio
// stream is running. Returns 0 on success or when already started, -1 on
// failure.
int32_t AudioDeviceWindowsCore::StartPlayout()
{

    // InitPlayout() must have completed successfully first.
    if (!_playIsInitialized)
    {
        return -1;
    }

    // A non-NULL thread handle means a render thread already exists.
    if (_hPlayThread != NULL)
    {
        return 0;
    }

    if (_playing)
    {
        return 0;
    }

    {
        CriticalSectionScoped critScoped(&_critSect);

        // Create thread which will drive the rendering.
        assert(_hPlayThread == NULL);
        _hPlayThread = CreateThread(
                         NULL,
                         0,
                         WSAPIRenderThread,
                         this,
                         0,
                         NULL);
        if (_hPlayThread == NULL)
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "failed to create the playout thread");
            return -1;
        }

        // Set thread priority to highest possible.
        SetThreadPriority(_hPlayThread, THREAD_PRIORITY_TIME_CRITICAL);
    }  // critScoped

    // Wait (max 1 second) for DoRenderThread() to signal
    // _hRenderStartedEvent, i.e. that the output stream has started.
    DWORD ret = WaitForSingleObject(_hRenderStartedEvent, 1000);
    if (ret != WAIT_OBJECT_0)
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "rendering did not start up properly");
        return -1;
    }

    _playing = true;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
        "rendering audio stream has now started...");

    return 0;
}
3018
3019// ----------------------------------------------------------------------------
3020//  StopPlayout
3021// ----------------------------------------------------------------------------
3022
// Signals the render-thread shutdown event, joins the render thread and
// releases the render-side WASAPI interfaces. Returns 0 on success, -1 if
// the render thread failed to stop within 2 seconds (the handle is then
// closed anyway so the call can proceed).
int32_t AudioDeviceWindowsCore::StopPlayout()
{

    if (!_playIsInitialized)
    {
        return 0;
    }

    {
        CriticalSectionScoped critScoped(&_critSect) ;

        if (_hPlayThread == NULL)
        {
            // No render thread was ever started; just release the WASAPI
            // interfaces and clear the state flags.
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                "no rendering stream is active => close down WASAPI only");
            SAFE_RELEASE(_ptrClientOut);
            SAFE_RELEASE(_ptrRenderClient);
            _playIsInitialized = false;
            _playing = false;
            return 0;
        }

        // stop the driving thread...
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "closing down the webrtc_core_audio_render_thread...");
        SetEvent(_hShutdownRenderEvent);
    }  // critScoped

    // Join outside the lock; the render thread takes the lock while
    // shutting down.
    DWORD ret = WaitForSingleObject(_hPlayThread, 2000);
    if (ret != WAIT_OBJECT_0)
    {
        // the thread did not stop as it should
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "failed to close down webrtc_core_audio_render_thread");
        CloseHandle(_hPlayThread);
        _hPlayThread = NULL;
        _playIsInitialized = false;
        _playing = false;
        return -1;
    }

    {
        CriticalSectionScoped critScoped(&_critSect);
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "webrtc_core_audio_render_thread is now closed");

        // Reset the manual-reset event here so that, if the render thread
        // exited before StopPlayout() ran, a future render thread in the
        // same VoE instance does not see a stale shutdown signal.
        ResetEvent(_hShutdownRenderEvent);

        SAFE_RELEASE(_ptrClientOut);
        SAFE_RELEASE(_ptrRenderClient);

        _playIsInitialized = false;
        _playing = false;

        CloseHandle(_hPlayThread);
        _hPlayThread = NULL;

        if (_builtInAecEnabled && _recording)
        {
            // The DMO won't provide us captured output data unless we
            // give it render data to process.
            //
            // We still permit the playout to shutdown, and trace a warning.
            // Otherwise, VoE can get into a state which will never permit
            // playout to stop properly.
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                "Recording should be stopped before playout when using the "
                "built-in AEC");
        }

        // Reset the playout delay value.
        _sndCardPlayDelay = 0;
    }  // critScoped

    return 0;
}
3102
3103// ----------------------------------------------------------------------------
3104//  PlayoutDelay
3105// ----------------------------------------------------------------------------
3106
3107int32_t AudioDeviceWindowsCore::PlayoutDelay(uint16_t& delayMS) const
3108{
3109    CriticalSectionScoped critScoped(&_critSect);
3110    delayMS = static_cast<uint16_t>(_sndCardPlayDelay);
3111    return 0;
3112}
3113
3114// ----------------------------------------------------------------------------
3115//  RecordingDelay
3116// ----------------------------------------------------------------------------
3117
3118int32_t AudioDeviceWindowsCore::RecordingDelay(uint16_t& delayMS) const
3119{
3120    CriticalSectionScoped critScoped(&_critSect);
3121    delayMS = static_cast<uint16_t>(_sndCardRecDelay);
3122    return 0;
3123}
3124
3125// ----------------------------------------------------------------------------
3126//  Playing
3127// ----------------------------------------------------------------------------
3128
3129bool AudioDeviceWindowsCore::Playing() const
3130{
3131    return (_playing);
3132}
3133// ----------------------------------------------------------------------------
3134//  SetPlayoutBuffer
3135// ----------------------------------------------------------------------------
3136
3137int32_t AudioDeviceWindowsCore::SetPlayoutBuffer(const AudioDeviceModule::BufferType type, uint16_t sizeMS)
3138{
3139
3140    CriticalSectionScoped lock(&_critSect);
3141
3142    _playBufType = type;
3143
3144    if (type == AudioDeviceModule::kFixedBufferSize)
3145    {
3146        _playBufDelayFixed = sizeMS;
3147    }
3148
3149    return 0;
3150}
3151
3152// ----------------------------------------------------------------------------
3153//  PlayoutBuffer
3154// ----------------------------------------------------------------------------
3155
3156int32_t AudioDeviceWindowsCore::PlayoutBuffer(AudioDeviceModule::BufferType& type, uint16_t& sizeMS) const
3157{
3158    CriticalSectionScoped lock(&_critSect);
3159    type = _playBufType;
3160
3161    if (type == AudioDeviceModule::kFixedBufferSize)
3162    {
3163        sizeMS = _playBufDelayFixed;
3164    }
3165    else
3166    {
3167        // Use same value as for PlayoutDelay
3168        sizeMS = static_cast<uint16_t>(_sndCardPlayDelay);
3169    }
3170
3171    return 0;
3172}
3173
3174// ----------------------------------------------------------------------------
3175//  CPULoad
3176// ----------------------------------------------------------------------------
3177
3178int32_t AudioDeviceWindowsCore::CPULoad(uint16_t& load) const
3179{
3180
3181    load = static_cast<uint16_t> (100*_avgCPULoad);
3182
3183    return 0;
3184}
3185
3186// ----------------------------------------------------------------------------
3187//  PlayoutWarning
3188// ----------------------------------------------------------------------------
3189
3190bool AudioDeviceWindowsCore::PlayoutWarning() const
3191{
3192    return ( _playWarning > 0);
3193}
3194
3195// ----------------------------------------------------------------------------
3196//  PlayoutError
3197// ----------------------------------------------------------------------------
3198
3199bool AudioDeviceWindowsCore::PlayoutError() const
3200{
3201    return ( _playError > 0);
3202}
3203
3204// ----------------------------------------------------------------------------
3205//  RecordingWarning
3206// ----------------------------------------------------------------------------
3207
3208bool AudioDeviceWindowsCore::RecordingWarning() const
3209{
3210    return ( _recWarning > 0);
3211}
3212
3213// ----------------------------------------------------------------------------
3214//  RecordingError
3215// ----------------------------------------------------------------------------
3216
3217bool AudioDeviceWindowsCore::RecordingError() const
3218{
3219    return ( _recError > 0);
3220}
3221
3222// ----------------------------------------------------------------------------
3223//  ClearPlayoutWarning
3224// ----------------------------------------------------------------------------
3225
3226void AudioDeviceWindowsCore::ClearPlayoutWarning()
3227{
3228    _playWarning = 0;
3229}
3230
3231// ----------------------------------------------------------------------------
3232//  ClearPlayoutError
3233// ----------------------------------------------------------------------------
3234
3235void AudioDeviceWindowsCore::ClearPlayoutError()
3236{
3237    _playError = 0;
3238}
3239
3240// ----------------------------------------------------------------------------
3241//  ClearRecordingWarning
3242// ----------------------------------------------------------------------------
3243
3244void AudioDeviceWindowsCore::ClearRecordingWarning()
3245{
3246    _recWarning = 0;
3247}
3248
3249// ----------------------------------------------------------------------------
3250//  ClearRecordingError
3251// ----------------------------------------------------------------------------
3252
3253void AudioDeviceWindowsCore::ClearRecordingError()
3254{
3255    _recError = 0;
3256}
3257
3258// ============================================================================
3259//                                 Private Methods
3260// ============================================================================
3261
3262// ----------------------------------------------------------------------------
3263//  [static] WSAPIRenderThread
3264// ----------------------------------------------------------------------------
3265
3266DWORD WINAPI AudioDeviceWindowsCore::WSAPIRenderThread(LPVOID context)
3267{
3268    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3269        DoRenderThread();
3270}
3271
3272// ----------------------------------------------------------------------------
3273//  [static] WSAPICaptureThread
3274// ----------------------------------------------------------------------------
3275
3276DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThread(LPVOID context)
3277{
3278    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3279        DoCaptureThread();
3280}
3281
3282DWORD WINAPI AudioDeviceWindowsCore::WSAPICaptureThreadPollDMO(LPVOID context)
3283{
3284    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3285        DoCaptureThreadPollDMO();
3286}
3287
3288DWORD WINAPI AudioDeviceWindowsCore::GetCaptureVolumeThread(LPVOID context)
3289{
3290    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3291        DoGetCaptureVolumeThread();
3292}
3293
3294DWORD WINAPI AudioDeviceWindowsCore::SetCaptureVolumeThread(LPVOID context)
3295{
3296    return reinterpret_cast<AudioDeviceWindowsCore*>(context)->
3297        DoSetCaptureVolumeThread();
3298}
3299
3300DWORD AudioDeviceWindowsCore::DoGetCaptureVolumeThread()
3301{
3302    HANDLE waitObject = _hShutdownCaptureEvent;
3303
3304    while (1)
3305    {
3306        if (AGC())
3307        {
3308            uint32_t currentMicLevel = 0;
3309            if (MicrophoneVolume(currentMicLevel) == 0)
3310            {
3311                // This doesn't set the system volume, just stores it.
3312                _Lock();
3313                if (_ptrAudioBuffer)
3314                {
3315                    _ptrAudioBuffer->SetCurrentMicLevel(currentMicLevel);
3316                }
3317                _UnLock();
3318            }
3319        }
3320
3321        DWORD waitResult = WaitForSingleObject(waitObject,
3322                                               GET_MIC_VOLUME_INTERVAL_MS);
3323        switch (waitResult)
3324        {
3325            case WAIT_OBJECT_0: // _hShutdownCaptureEvent
3326                return 0;
3327            case WAIT_TIMEOUT:  // timeout notification
3328                break;
3329            default:            // unexpected error
3330                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3331                    "  unknown wait termination on get volume thread");
3332                return 1;
3333        }
3334    }
3335}
3336
3337DWORD AudioDeviceWindowsCore::DoSetCaptureVolumeThread()
3338{
3339    HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hSetCaptureVolumeEvent};
3340
3341    while (1)
3342    {
3343        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, INFINITE);
3344        switch (waitResult)
3345        {
3346            case WAIT_OBJECT_0:      // _hShutdownCaptureEvent
3347                return 0;
3348            case WAIT_OBJECT_0 + 1:  // _hSetCaptureVolumeEvent
3349                break;
3350            default:                 // unexpected error
3351                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3352                    "  unknown wait termination on set volume thread");
3353                    return 1;
3354        }
3355
3356        _Lock();
3357        uint32_t newMicLevel = _newMicLevel;
3358        _UnLock();
3359
3360        if (SetMicrophoneVolume(newMicLevel) == -1)
3361        {
3362            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3363                "  the required modification of the microphone volume failed");
3364        }
3365    }
3366}
3367
3368// ----------------------------------------------------------------------------
3369//  DoRenderThread
3370// ----------------------------------------------------------------------------
3371
// Body of the render thread created by StartPlayout(): pre-fills the WASAPI
// endpoint buffer with silence, starts the output stream, then feeds it
// 10 ms blocks fetched from _ptrAudioBuffer until _hShutdownRenderEvent is
// signalled or an unrecoverable error occurs. Returns the last HRESULT
// (S_OK on a clean shutdown) or 1 if COM initialization fails.
DWORD AudioDeviceWindowsCore::DoRenderThread()
{

    bool keepPlaying = true;
    HANDLE waitArray[2] = {_hShutdownRenderEvent, _hRenderSamplesReadyEvent};
    HRESULT hr = S_OK;
    HANDLE hMmTask = NULL;

    // Used to measure the time spent in RequestPlayoutData/GetPlayoutData
    // for the CPU-load estimate (_playAcc).
    LARGE_INTEGER t1;
    LARGE_INTEGER t2;
    int32_t time(0);

    // Initialize COM as MTA in this thread.
    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
    if (!comInit.succeeded()) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
          "failed to initialize COM in render thread");
      return 1;
    }

    _SetThreadName(0, "webrtc_core_audio_render_thread");

    // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread priority.
    //
    if (_winSupportAvrt)
    {
        DWORD taskIndex(0);
        hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
        if (hMmTask)
        {
            if (FALSE == _PAvSetMmThreadPriority(hMmTask, AVRT_PRIORITY_CRITICAL))
            {
                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to boost play-thread using MMCSS");
            }
            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "render thread is now registered with MMCSS (taskIndex=%d)", taskIndex);
        }
        else
        {
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "failed to enable MMCSS on render thread (err=%d)", GetLastError());
            _TraceCOMError(GetLastError());
        }
    }

    _Lock();

    IAudioClock* clock = NULL;

    // Get size of rendering buffer (length is expressed as the number of audio frames the buffer can hold).
    // This value is fixed during the rendering session.
    //
    UINT32 bufferLength = 0;
    hr = _ptrClientOut->GetBufferSize(&bufferLength);
    EXIT_ON_ERROR(hr);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] size of buffer       : %u", bufferLength);

    // Get maximum latency for the current stream (will not change for the lifetime  of the IAudioClient object).
    // NOTE(review): the return value is ignored; 'latency' is left
    // uninitialized if the call fails — consider initializing it to 0.
    //
    REFERENCE_TIME latency;
    _ptrClientOut->GetStreamLatency(&latency);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] max stream latency   : %u (%3.2f ms)",
        (DWORD)latency, (double)(latency/10000.0));

    // Get the length of the periodic interval separating successive processing passes by
    // the audio engine on the data in the endpoint buffer.
    //
    // The period between processing passes by the audio engine is fixed for a particular
    // audio endpoint device and represents the smallest processing quantum for the audio engine.
    // This period plus the stream latency between the buffer and endpoint device represents
    // the minimum possible latency that an audio application can achieve.
    // Typical value: 100000 <=> 0.01 sec = 10ms.
    //
    REFERENCE_TIME devPeriod = 0;
    REFERENCE_TIME devPeriodMin = 0;
    _ptrClientOut->GetDevicePeriod(&devPeriod, &devPeriodMin);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] device period        : %u (%3.2f ms)",
        (DWORD)devPeriod, (double)(devPeriod/10000.0));

    // Derive initial rendering delay.
    // Example: 10*(960/480) + 15 = 20 + 15 = 35ms
    //
    int playout_delay = 10 * (bufferLength / _playBlockSize) +
        (int)((latency + devPeriod) / 10000);
    _sndCardPlayDelay = playout_delay;
    _writtenSamples = 0;
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
                 "[REND] initial delay        : %u", playout_delay);

    double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_devicePlayBlockSize);
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[REND] endpointBufferSizeMS : %3.2f", endpointBufferSizeMS);

    // Before starting the stream, fill the rendering buffer with silence.
    //
    BYTE *pData = NULL;
    hr = _ptrRenderClient->GetBuffer(bufferLength, &pData);
    EXIT_ON_ERROR(hr);

    hr = _ptrRenderClient->ReleaseBuffer(bufferLength, AUDCLNT_BUFFERFLAGS_SILENT);
    EXIT_ON_ERROR(hr);

    _writtenSamples += bufferLength;

    // An IAudioClock is optional; it is only used below to refine the
    // playout-delay estimate.
    hr = _ptrClientOut->GetService(__uuidof(IAudioClock), (void**)&clock);
    if (FAILED(hr)) {
      WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                   "failed to get IAudioClock interface from the IAudioClient");
    }

    // Start up the rendering audio stream.
    hr = _ptrClientOut->Start();
    EXIT_ON_ERROR(hr);

    _UnLock();

    // Set event which will ensure that the calling thread modifies the playing state to true.
    //
    SetEvent(_hRenderStartedEvent);

    // >> ------------------ THREAD LOOP ------------------

    while (keepPlaying)
    {
        // Wait for a render notification event or a shutdown event
        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
        switch (waitResult)
        {
        case WAIT_OBJECT_0 + 0:     // _hShutdownRenderEvent
            keepPlaying = false;
            break;
        case WAIT_OBJECT_0 + 1:     // _hRenderSamplesReadyEvent
            break;
        case WAIT_TIMEOUT:          // timeout notification
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "render event timed out after 0.5 seconds");
            goto Exit;
        default:                    // unexpected error
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "unknown wait termination on render side");
            goto Exit;
        }

        while (keepPlaying)
        {
            _Lock();

            // Sanity check to ensure that essential states are not modified
            // during the unlocked period.
            if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
            {
                _UnLock();
                WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                    "output state has been modified during unlocked period");
                goto Exit;
            }

            // Get the number of frames of padding (queued up to play) in the endpoint buffer.
            UINT32 padding = 0;
            hr = _ptrClientOut->GetCurrentPadding(&padding);
            EXIT_ON_ERROR(hr);

            // Derive the amount of available space in the output buffer
            uint32_t framesAvailable = bufferLength - padding;
            // WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "#available audio frames = %u", framesAvailable);

            // Do we have 10 ms available in the render buffer?
            if (framesAvailable < _playBlockSize)
            {
                // Not enough space in render buffer to store next render packet.
                _UnLock();
                break;
            }

            // Write n*10ms buffers to the render buffer
            const uint32_t n10msBuffers = (framesAvailable / _playBlockSize);
            for (uint32_t n = 0; n < n10msBuffers; n++)
            {
                // Get pointer (i.e., grab the buffer) to next space in the shared render buffer.
                hr = _ptrRenderClient->GetBuffer(_playBlockSize, &pData);
                EXIT_ON_ERROR(hr);

                QueryPerformanceCounter(&t1);    // measure time: START

                if (_ptrAudioBuffer)
                {
                    // Request data to be played out (#bytes = _playBlockSize*_audioFrameSize)
                    // The lock is released around the callback into VoE.
                    _UnLock();
                    int32_t nSamples =
                    _ptrAudioBuffer->RequestPlayoutData(_playBlockSize);
                    _Lock();

                    if (nSamples == -1)
                    {
                        _UnLock();
                        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
                                     "failed to read data from render client");
                        goto Exit;
                    }

                    // Sanity check to ensure that essential states are not modified during the unlocked period
                    if (_ptrRenderClient == NULL || _ptrClientOut == NULL)
                    {
                        _UnLock();
                        WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, "output state has been modified during unlocked period");
                        goto Exit;
                    }
                    if (nSamples != static_cast<int32_t>(_playBlockSize))
                    {
                        WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "nSamples(%d) != _playBlockSize(%d)", nSamples, _playBlockSize);
                    }

                    // Get the actual (stored) data
                    nSamples = _ptrAudioBuffer->GetPlayoutData((int8_t*)pData);
                }

                QueryPerformanceCounter(&t2);    // measure time: STOP
                time = (int)(t2.QuadPart-t1.QuadPart);
                _playAcc += time;

                DWORD dwFlags(0);
                hr = _ptrRenderClient->ReleaseBuffer(_playBlockSize, dwFlags);
                // See http://msdn.microsoft.com/en-us/library/dd316605(VS.85).aspx
                // for more details regarding AUDCLNT_E_DEVICE_INVALIDATED.
                EXIT_ON_ERROR(hr);

                _writtenSamples += _playBlockSize;
            }

            // Check the current delay on the playout side.
            if (clock) {
              UINT64 pos = 0;
              UINT64 freq = 1;
              clock->GetPosition(&pos, NULL);
              clock->GetFrequency(&freq);
              // Delay = samples written but not yet played, expressed in ms.
              playout_delay = ROUND((double(_writtenSamples) /
                  _devicePlaySampleRate - double(pos) / freq) * 1000.0);
              _sndCardPlayDelay = playout_delay;
            }

            _UnLock();
        }
    }

    // ------------------ THREAD LOOP ------------------ <<

    // Let the remaining queued audio drain before stopping the stream.
    SleepMs(static_cast<DWORD>(endpointBufferSizeMS+0.5));
    hr = _ptrClientOut->Stop();

Exit:
    // Reached either via EXIT_ON_ERROR (FAILED(hr), _critSect still held) or
    // via an explicit goto after _UnLock() (hr is then a success code), so
    // FAILED(hr) below doubles as "the lock is still held".
    SAFE_RELEASE(clock);

    if (FAILED(hr))
    {
        _ptrClientOut->Stop();
        _UnLock();
        _TraceCOMError(hr);
    }

    if (_winSupportAvrt)
    {
        if (NULL != hMmTask)
        {
            _PAvRevertMmThreadCharacteristics(hMmTask);
        }
    }

    _Lock();

    if (keepPlaying)
    {
        // The loop was exited prematurely (error/timeout), not via the
        // shutdown event: stop and reset the stream and flag a playout error.
        if (_ptrClientOut != NULL)
        {
            hr = _ptrClientOut->Stop();
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
            }
            hr = _ptrClientOut->Reset();
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
            }
        }
        // Trigger callback from module process thread
        _playError = 1;
        WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kPlayoutError message posted: rendering thread has ended pre-maturely");
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_Rendering thread is now terminated properly");
    }

    _UnLock();

    return (DWORD)hr;
}
3664
3665DWORD AudioDeviceWindowsCore::InitCaptureThreadPriority()
3666{
3667    _hMmTask = NULL;
3668
3669    _SetThreadName(0, "webrtc_core_audio_capture_thread");
3670
3671    // Use Multimedia Class Scheduler Service (MMCSS) to boost the thread
3672    // priority.
3673    if (_winSupportAvrt)
3674    {
3675        DWORD taskIndex(0);
3676        _hMmTask = _PAvSetMmThreadCharacteristicsA("Pro Audio", &taskIndex);
3677        if (_hMmTask)
3678        {
3679            if (!_PAvSetMmThreadPriority(_hMmTask, AVRT_PRIORITY_CRITICAL))
3680            {
3681                WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3682                    "failed to boost rec-thread using MMCSS");
3683            }
3684            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
3685                "capture thread is now registered with MMCSS (taskIndex=%d)",
3686                taskIndex);
3687        }
3688        else
3689        {
3690            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
3691                "failed to enable MMCSS on capture thread (err=%d)",
3692                GetLastError());
3693            _TraceCOMError(GetLastError());
3694        }
3695    }
3696
3697    return S_OK;
3698}
3699
3700void AudioDeviceWindowsCore::RevertCaptureThreadPriority()
3701{
3702    if (_winSupportAvrt)
3703    {
3704        if (NULL != _hMmTask)
3705        {
3706            _PAvRevertMmThreadCharacteristics(_hMmTask);
3707        }
3708    }
3709
3710    _hMmTask = NULL;
3711}
3712
DWORD AudioDeviceWindowsCore::DoCaptureThreadPollDMO()
{
    // Capture thread body used when the built-in AEC DMO is enabled. Instead
    // of waiting on WASAPI buffer events, the DMO is polled every 5 ms for
    // AEC-processed capture data, which is delivered to |_ptrAudioBuffer| in
    // what should be 10 ms frames (see asserts below). Returns the final
    // HRESULT: 0 after a clean shutdown, non-zero after an error.
    assert(_mediaBuffer != NULL);
    bool keepRecording = true;

    // Initialize COM as MTA in this thread.
    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
    if (!comInit.succeeded()) {
      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
        "failed to initialize COM in polling DMO thread");
      return 1;
    }

    // Register with MMCSS; failures inside are non-fatal (returns S_OK).
    HRESULT hr = InitCaptureThreadPriority();
    if (FAILED(hr))
    {
        return hr;
    }

    // Set event which will ensure that the calling thread modifies the
    // recording state to true.
    SetEvent(_hCaptureStartedEvent);

    // >> ---------------------------- THREAD LOOP ----------------------------
    while (keepRecording)
    {
        // Poll the DMO every 5 ms.
        // (The same interval used in the Wave implementation.)
        DWORD waitResult = WaitForSingleObject(_hShutdownCaptureEvent, 5);
        switch (waitResult)
        {
        case WAIT_OBJECT_0:         // _hShutdownCaptureEvent
            keepRecording = false;
            break;
        case WAIT_TIMEOUT:          // timeout notification
            break;
        default:                    // unexpected error
            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id,
                "Unknown wait termination on capture side");
            hr = -1; // To signal an error callback.
            keepRecording = false;
            break;
        }

        // Drain the DMO: keep calling ProcessOutput until it reports that no
        // more data is currently available (INCOMPLETE flag cleared) or an
        // error occurs. Each iteration re-acquires the critical section.
        while (keepRecording)
        {
            CriticalSectionScoped critScoped(&_critSect);

            DWORD dwStatus = 0;
            {
                DMO_OUTPUT_DATA_BUFFER dmoBuffer = {0};
                dmoBuffer.pBuffer = _mediaBuffer;
                dmoBuffer.pBuffer->AddRef();

                // Poll the DMO for AEC processed capture data. The DMO will
                // copy available data to |dmoBuffer|, and should only return
                // 10 ms frames. The value of |dwStatus| should be ignored.
                hr = _dmo->ProcessOutput(0, 1, &dmoBuffer, &dwStatus);
                SAFE_RELEASE(dmoBuffer.pBuffer);
                // |dwStatus| is a plain field of the stack struct, so reading
                // it after releasing pBuffer (extra ref taken above) is safe.
                dwStatus = dmoBuffer.dwStatus;
            }
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
                keepRecording = false;
                assert(false);
                break;
            }

            ULONG bytesProduced = 0;
            BYTE* data;
            // Get a pointer to the data buffer. This should be valid until
            // the next call to ProcessOutput.
            hr = _mediaBuffer->GetBufferAndLength(&data, &bytesProduced);
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
                keepRecording = false;
                assert(false);
                break;
            }

            // TODO(andrew): handle AGC.

            if (bytesProduced > 0)
            {
                const int kSamplesProduced = bytesProduced / _recAudioFrameSize;
                // TODO(andrew): verify that this is always satisfied. It might
                // be that ProcessOutput will try to return more than 10 ms if
                // we fail to call it frequently enough.
                assert(kSamplesProduced == static_cast<int>(_recBlockSize));
                assert(sizeof(BYTE) == sizeof(int8_t));
                _ptrAudioBuffer->SetRecordedBuffer(
                    reinterpret_cast<int8_t*>(data),
                    kSamplesProduced);
                // No delay/clock-drift estimates are available on this path.
                _ptrAudioBuffer->SetVQEData(0, 0, 0);

                // Temporarily drop the lock held by |critScoped| so the
                // callback cannot deadlock against other module calls.
                _UnLock();  // Release lock while making the callback.
                _ptrAudioBuffer->DeliverRecordedData();
                _Lock();
            }

            // Reset length to indicate buffer availability.
            hr = _mediaBuffer->SetLength(0);
            if (FAILED(hr))
            {
                _TraceCOMError(hr);
                keepRecording = false;
                assert(false);
                break;
            }

            if (!(dwStatus & DMO_OUTPUT_DATA_BUFFERF_INCOMPLETE))
            {
                // The DMO cannot currently produce more data. This is the
                // normal case; otherwise it means the DMO had more than 10 ms
                // of data available and ProcessOutput should be called again.
                break;
            }
        }
    }
    // ---------------------------- THREAD LOOP ---------------------------- <<

    RevertCaptureThreadPriority();

    if (FAILED(hr))
    {
        // Trigger callback from module process thread
        _recError = 1;
        WEBRTC_TRACE(kTraceError, kTraceUtility, _id,
            "kRecordingError message posted: capturing thread has ended "
            "prematurely");
    }
    else
    {
        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
            "Capturing thread is now terminated properly");
    }

    return hr;
}
3854
3855
3856// ----------------------------------------------------------------------------
3857//  DoCaptureThread
3858// ----------------------------------------------------------------------------
3859
3860DWORD AudioDeviceWindowsCore::DoCaptureThread()
3861{
3862
3863    bool keepRecording = true;
3864    HANDLE waitArray[2] = {_hShutdownCaptureEvent, _hCaptureSamplesReadyEvent};
3865    HRESULT hr = S_OK;
3866
3867    LARGE_INTEGER t1;
3868    LARGE_INTEGER t2;
3869    int32_t time(0);
3870
3871    BYTE* syncBuffer = NULL;
3872    UINT32 syncBufIndex = 0;
3873
3874    _readSamples = 0;
3875
3876    // Initialize COM as MTA in this thread.
3877    ScopedCOMInitializer comInit(ScopedCOMInitializer::kMTA);
3878    if (!comInit.succeeded()) {
3879      WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
3880        "failed to initialize COM in capture thread");
3881      return 1;
3882    }
3883
3884    hr = InitCaptureThreadPriority();
3885    if (FAILED(hr))
3886    {
3887        return hr;
3888    }
3889
3890    _Lock();
3891
3892    // Get size of capturing buffer (length is expressed as the number of audio frames the buffer can hold).
3893    // This value is fixed during the capturing session.
3894    //
3895    UINT32 bufferLength = 0;
3896    hr = _ptrClientIn->GetBufferSize(&bufferLength);
3897    EXIT_ON_ERROR(hr);
3898    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] size of buffer       : %u", bufferLength);
3899
3900    // Allocate memory for sync buffer.
3901    // It is used for compensation between native 44.1 and internal 44.0 and
3902    // for cases when the capture buffer is larger than 10ms.
3903    //
3904    const UINT32 syncBufferSize = 2*(bufferLength * _recAudioFrameSize);
3905    syncBuffer = new BYTE[syncBufferSize];
3906    if (syncBuffer == NULL)
3907    {
3908        return (DWORD)E_POINTER;
3909    }
3910    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] size of sync buffer  : %u [bytes]", syncBufferSize);
3911
3912    // Get maximum latency for the current stream (will not change for the lifetime of the IAudioClient object).
3913    //
3914    REFERENCE_TIME latency;
3915    _ptrClientIn->GetStreamLatency(&latency);
3916    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] max stream latency   : %u (%3.2f ms)",
3917        (DWORD)latency, (double)(latency / 10000.0));
3918
3919    // Get the length of the periodic interval separating successive processing passes by
3920    // the audio engine on the data in the endpoint buffer.
3921    //
3922    REFERENCE_TIME devPeriod = 0;
3923    REFERENCE_TIME devPeriodMin = 0;
3924    _ptrClientIn->GetDevicePeriod(&devPeriod, &devPeriodMin);
3925    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] device period        : %u (%3.2f ms)",
3926        (DWORD)devPeriod, (double)(devPeriod / 10000.0));
3927
3928    double extraDelayMS = (double)((latency + devPeriod) / 10000.0);
3929    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] extraDelayMS         : %3.2f", extraDelayMS);
3930
3931    double endpointBufferSizeMS = 10.0 * ((double)bufferLength / (double)_recBlockSize);
3932    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "[CAPT] endpointBufferSizeMS : %3.2f", endpointBufferSizeMS);
3933
3934    // Start up the capturing stream.
3935    //
3936    hr = _ptrClientIn->Start();
3937    EXIT_ON_ERROR(hr);
3938
3939    _UnLock();
3940
3941    // Set event which will ensure that the calling thread modifies the recording state to true.
3942    //
3943    SetEvent(_hCaptureStartedEvent);
3944
3945    // >> ---------------------------- THREAD LOOP ----------------------------
3946
3947    while (keepRecording)
3948    {
3949        // Wait for a capture notification event or a shutdown event
3950        DWORD waitResult = WaitForMultipleObjects(2, waitArray, FALSE, 500);
3951        switch (waitResult)
3952        {
3953        case WAIT_OBJECT_0 + 0:        // _hShutdownCaptureEvent
3954            keepRecording = false;
3955            break;
3956        case WAIT_OBJECT_0 + 1:        // _hCaptureSamplesReadyEvent
3957            break;
3958        case WAIT_TIMEOUT:            // timeout notification
3959            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "capture event timed out after 0.5 seconds");
3960            goto Exit;
3961        default:                    // unexpected error
3962            WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "unknown wait termination on capture side");
3963            goto Exit;
3964        }
3965
3966        while (keepRecording)
3967        {
3968            BYTE *pData = 0;
3969            UINT32 framesAvailable = 0;
3970            DWORD flags = 0;
3971            UINT64 recTime = 0;
3972            UINT64 recPos = 0;
3973
3974            _Lock();
3975
3976            // Sanity check to ensure that essential states are not modified
3977            // during the unlocked period.
3978            if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
3979            {
3980                _UnLock();
3981                WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id,
3982                    "input state has been modified during unlocked period");
3983                goto Exit;
3984            }
3985
3986            //  Find out how much capture data is available
3987            //
3988            hr = _ptrCaptureClient->GetBuffer(&pData,           // packet which is ready to be read by used
3989                                              &framesAvailable, // #frames in the captured packet (can be zero)
3990                                              &flags,           // support flags (check)
3991                                              &recPos,          // device position of first audio frame in data packet
3992                                              &recTime);        // value of performance counter at the time of recording the first audio frame
3993
3994            if (SUCCEEDED(hr))
3995            {
3996                if (AUDCLNT_S_BUFFER_EMPTY == hr)
3997                {
3998                    // Buffer was empty => start waiting for a new capture notification event
3999                    _UnLock();
4000                    break;
4001                }
4002
4003                if (flags & AUDCLNT_BUFFERFLAGS_SILENT)
4004                {
4005                    // Treat all of the data in the packet as silence and ignore the actual data values.
4006                    WEBRTC_TRACE(kTraceWarning, kTraceAudioDevice, _id, "AUDCLNT_BUFFERFLAGS_SILENT");
4007                    pData = NULL;
4008                }
4009
4010                assert(framesAvailable != 0);
4011
4012                if (pData)
4013                {
4014                    CopyMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], pData, framesAvailable*_recAudioFrameSize);
4015                }
4016                else
4017                {
4018                    ZeroMemory(&syncBuffer[syncBufIndex*_recAudioFrameSize], framesAvailable*_recAudioFrameSize);
4019                }
4020                assert(syncBufferSize >= (syncBufIndex*_recAudioFrameSize)+framesAvailable*_recAudioFrameSize);
4021
4022                // Release the capture buffer
4023                //
4024                hr = _ptrCaptureClient->ReleaseBuffer(framesAvailable);
4025                EXIT_ON_ERROR(hr);
4026
4027                _readSamples += framesAvailable;
4028                syncBufIndex += framesAvailable;
4029
4030                QueryPerformanceCounter(&t1);
4031
4032                // Get the current recording and playout delay.
4033                uint32_t sndCardRecDelay = (uint32_t)
4034                    (((((UINT64)t1.QuadPart * _perfCounterFactor) - recTime)
4035                        / 10000) + (10*syncBufIndex) / _recBlockSize - 10);
4036                uint32_t sndCardPlayDelay =
4037                    static_cast<uint32_t>(_sndCardPlayDelay);
4038
4039                _sndCardRecDelay = sndCardRecDelay;
4040
4041                while (syncBufIndex >= _recBlockSize)
4042                {
4043                    if (_ptrAudioBuffer)
4044                    {
4045                        _ptrAudioBuffer->SetRecordedBuffer((const int8_t*)syncBuffer, _recBlockSize);
4046                        _ptrAudioBuffer->SetVQEData(sndCardPlayDelay,
4047                                                    sndCardRecDelay,
4048                                                    0);
4049
4050                        _ptrAudioBuffer->SetTypingStatus(KeyPressed());
4051
4052                        QueryPerformanceCounter(&t1);    // measure time: START
4053
4054                        _UnLock();  // release lock while making the callback
4055                        _ptrAudioBuffer->DeliverRecordedData();
4056                        _Lock();    // restore the lock
4057
4058                        QueryPerformanceCounter(&t2);    // measure time: STOP
4059
4060                        // Measure "average CPU load".
4061                        // Basically what we do here is to measure how many percent of our 10ms period
4062                        // is used for encoding and decoding. This value shuld be used as a warning indicator
4063                        // only and not seen as an absolute value. Running at ~100% will lead to bad QoS.
4064                        time = (int)(t2.QuadPart - t1.QuadPart);
4065                        _avgCPULoad = (float)(_avgCPULoad*.99 + (time + _playAcc) / (double)(_perfCounterFreq.QuadPart));
4066                        _playAcc = 0;
4067
4068                        // Sanity check to ensure that essential states are not modified during the unlocked period
4069                        if (_ptrCaptureClient == NULL || _ptrClientIn == NULL)
4070                        {
4071                            _UnLock();
4072                            WEBRTC_TRACE(kTraceCritical, kTraceAudioDevice, _id, "input state has been modified during unlocked period");
4073                            goto Exit;
4074                        }
4075                    }
4076
4077                    // store remaining data which was not able to deliver as 10ms segment
4078                    MoveMemory(&syncBuffer[0], &syncBuffer[_recBlockSize*_recAudioFrameSize], (syncBufIndex-_recBlockSize)*_recAudioFrameSize);
4079                    syncBufIndex -= _recBlockSize;
4080                    sndCardRecDelay -= 10;
4081                }
4082
4083                if (_AGC)
4084                {
4085                    uint32_t newMicLevel = _ptrAudioBuffer->NewMicLevel();
4086                    if (newMicLevel != 0)
4087                    {
4088                        // The VQE will only deliver non-zero microphone levels when a change is needed.
4089                        // Set this new mic level (received from the observer as return value in the callback).
4090                        WEBRTC_TRACE(kTraceStream, kTraceAudioDevice, _id, "AGC change of volume: new=%u",  newMicLevel);
4091                        // We store this outside of the audio buffer to avoid
4092                        // having it overwritten by the getter thread.
4093                        _newMicLevel = newMicLevel;
4094                        SetEvent(_hSetCaptureVolumeEvent);
4095                    }
4096                }
4097            }
4098            else
4099            {
4100                // If GetBuffer returns AUDCLNT_E_BUFFER_ERROR, the thread consuming the audio samples
4101                // must wait for the next processing pass. The client might benefit from keeping a count
4102                // of the failed GetBuffer calls. If GetBuffer returns this error repeatedly, the client
4103                // can start a new processing loop after shutting down the current client by calling
4104                // IAudioClient::Stop, IAudioClient::Reset, and releasing the audio client.
4105                WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
4106                    "IAudioCaptureClient::GetBuffer returned AUDCLNT_E_BUFFER_ERROR, hr = 0x%08X",  hr);
4107                goto Exit;
4108            }
4109
4110            _UnLock();
4111        }
4112    }
4113
4114    // ---------------------------- THREAD LOOP ---------------------------- <<
4115
4116    hr = _ptrClientIn->Stop();
4117
4118Exit:
4119    if (FAILED(hr))
4120    {
4121        _ptrClientIn->Stop();
4122        _UnLock();
4123        _TraceCOMError(hr);
4124    }
4125
4126    RevertCaptureThreadPriority();
4127
4128    _Lock();
4129
4130    if (keepRecording)
4131    {
4132        if (_ptrClientIn != NULL)
4133        {
4134            hr = _ptrClientIn->Stop();
4135            if (FAILED(hr))
4136            {
4137                _TraceCOMError(hr);
4138            }
4139            hr = _ptrClientIn->Reset();
4140            if (FAILED(hr))
4141            {
4142                _TraceCOMError(hr);
4143            }
4144        }
4145
4146        // Trigger callback from module process thread
4147        _recError = 1;
4148        WEBRTC_TRACE(kTraceError, kTraceUtility, _id, "kRecordingError message posted: capturing thread has ended pre-maturely");
4149    }
4150    else
4151    {
4152        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "_Capturing thread is now terminated properly");
4153    }
4154
4155    SAFE_RELEASE(_ptrClientIn);
4156    SAFE_RELEASE(_ptrCaptureClient);
4157
4158    _UnLock();
4159
4160    if (syncBuffer)
4161    {
4162        delete [] syncBuffer;
4163    }
4164
4165    return (DWORD)hr;
4166}
4167
4168int32_t AudioDeviceWindowsCore::EnableBuiltInAEC(bool enable)
4169{
4170
4171    if (_recIsInitialized)
4172    {
4173        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
4174            "Attempt to set Windows AEC with recording already initialized");
4175        return -1;
4176    }
4177
4178    if (_dmo == NULL)
4179    {
4180        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
4181            "Built-in AEC DMO was not initialized properly at create time");
4182        return -1;
4183    }
4184
4185    _builtInAecEnabled = enable;
4186    return 0;
4187}
4188
4189bool AudioDeviceWindowsCore::BuiltInAECIsEnabled() const
4190{
4191    return _builtInAecEnabled;
4192}
4193
4194int AudioDeviceWindowsCore::SetDMOProperties()
4195{
4196    HRESULT hr = S_OK;
4197    assert(_dmo != NULL);
4198
4199    scoped_refptr<IPropertyStore> ps;
4200    {
4201        IPropertyStore* ptrPS = NULL;
4202        hr = _dmo->QueryInterface(IID_IPropertyStore,
4203                                  reinterpret_cast<void**>(&ptrPS));
4204        if (FAILED(hr) || ptrPS == NULL)
4205        {
4206            _TraceCOMError(hr);
4207            return -1;
4208        }
4209        ps = ptrPS;
4210        SAFE_RELEASE(ptrPS);
4211    }
4212
4213    // Set the AEC system mode.
4214    // SINGLE_CHANNEL_AEC - AEC processing only.
4215    if (SetVtI4Property(ps,
4216                        MFPKEY_WMAAECMA_SYSTEM_MODE,
4217                        SINGLE_CHANNEL_AEC))
4218    {
4219        return -1;
4220    }
4221
4222    // Set the AEC source mode.
4223    // VARIANT_TRUE - Source mode (we poll the AEC for captured data).
4224    if (SetBoolProperty(ps,
4225                        MFPKEY_WMAAECMA_DMO_SOURCE_MODE,
4226                        VARIANT_TRUE) == -1)
4227    {
4228        return -1;
4229    }
4230
4231    // Enable the feature mode.
4232    // This lets us override all the default processing settings below.
4233    if (SetBoolProperty(ps,
4234                        MFPKEY_WMAAECMA_FEATURE_MODE,
4235                        VARIANT_TRUE) == -1)
4236    {
4237        return -1;
4238    }
4239
4240    // Disable analog AGC (default enabled).
4241    if (SetBoolProperty(ps,
4242                        MFPKEY_WMAAECMA_MIC_GAIN_BOUNDER,
4243                        VARIANT_FALSE) == -1)
4244    {
4245        return -1;
4246    }
4247
4248    // Disable noise suppression (default enabled).
4249    // 0 - Disabled, 1 - Enabled
4250    if (SetVtI4Property(ps,
4251                        MFPKEY_WMAAECMA_FEATR_NS,
4252                        0) == -1)
4253    {
4254        return -1;
4255    }
4256
4257    // Relevant parameters to leave at default settings:
4258    // MFPKEY_WMAAECMA_FEATR_AGC - Digital AGC (disabled).
4259    // MFPKEY_WMAAECMA_FEATR_CENTER_CLIP - AEC center clipping (enabled).
4260    // MFPKEY_WMAAECMA_FEATR_ECHO_LENGTH - Filter length (256 ms).
4261    //   TODO(andrew): investigate decresing the length to 128 ms.
4262    // MFPKEY_WMAAECMA_FEATR_FRAME_SIZE - Frame size (0).
4263    //   0 is automatic; defaults to 160 samples (or 10 ms frames at the
4264    //   selected 16 kHz) as long as mic array processing is disabled.
4265    // MFPKEY_WMAAECMA_FEATR_NOISE_FILL - Comfort noise (enabled).
4266    // MFPKEY_WMAAECMA_FEATR_VAD - VAD (disabled).
4267
4268    // Set the devices selected by VoE. If using a default device, we need to
4269    // search for the device index.
4270    int inDevIndex = _inputDeviceIndex;
4271    int outDevIndex = _outputDeviceIndex;
4272    if (!_usingInputDeviceIndex)
4273    {
4274        ERole role = eCommunications;
4275        if (_inputDevice == AudioDeviceModule::kDefaultDevice)
4276        {
4277            role = eConsole;
4278        }
4279
4280        if (_GetDefaultDeviceIndex(eCapture, role, &inDevIndex) == -1)
4281        {
4282            return -1;
4283        }
4284    }
4285
4286    if (!_usingOutputDeviceIndex)
4287    {
4288        ERole role = eCommunications;
4289        if (_outputDevice == AudioDeviceModule::kDefaultDevice)
4290        {
4291            role = eConsole;
4292        }
4293
4294        if (_GetDefaultDeviceIndex(eRender, role, &outDevIndex) == -1)
4295        {
4296            return -1;
4297        }
4298    }
4299
4300    DWORD devIndex = static_cast<uint32_t>(outDevIndex << 16) +
4301                     static_cast<uint32_t>(0x0000ffff & inDevIndex);
4302    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
4303        "Capture device index: %d, render device index: %d",
4304        inDevIndex, outDevIndex);
4305    if (SetVtI4Property(ps,
4306                        MFPKEY_WMAAECMA_DEVICE_INDEXES,
4307                        devIndex) == -1)
4308    {
4309        return -1;
4310    }
4311
4312    return 0;
4313}
4314
4315int AudioDeviceWindowsCore::SetBoolProperty(IPropertyStore* ptrPS,
4316                                            REFPROPERTYKEY key,
4317                                            VARIANT_BOOL value)
4318{
4319    PROPVARIANT pv;
4320    PropVariantInit(&pv);
4321    pv.vt = VT_BOOL;
4322    pv.boolVal = value;
4323    HRESULT hr = ptrPS->SetValue(key, pv);
4324    PropVariantClear(&pv);
4325    if (FAILED(hr))
4326    {
4327        _TraceCOMError(hr);
4328        return -1;
4329    }
4330    return 0;
4331}
4332
4333int AudioDeviceWindowsCore::SetVtI4Property(IPropertyStore* ptrPS,
4334                                            REFPROPERTYKEY key,
4335                                            LONG value)
4336{
4337    PROPVARIANT pv;
4338    PropVariantInit(&pv);
4339    pv.vt = VT_I4;
4340    pv.lVal = value;
4341    HRESULT hr = ptrPS->SetValue(key, pv);
4342    PropVariantClear(&pv);
4343    if (FAILED(hr))
4344    {
4345        _TraceCOMError(hr);
4346        return -1;
4347    }
4348    return 0;
4349}
4350
4351// ----------------------------------------------------------------------------
4352//  _RefreshDeviceList
4353//
4354//  Creates a new list of endpoint rendering or capture devices after
4355//  deleting any previously created (and possibly out-of-date) list of
4356//  such devices.
4357// ----------------------------------------------------------------------------
4358
4359int32_t AudioDeviceWindowsCore::_RefreshDeviceList(EDataFlow dir)
4360{
4361    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4362
4363    HRESULT hr = S_OK;
4364    IMMDeviceCollection *pCollection = NULL;
4365
4366    assert(dir == eRender || dir == eCapture);
4367    assert(_ptrEnumerator != NULL);
4368
4369    // Create a fresh list of devices using the specified direction
4370    hr = _ptrEnumerator->EnumAudioEndpoints(
4371                           dir,
4372                           DEVICE_STATE_ACTIVE,
4373                           &pCollection);
4374    if (FAILED(hr))
4375    {
4376        _TraceCOMError(hr);
4377        SAFE_RELEASE(pCollection);
4378        return -1;
4379    }
4380
4381    if (dir == eRender)
4382    {
4383        SAFE_RELEASE(_ptrRenderCollection);
4384        _ptrRenderCollection = pCollection;
4385    }
4386    else
4387    {
4388        SAFE_RELEASE(_ptrCaptureCollection);
4389        _ptrCaptureCollection = pCollection;
4390    }
4391
4392    return 0;
4393}
4394
4395// ----------------------------------------------------------------------------
4396//  _DeviceListCount
4397//
4398//  Gets a count of the endpoint rendering or capture devices in the
4399//  current list of such devices.
4400// ----------------------------------------------------------------------------
4401
4402int16_t AudioDeviceWindowsCore::_DeviceListCount(EDataFlow dir)
4403{
4404    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4405
4406    HRESULT hr = S_OK;
4407    UINT count = 0;
4408
4409    assert(eRender == dir || eCapture == dir);
4410
4411    if (eRender == dir && NULL != _ptrRenderCollection)
4412    {
4413        hr = _ptrRenderCollection->GetCount(&count);
4414    }
4415    else if (NULL != _ptrCaptureCollection)
4416    {
4417        hr = _ptrCaptureCollection->GetCount(&count);
4418    }
4419
4420    if (FAILED(hr))
4421    {
4422        _TraceCOMError(hr);
4423        return -1;
4424    }
4425
4426    return static_cast<int16_t> (count);
4427}
4428
4429// ----------------------------------------------------------------------------
4430//  _GetListDeviceName
4431//
4432//  Gets the friendly name of an endpoint rendering or capture device
4433//  from the current list of such devices. The caller uses an index
4434//  into the list to identify the device.
4435//
4436//  Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
4437//  in _RefreshDeviceList().
4438// ----------------------------------------------------------------------------
4439
4440int32_t AudioDeviceWindowsCore::_GetListDeviceName(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
4441{
4442    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4443
4444    HRESULT hr = S_OK;
4445    IMMDevice *pDevice = NULL;
4446
4447    assert(dir == eRender || dir == eCapture);
4448
4449    if (eRender == dir && NULL != _ptrRenderCollection)
4450    {
4451        hr = _ptrRenderCollection->Item(index, &pDevice);
4452    }
4453    else if (NULL != _ptrCaptureCollection)
4454    {
4455        hr = _ptrCaptureCollection->Item(index, &pDevice);
4456    }
4457
4458    if (FAILED(hr))
4459    {
4460        _TraceCOMError(hr);
4461        SAFE_RELEASE(pDevice);
4462        return -1;
4463    }
4464
4465    int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
4466    SAFE_RELEASE(pDevice);
4467    return res;
4468}
4469
4470// ----------------------------------------------------------------------------
4471//  _GetDefaultDeviceName
4472//
4473//  Gets the friendly name of an endpoint rendering or capture device
4474//  given a specified device role.
4475//
4476//  Uses: _ptrEnumerator
4477// ----------------------------------------------------------------------------
4478
4479int32_t AudioDeviceWindowsCore::_GetDefaultDeviceName(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
4480{
4481    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4482
4483    HRESULT hr = S_OK;
4484    IMMDevice *pDevice = NULL;
4485
4486    assert(dir == eRender || dir == eCapture);
4487    assert(role == eConsole || role == eCommunications);
4488    assert(_ptrEnumerator != NULL);
4489
4490    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
4491                           dir,
4492                           role,
4493                           &pDevice);
4494
4495    if (FAILED(hr))
4496    {
4497        _TraceCOMError(hr);
4498        SAFE_RELEASE(pDevice);
4499        return -1;
4500    }
4501
4502    int32_t res = _GetDeviceName(pDevice, szBuffer, bufferLen);
4503    SAFE_RELEASE(pDevice);
4504    return res;
4505}
4506
4507// ----------------------------------------------------------------------------
4508//  _GetListDeviceID
4509//
4510//  Gets the unique ID string of an endpoint rendering or capture device
4511//  from the current list of such devices. The caller uses an index
4512//  into the list to identify the device.
4513//
4514//  Uses: _ptrRenderCollection or _ptrCaptureCollection which is updated
4515//  in _RefreshDeviceList().
4516// ----------------------------------------------------------------------------
4517
4518int32_t AudioDeviceWindowsCore::_GetListDeviceID(EDataFlow dir, int index, LPWSTR szBuffer, int bufferLen)
4519{
4520    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4521
4522    HRESULT hr = S_OK;
4523    IMMDevice *pDevice = NULL;
4524
4525    assert(dir == eRender || dir == eCapture);
4526
4527    if (eRender == dir && NULL != _ptrRenderCollection)
4528    {
4529        hr = _ptrRenderCollection->Item(index, &pDevice);
4530    }
4531    else if (NULL != _ptrCaptureCollection)
4532    {
4533        hr = _ptrCaptureCollection->Item(index, &pDevice);
4534    }
4535
4536    if (FAILED(hr))
4537    {
4538        _TraceCOMError(hr);
4539        SAFE_RELEASE(pDevice);
4540        return -1;
4541    }
4542
4543    int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
4544    SAFE_RELEASE(pDevice);
4545    return res;
4546}
4547
4548// ----------------------------------------------------------------------------
4549//  _GetDefaultDeviceID
4550//
//  Gets the unique device ID of an endpoint rendering or capture device
4552//  given a specified device role.
4553//
4554//  Uses: _ptrEnumerator
4555// ----------------------------------------------------------------------------
4556
4557int32_t AudioDeviceWindowsCore::_GetDefaultDeviceID(EDataFlow dir, ERole role, LPWSTR szBuffer, int bufferLen)
4558{
4559    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4560
4561    HRESULT hr = S_OK;
4562    IMMDevice *pDevice = NULL;
4563
4564    assert(dir == eRender || dir == eCapture);
4565    assert(role == eConsole || role == eCommunications);
4566    assert(_ptrEnumerator != NULL);
4567
4568    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
4569                           dir,
4570                           role,
4571                           &pDevice);
4572
4573    if (FAILED(hr))
4574    {
4575        _TraceCOMError(hr);
4576        SAFE_RELEASE(pDevice);
4577        return -1;
4578    }
4579
4580    int32_t res = _GetDeviceID(pDevice, szBuffer, bufferLen);
4581    SAFE_RELEASE(pDevice);
4582    return res;
4583}
4584
// Finds the position of the default device (for the given direction and
// role) within the cached endpoint collection, writing it to |*index|.
// Returns 0 on success, -1 on any failure (including "not found").
int32_t AudioDeviceWindowsCore::_GetDefaultDeviceIndex(EDataFlow dir,
                                                       ERole role,
                                                       int* index)
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);

    HRESULT hr = S_OK;
    WCHAR szDefaultDeviceID[MAX_PATH] = {0};
    WCHAR szDeviceID[MAX_PATH] = {0};

    // Both ID buffers must have identical capacity so the full-buffer
    // wcsncmp() below compares like with like.
    const size_t kDeviceIDLength = sizeof(szDeviceID)/sizeof(szDeviceID[0]);
    assert(kDeviceIDLength ==
        sizeof(szDefaultDeviceID) / sizeof(szDefaultDeviceID[0]));

    // Step 1: obtain the unique ID string of the default endpoint.
    if (_GetDefaultDeviceID(dir,
                            role,
                            szDefaultDeviceID,
                            kDeviceIDLength) == -1)
    {
        return -1;
    }

    // Step 2: pick the cached collection for this direction (populated by
    // _RefreshDeviceList()) and scan it for a matching ID.
    IMMDeviceCollection* collection = _ptrCaptureCollection;
    if (dir == eRender)
    {
        collection = _ptrRenderCollection;
    }

    if (!collection)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "Device collection not valid");
        return -1;
    }

    UINT count = 0;
    hr = collection->GetCount(&count);
    if (FAILED(hr))
    {
        _TraceCOMError(hr);
        return -1;
    }

    *index = -1;
    for (UINT i = 0; i < count; i++)
    {
        memset(szDeviceID, 0, sizeof(szDeviceID));
        // The scoped_refptr holds the device for this iteration only; the
        // raw reference returned by Item() is dropped immediately after
        // the smart pointer takes its own reference.
        scoped_refptr<IMMDevice> device;
        {
            IMMDevice* ptrDevice = NULL;
            hr = collection->Item(i, &ptrDevice);
            if (FAILED(hr) || ptrDevice == NULL)
            {
                _TraceCOMError(hr);
                return -1;
            }
            device = ptrDevice;   // AddRef via scoped_refptr assignment
            SAFE_RELEASE(ptrDevice);
        }

        if (_GetDeviceID(device, szDeviceID, kDeviceIDLength) == -1)
        {
           return -1;
        }

        if (wcsncmp(szDefaultDeviceID, szDeviceID, kDeviceIDLength) == 0)
        {
            // Found a match.
            *index = i;
            break;
        }

    }

    if (*index == -1)
    {
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "Unable to find collection index for default device");
        return -1;
    }

    return 0;
}
4668
4669// ----------------------------------------------------------------------------
4670//  _GetDeviceName
4671// ----------------------------------------------------------------------------
4672
// Copies the friendly name of |pDevice| into |pszBuffer| (always
// null-terminated via _TRUNCATE). On any failure — NULL device, property
// store error, missing or non-string property — a placeholder string is
// written instead. Always returns 0.
int32_t AudioDeviceWindowsCore::_GetDeviceName(IMMDevice* pDevice,
                                               LPWSTR pszBuffer,
                                               int bufferLen)
{
    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);

    static const WCHAR szDefault[] = L"<Device not available>";

    // hr starts as a failure so that a NULL |pDevice| naturally falls
    // through every SUCCEEDED(hr) stage into the default-name branch.
    HRESULT hr = E_FAIL;
    IPropertyStore *pProps = NULL;
    PROPVARIANT varName;

    assert(pszBuffer != NULL);
    assert(bufferLen > 0);

    if (pDevice != NULL)
    {
        hr = pDevice->OpenPropertyStore(STGM_READ, &pProps);
        if (FAILED(hr))
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "IMMDevice::OpenPropertyStore failed, hr = 0x%08X", hr);
        }
    }

    // Initialize container for property value.
    PropVariantInit(&varName);

    if (SUCCEEDED(hr))
    {
        // Get the endpoint device's friendly-name property.
        hr = pProps->GetValue(PKEY_Device_FriendlyName, &varName);
        if (FAILED(hr))
        {
            WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
                "IPropertyStore::GetValue failed, hr = 0x%08X", hr);
        }
    }

    // A property that exists but holds no value is treated as a failure.
    if ((SUCCEEDED(hr)) && (VT_EMPTY == varName.vt))
    {
        hr = E_FAIL;
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "IPropertyStore::GetValue returned no value, hr = 0x%08X", hr);
    }

    if ((SUCCEEDED(hr)) && (VT_LPWSTR != varName.vt))
    {
        // The returned value is not a wide null terminated string.
        hr = E_UNEXPECTED;
        WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
            "IPropertyStore::GetValue returned unexpected type, hr = 0x%08X", hr);
    }

    if (SUCCEEDED(hr) && (varName.pwszVal != NULL))
    {
        // Copy the valid device name to the provided output buffer.
        wcsncpy_s(pszBuffer, bufferLen, varName.pwszVal, _TRUNCATE);
    }
    else
    {
        // Failed to find the device name.
        wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
    }

    // PropVariantClear() frees any string allocated by GetValue().
    PropVariantClear(&varName);
    SAFE_RELEASE(pProps);

    return 0;
}
4743
4744// ----------------------------------------------------------------------------
4745//  _GetDeviceID
4746// ----------------------------------------------------------------------------
4747
4748int32_t AudioDeviceWindowsCore::_GetDeviceID(IMMDevice* pDevice, LPWSTR pszBuffer, int bufferLen)
4749{
4750    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4751
4752    static const WCHAR szDefault[] = L"<Device not available>";
4753
4754    HRESULT hr = E_FAIL;
4755    LPWSTR pwszID = NULL;
4756
4757    assert(pszBuffer != NULL);
4758    assert(bufferLen > 0);
4759
4760    if (pDevice != NULL)
4761    {
4762        hr = pDevice->GetId(&pwszID);
4763    }
4764
4765    if (hr == S_OK)
4766    {
4767        // Found the device ID.
4768        wcsncpy_s(pszBuffer, bufferLen, pwszID, _TRUNCATE);
4769    }
4770    else
4771    {
4772        // Failed to find the device ID.
4773        wcsncpy_s(pszBuffer, bufferLen, szDefault, _TRUNCATE);
4774    }
4775
4776    CoTaskMemFree(pwszID);
4777    return 0;
4778}
4779
4780// ----------------------------------------------------------------------------
4781//  _GetDefaultDevice
4782// ----------------------------------------------------------------------------
4783
4784int32_t AudioDeviceWindowsCore::_GetDefaultDevice(EDataFlow dir, ERole role, IMMDevice** ppDevice)
4785{
4786    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4787
4788    HRESULT hr(S_OK);
4789
4790    assert(_ptrEnumerator != NULL);
4791
4792    hr = _ptrEnumerator->GetDefaultAudioEndpoint(
4793                                   dir,
4794                                   role,
4795                                   ppDevice);
4796    if (FAILED(hr))
4797    {
4798        _TraceCOMError(hr);
4799        return -1;
4800    }
4801
4802    return 0;
4803}
4804
4805// ----------------------------------------------------------------------------
4806//  _GetListDevice
4807// ----------------------------------------------------------------------------
4808
4809int32_t AudioDeviceWindowsCore::_GetListDevice(EDataFlow dir, int index, IMMDevice** ppDevice)
4810{
4811    HRESULT hr(S_OK);
4812
4813    assert(_ptrEnumerator != NULL);
4814
4815    IMMDeviceCollection *pCollection = NULL;
4816
4817    hr = _ptrEnumerator->EnumAudioEndpoints(
4818                               dir,
4819                               DEVICE_STATE_ACTIVE,        // only active endpoints are OK
4820                               &pCollection);
4821    if (FAILED(hr))
4822    {
4823        _TraceCOMError(hr);
4824        SAFE_RELEASE(pCollection);
4825        return -1;
4826    }
4827
4828    hr = pCollection->Item(
4829                        index,
4830                        ppDevice);
4831    if (FAILED(hr))
4832    {
4833        _TraceCOMError(hr);
4834        SAFE_RELEASE(pCollection);
4835        return -1;
4836    }
4837
4838    return 0;
4839}
4840
4841// ----------------------------------------------------------------------------
4842//  _EnumerateEndpointDevicesAll
4843// ----------------------------------------------------------------------------
4844
4845int32_t AudioDeviceWindowsCore::_EnumerateEndpointDevicesAll(EDataFlow dataFlow) const
4846{
4847    WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "%s", __FUNCTION__);
4848
4849    assert(_ptrEnumerator != NULL);
4850
4851    HRESULT hr = S_OK;
4852    IMMDeviceCollection *pCollection = NULL;
4853    IMMDevice *pEndpoint = NULL;
4854    IPropertyStore *pProps = NULL;
4855    IAudioEndpointVolume* pEndpointVolume = NULL;
4856    LPWSTR pwszID = NULL;
4857
4858    // Generate a collection of audio endpoint devices in the system.
4859    // Get states for *all* endpoint devices.
4860    // Output: IMMDeviceCollection interface.
4861    hr = _ptrEnumerator->EnumAudioEndpoints(
4862                                 dataFlow,            // data-flow direction (input parameter)
4863                                 DEVICE_STATE_ACTIVE | DEVICE_STATE_DISABLED | DEVICE_STATE_UNPLUGGED,
4864                                 &pCollection);        // release interface when done
4865
4866    EXIT_ON_ERROR(hr);
4867
4868    // use the IMMDeviceCollection interface...
4869
4870    UINT count = 0;
4871
4872    // Retrieve a count of the devices in the device collection.
4873    hr = pCollection->GetCount(&count);
4874    EXIT_ON_ERROR(hr);
4875    if (dataFlow == eRender)
4876        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#rendering endpoint devices (counting all): %u", count);
4877    else if (dataFlow == eCapture)
4878        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#capturing endpoint devices (counting all): %u", count);
4879
4880    if (count == 0)
4881    {
4882        return 0;
4883    }
4884
4885    // Each loop prints the name of an endpoint device.
4886    for (ULONG i = 0; i < count; i++)
4887    {
4888        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "Endpoint %d:", i);
4889
4890        // Get pointer to endpoint number i.
4891        // Output: IMMDevice interface.
4892        hr = pCollection->Item(
4893                            i,
4894                            &pEndpoint);
4895        CONTINUE_ON_ERROR(hr);
4896
4897        // use the IMMDevice interface of the specified endpoint device...
4898
4899        // Get the endpoint ID string (uniquely identifies the device among all audio endpoint devices)
4900        hr = pEndpoint->GetId(&pwszID);
4901        CONTINUE_ON_ERROR(hr);
4902        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "ID string    : %S", pwszID);
4903
4904        // Retrieve an interface to the device's property store.
4905        // Output: IPropertyStore interface.
4906        hr = pEndpoint->OpenPropertyStore(
4907                          STGM_READ,
4908                          &pProps);
4909        CONTINUE_ON_ERROR(hr);
4910
4911        // use the IPropertyStore interface...
4912
4913        PROPVARIANT varName;
4914        // Initialize container for property value.
4915        PropVariantInit(&varName);
4916
4917        // Get the endpoint's friendly-name property.
4918        // Example: "Speakers (Realtek High Definition Audio)"
4919        hr = pProps->GetValue(
4920                       PKEY_Device_FriendlyName,
4921                       &varName);
4922        CONTINUE_ON_ERROR(hr);
4923        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "friendly name: \"%S\"", varName.pwszVal);
4924
4925        // Get the endpoint's current device state
4926        DWORD dwState;
4927        hr = pEndpoint->GetState(&dwState);
4928        CONTINUE_ON_ERROR(hr);
4929        if (dwState & DEVICE_STATE_ACTIVE)
4930            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : *ACTIVE*", dwState);
4931        if (dwState & DEVICE_STATE_DISABLED)
4932            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : DISABLED", dwState);
4933        if (dwState & DEVICE_STATE_NOTPRESENT)
4934            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : NOTPRESENT", dwState);
4935        if (dwState & DEVICE_STATE_UNPLUGGED)
4936            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "state (0x%x)  : UNPLUGGED", dwState);
4937
4938        // Check the hardware volume capabilities.
4939        DWORD dwHwSupportMask = 0;
4940        hr = pEndpoint->Activate(__uuidof(IAudioEndpointVolume), CLSCTX_ALL,
4941                               NULL, (void**)&pEndpointVolume);
4942        CONTINUE_ON_ERROR(hr);
4943        hr = pEndpointVolume->QueryHardwareSupport(&dwHwSupportMask);
4944        CONTINUE_ON_ERROR(hr);
4945        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
4946            // The audio endpoint device supports a hardware volume control
4947            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "hwmask (0x%x) : HARDWARE_SUPPORT_VOLUME", dwHwSupportMask);
4948        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_MUTE)
4949            // The audio endpoint device supports a hardware mute control
4950            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "hwmask (0x%x) : HARDWARE_SUPPORT_MUTE", dwHwSupportMask);
4951        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_METER)
4952            // The audio endpoint device supports a hardware peak meter
4953            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "hwmask (0x%x) : HARDWARE_SUPPORT_METER", dwHwSupportMask);
4954
4955        // Check the channel count (#channels in the audio stream that enters or leaves the audio endpoint device)
4956        UINT nChannelCount(0);
4957        hr = pEndpointVolume->GetChannelCount(
4958                                &nChannelCount);
4959        CONTINUE_ON_ERROR(hr);
4960        WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#channels    : %u", nChannelCount);
4961
4962        if (dwHwSupportMask & ENDPOINT_HARDWARE_SUPPORT_VOLUME)
4963        {
4964            // Get the volume range.
4965            float fLevelMinDB(0.0);
4966            float fLevelMaxDB(0.0);
4967            float fVolumeIncrementDB(0.0);
4968            hr = pEndpointVolume->GetVolumeRange(
4969                                    &fLevelMinDB,
4970                                    &fLevelMaxDB,
4971                                    &fVolumeIncrementDB);
4972            CONTINUE_ON_ERROR(hr);
4973            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "volume range : %4.2f (min), %4.2f (max), %4.2f (inc) [dB]",
4974                fLevelMinDB, fLevelMaxDB, fVolumeIncrementDB);
4975
4976            // The volume range from vmin = fLevelMinDB to vmax = fLevelMaxDB is divided
4977            // into n uniform intervals of size vinc = fVolumeIncrementDB, where
4978            // n = (vmax ?vmin) / vinc.
4979            // The values vmin, vmax, and vinc are measured in decibels. The client can set
4980            // the volume level to one of n + 1 discrete values in the range from vmin to vmax.
4981            int n = (int)((fLevelMaxDB-fLevelMinDB)/fVolumeIncrementDB);
4982            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "#intervals   : %d", n);
4983
4984            // Get information about the current step in the volume range.
4985            // This method represents the volume level of the audio stream that enters or leaves
4986            // the audio endpoint device as an index or "step" in a range of discrete volume levels.
4987            // Output value nStepCount is the number of steps in the range. Output value nStep
4988            // is the step index of the current volume level. If the number of steps is n = nStepCount,
4989            // then step index nStep can assume values from 0 (minimum volume) to n ?1 (maximum volume).
4990            UINT nStep(0);
4991            UINT nStepCount(0);
4992            hr = pEndpointVolume->GetVolumeStepInfo(
4993                                    &nStep,
4994                                    &nStepCount);
4995            CONTINUE_ON_ERROR(hr);
4996            WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id, "volume steps : %d (nStep), %d (nStepCount)", nStep, nStepCount);
4997        }
4998Next:
4999        if (FAILED(hr)) {
5000          WEBRTC_TRACE(kTraceInfo, kTraceAudioDevice, _id,
5001                       "Error when logging device information");
5002        }
5003        CoTaskMemFree(pwszID);
5004        pwszID = NULL;
5005        PropVariantClear(&varName);
5006        SAFE_RELEASE(pProps);
5007        SAFE_RELEASE(pEndpoint);
5008        SAFE_RELEASE(pEndpointVolume);
5009    }
5010    SAFE_RELEASE(pCollection);
5011    return 0;
5012
5013Exit:
5014    _TraceCOMError(hr);
5015    CoTaskMemFree(pwszID);
5016    pwszID = NULL;
5017    SAFE_RELEASE(pCollection);
5018    SAFE_RELEASE(pEndpoint);
5019    SAFE_RELEASE(pEndpointVolume);
5020    SAFE_RELEASE(pProps);
5021    return -1;
5022}
5023
5024// ----------------------------------------------------------------------------
5025//  _TraceCOMError
5026// ----------------------------------------------------------------------------
5027
5028void AudioDeviceWindowsCore::_TraceCOMError(HRESULT hr) const
5029{
5030    TCHAR buf[MAXERRORLENGTH];
5031    TCHAR errorText[MAXERRORLENGTH];
5032
5033    const DWORD dwFlags = FORMAT_MESSAGE_FROM_SYSTEM |
5034                          FORMAT_MESSAGE_IGNORE_INSERTS;
5035    const DWORD dwLangID = MAKELANGID(LANG_ENGLISH, SUBLANG_ENGLISH_US);
5036
5037    // Gets the system's human readable message string for this HRESULT.
5038    // All error message in English by default.
5039    DWORD messageLength = ::FormatMessageW(dwFlags,
5040                                           0,
5041                                           hr,
5042                                           dwLangID,
5043                                           errorText,
5044                                           MAXERRORLENGTH,
5045                                           NULL);
5046
5047    assert(messageLength <= MAXERRORLENGTH);
5048
5049    // Trims tailing white space (FormatMessage() leaves a trailing cr-lf.).
5050    for (; messageLength && ::isspace(errorText[messageLength - 1]);
5051         --messageLength)
5052    {
5053        errorText[messageLength - 1] = '\0';
5054    }
5055
5056    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id,
5057        "Core Audio method failed (hr=0x%x)", hr);
5058    StringCchPrintf(buf, MAXERRORLENGTH, TEXT("Error details: "));
5059    StringCchCat(buf, MAXERRORLENGTH, errorText);
5060    WEBRTC_TRACE(kTraceError, kTraceAudioDevice, _id, "%s", WideToUTF8(buf));
5061}
5062
5063// ----------------------------------------------------------------------------
5064//  _SetThreadName
5065// ----------------------------------------------------------------------------
5066
void AudioDeviceWindowsCore::_SetThreadName(DWORD dwThreadID, LPCSTR szThreadName)
{
    // See http://msdn.microsoft.com/en-us/library/xcb2z8hs(VS.71).aspx for details on the code
    // in this function. Name of article is "Setting a Thread Name (Unmanaged)".
    //
    // Debugger convention: fill in a THREADNAME_INFO record and raise the
    // magic exception 0x406D1388. An attached debugger intercepts the
    // exception and uses the record to label the thread; without a
    // debugger, the __except handler below swallows it.

    THREADNAME_INFO info;
    info.dwType = 0x1000;           // fixed value required by the convention
    info.szName = szThreadName;     // thread name (ANSI string)
    info.dwThreadID = dwThreadID;   // id of the thread to name
    info.dwFlags = 0;

    __try
    {
        RaiseException( 0x406D1388, 0, sizeof(info)/sizeof(DWORD), (ULONG_PTR *)&info );
    }
    __except (EXCEPTION_CONTINUE_EXECUTION)
    {
        // Intentionally empty: continue as if the exception never happened.
    }
}
5086
5087// ----------------------------------------------------------------------------
5088//  WideToUTF8
5089// ----------------------------------------------------------------------------
5090
// Converts |src| to UTF-8 using the member scratch buffer _str and
// returns a pointer into that buffer (empty string on failure or if the
// result would not fit). NOTE(review): the returned pointer is only valid
// until the next call on this object — presumably callers use it
// immediately (as _TraceCOMError does); confirm before reusing elsewhere.
char* AudioDeviceWindowsCore::WideToUTF8(const TCHAR* src) const {
#ifdef UNICODE
    const size_t kStrLen = sizeof(_str);
    memset(_str, 0, kStrLen);
    // Get required size (in bytes) to be able to complete the conversion.
    int required_size = WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, 0, 0, 0);
    if (required_size <= kStrLen)
    {
        // Process the entire input string, including the terminating null char.
        if (WideCharToMultiByte(CP_UTF8, 0, src, -1, _str, kStrLen, 0, 0) == 0)
            memset(_str, 0, kStrLen);  // conversion failed: return ""
    }
    return _str;
#else
    // Without UNICODE, TCHAR is already char: no conversion needed.
    return const_cast<char*>(src);
#endif
}
5108
5109
5110bool AudioDeviceWindowsCore::KeyPressed() const{
5111
5112  int key_down = 0;
5113  for (int key = VK_SPACE; key < VK_NUMLOCK; key++) {
5114    short res = GetAsyncKeyState(key);
5115    key_down |= res & 0x1; // Get the LSB
5116  }
5117  return (key_down > 0);
5118}
5119}  // namespace webrtc
5120
5121#endif  // WEBRTC_WINDOWS_CORE_AUDIO_BUILD
5122