core_audio_util_win.cc revision 5d1f7b1de12d16ceb2c938c56701a3e8bfa558f7
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "media/audio/win/core_audio_util_win.h"

#include <audioclient.h>
#include <devicetopology.h>
#include <functiondiscoverykeys_devpkey.h>

#include "base/command_line.h"
#include "base/logging.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/scoped_co_mem.h"
#include "base/win/scoped_handle.h"
#include "base/win/scoped_propvariant.h"
#include "base/win/windows_version.h"
#include "media/base/media_switches.h"

using base::win::ScopedCoMem;
using base::win::ScopedHandle;

namespace media {

enum { KSAUDIO_SPEAKER_UNSUPPORTED = 0 };

// Converts Microsoft's channel configuration to ChannelLayout.
// This mapping is not perfect but the best we can do given the current
// ChannelLayout enumerator and the Windows-specific speaker configurations
// defined in ksmedia.h. Don't assume that the channel ordering in
// ChannelLayout is exactly the same as the Windows specific configuration.
// As an example: KSAUDIO_SPEAKER_7POINT1_SURROUND is mapped to
// CHANNEL_LAYOUT_7_1 but the positions of Back L, Back R and Side L, Side R
// speakers are different in these two definitions.
static ChannelLayout ChannelConfigToChannelLayout(ChannelConfig config) {
  switch (config) {
    case KSAUDIO_SPEAKER_DIRECTOUT:
      DVLOG(2) << "KSAUDIO_SPEAKER_DIRECTOUT=>CHANNEL_LAYOUT_NONE";
      return CHANNEL_LAYOUT_NONE;
    case KSAUDIO_SPEAKER_MONO:
      DVLOG(2) << "KSAUDIO_SPEAKER_MONO=>CHANNEL_LAYOUT_MONO";
      return CHANNEL_LAYOUT_MONO;
    case KSAUDIO_SPEAKER_STEREO:
      DVLOG(2) << "KSAUDIO_SPEAKER_STEREO=>CHANNEL_LAYOUT_STEREO";
      return CHANNEL_LAYOUT_STEREO;
    case KSAUDIO_SPEAKER_QUAD:
      DVLOG(2) << "KSAUDIO_SPEAKER_QUAD=>CHANNEL_LAYOUT_QUAD";
      return CHANNEL_LAYOUT_QUAD;
    case KSAUDIO_SPEAKER_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_SURROUND=>CHANNEL_LAYOUT_4_0";
      return CHANNEL_LAYOUT_4_0;
    case KSAUDIO_SPEAKER_5POINT1:
      DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1=>CHANNEL_LAYOUT_5_1_BACK";
      return CHANNEL_LAYOUT_5_1_BACK;
    case KSAUDIO_SPEAKER_5POINT1_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_5POINT1_SURROUND=>CHANNEL_LAYOUT_5_1";
      return CHANNEL_LAYOUT_5_1;
    case KSAUDIO_SPEAKER_7POINT1:
      DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1=>CHANNEL_LAYOUT_7_1_WIDE";
      return CHANNEL_LAYOUT_7_1_WIDE;
    case KSAUDIO_SPEAKER_7POINT1_SURROUND:
      DVLOG(2) << "KSAUDIO_SPEAKER_7POINT1_SURROUND=>CHANNEL_LAYOUT_7_1";
      return CHANNEL_LAYOUT_7_1;
    default:
      DVLOG(2) << "Unsupported channel configuration: " << config;
      return CHANNEL_LAYOUT_UNSUPPORTED;
  }
}

// TODO(henrika): add mapping for all types in the ChannelLayout enumerator.
static ChannelConfig ChannelLayoutToChannelConfig(ChannelLayout layout) {
  switch (layout) {
    case CHANNEL_LAYOUT_NONE:
      DVLOG(2) << "CHANNEL_LAYOUT_NONE=>KSAUDIO_SPEAKER_UNSUPPORTED";
      return KSAUDIO_SPEAKER_UNSUPPORTED;
    case CHANNEL_LAYOUT_UNSUPPORTED:
      DVLOG(2) << "CHANNEL_LAYOUT_UNSUPPORTED=>KSAUDIO_SPEAKER_UNSUPPORTED";
      return KSAUDIO_SPEAKER_UNSUPPORTED;
    case CHANNEL_LAYOUT_MONO:
      DVLOG(2) << "CHANNEL_LAYOUT_MONO=>KSAUDIO_SPEAKER_MONO";
      return KSAUDIO_SPEAKER_MONO;
    case CHANNEL_LAYOUT_STEREO:
      DVLOG(2) << "CHANNEL_LAYOUT_STEREO=>KSAUDIO_SPEAKER_STEREO";
      return KSAUDIO_SPEAKER_STEREO;
    case CHANNEL_LAYOUT_QUAD:
      DVLOG(2) << "CHANNEL_LAYOUT_QUAD=>KSAUDIO_SPEAKER_QUAD";
      return KSAUDIO_SPEAKER_QUAD;
    case CHANNEL_LAYOUT_4_0:
      DVLOG(2) << "CHANNEL_LAYOUT_4_0=>KSAUDIO_SPEAKER_SURROUND";
      return KSAUDIO_SPEAKER_SURROUND;
    case CHANNEL_LAYOUT_5_1_BACK:
      DVLOG(2) << "CHANNEL_LAYOUT_5_1_BACK=>KSAUDIO_SPEAKER_5POINT1";
      return KSAUDIO_SPEAKER_5POINT1;
    case CHANNEL_LAYOUT_5_1:
      DVLOG(2) << "CHANNEL_LAYOUT_5_1=>KSAUDIO_SPEAKER_5POINT1_SURROUND";
      return KSAUDIO_SPEAKER_5POINT1_SURROUND;
    case CHANNEL_LAYOUT_7_1_WIDE:
      DVLOG(2) << "CHANNEL_LAYOUT_7_1_WIDE=>KSAUDIO_SPEAKER_7POINT1";
      return KSAUDIO_SPEAKER_7POINT1;
    case CHANNEL_LAYOUT_7_1:
      DVLOG(2) << "CHANNEL_LAYOUT_7_1=>KSAUDIO_SPEAKER_7POINT1_SURROUND";
      return KSAUDIO_SPEAKER_7POINT1_SURROUND;
    default:
      DVLOG(2) << "Unsupported channel layout: " << layout;
      return KSAUDIO_SPEAKER_UNSUPPORTED;
  }
}

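// Pretty-prints the fields of a WAVEFORMATPCMEX structure so that it can be
// sent to DVLOG streams.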
static std::ostream& operator<<(std::ostream& os,
                                const WAVEFORMATPCMEX& format) {
  os << "wFormatTag: 0x" << std::hex << format.Format.wFormatTag
     << ", nChannels: " << std::dec << format.Format.nChannels
     << ", nSamplesPerSec: " << format.Format.nSamplesPerSec
     << ", nAvgBytesPerSec: " << format.Format.nAvgBytesPerSec
     << ", nBlockAlign: " << format.Format.nBlockAlign
     << ", wBitsPerSample: " << format.Format.wBitsPerSample
     << ", cbSize: " << format.Format.cbSize
     << ", wValidBitsPerSample: " << format.Samples.wValidBitsPerSample
     << ", dwChannelMask: 0x" << std::hex << format.dwChannelMask;
  return os;
}

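// Attempts to load the Audioses.dll system component from the Windows system
// directory. Returns true if the DLL could be loaded.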
static bool LoadAudiosesDll() {
  static const wchar_t* const kAudiosesDLL =
      L"%WINDIR%\\system32\\audioses.dll";

  wchar_t path[MAX_PATH] = {0};
  ExpandEnvironmentStringsW(kAudiosesDLL, path, arraysize(path));
  return (LoadLibraryExW(path, NULL, LOAD_WITH_ALTERED_SEARCH_PATH) != NULL);
}

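// Returns true if it is possible to create an IMMDeviceEnumerator instance,
// i.e., the MMDevice API is functional on this machine and thread.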
static bool CanCreateDeviceEnumerator() {
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
  HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                                NULL, CLSCTX_INPROC_SERVER);

  // If we hit CO_E_NOTINITIALIZED, CoInitialize has not been called and it
  // must be called at least once for each thread that uses the COM library.
  CHECK_NE(hr, CO_E_NOTINITIALIZED);

  return SUCCEEDED(hr);
}

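// Retrieves the unique id of |device| as a UTF-8 string, or an empty string
// on failure.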
static std::string GetDeviceID(IMMDevice* device) {
  ScopedCoMem<WCHAR> device_id_com;
  std::string device_id;
  if (SUCCEEDED(device->GetId(&device_id_com)))
    base::WideToUTF8(device_id_com, wcslen(device_id_com), &device_id);
  return device_id;
}

bool CoreAudioUtil::IsSupported() {
  // It is possible to force usage of WaveXxx APIs by using a command line flag.
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kForceWaveAudio)) {
    DVLOG(1) << "Forcing usage of Windows WaveXxx APIs";
    return false;
  }

  // Microsoft does not plan to make the Core Audio APIs available for use
  // with earlier versions of Windows, including Microsoft Windows Server 2003,
  // Windows XP, Windows Millennium Edition, Windows 2000, and Windows 98.
  if (base::win::GetVersion() < base::win::VERSION_VISTA)
    return false;

  // The audio core APIs are implemented in the Mmdevapi.dll and Audioses.dll
  // system components.
  // Dependency Walker shows that it is enough to verify the possibility to
  // load the Audioses DLL since it depends on Mmdevapi.dll.
  // See http://crbug.com/166397 why this extra step is required to guarantee
  // Core Audio support.
  static bool g_audioses_dll_available = LoadAudiosesDll();
  if (!g_audioses_dll_available)
    return false;

  // Being able to load the Audioses.dll does not seem to be sufficient for
  // all devices to guarantee Core Audio support. To be 100% sure, we also
  // verify that it is possible to create the IMMDeviceEnumerator interface.
  // If this works as well we should be home free.
  static bool g_can_create_device_enumerator = CanCreateDeviceEnumerator();
  LOG_IF(ERROR, !g_can_create_device_enumerator)
      << "Failed to create Core Audio device enumerator on thread with ID "
      << GetCurrentThreadId();
  return g_can_create_device_enumerator;
}

base::TimeDelta CoreAudioUtil::RefererenceTimeToTimeDelta(REFERENCE_TIME time) {
  // Each unit of reference time is 100 nanoseconds <=> 0.1 microsecond.
  return base::TimeDelta::FromMicroseconds(0.1 * time + 0.5);
}

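// Returns AUDCLNT_SHAREMODE_EXCLUSIVE if the kEnableExclusiveAudio switch is
// present on the command line; otherwise shared mode is used.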
AUDCLNT_SHAREMODE CoreAudioUtil::GetShareMode() {
  const CommandLine* cmd_line = CommandLine::ForCurrentProcess();
  if (cmd_line->HasSwitch(switches::kEnableExclusiveAudio))
    return AUDCLNT_SHAREMODE_EXCLUSIVE;
  return AUDCLNT_SHAREMODE_SHARED;
}

int CoreAudioUtil::NumberOfActiveDevices(EDataFlow data_flow) {
  DCHECK(IsSupported());
  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator)
    return 0;

  // Generate a collection of active (present and not disabled) audio endpoint
  // devices for the specified data-flow direction.
  // This method will succeed even if all devices are disabled.
  ScopedComPtr<IMMDeviceCollection> collection;
  HRESULT hr = device_enumerator->EnumAudioEndpoints(data_flow,
                                                     DEVICE_STATE_ACTIVE,
                                                     collection.Receive());
  if (FAILED(hr)) {
    LOG(ERROR) << "IMMDeviceCollection::EnumAudioEndpoints: " << std::hex << hr;
    return 0;
  }

  // Retrieve the number of active audio devices for the specified direction.
  UINT number_of_active_devices = 0;
  collection->GetCount(&number_of_active_devices);
  DVLOG(2) << ((data_flow == eCapture) ? "[in ] " : "[out] ")
           << "number of devices: " << number_of_active_devices;
  return static_cast<int>(number_of_active_devices);
}

ScopedComPtr<IMMDeviceEnumerator> CoreAudioUtil::CreateDeviceEnumerator() {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator;
  HRESULT hr = device_enumerator.CreateInstance(__uuidof(MMDeviceEnumerator),
                                                NULL, CLSCTX_INPROC_SERVER);
  CHECK(SUCCEEDED(hr));
  return device_enumerator;
}

ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDefaultDevice(EDataFlow data_flow,
                                                           ERole role) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> endpoint_device;

  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator)
    return endpoint_device;

  // Retrieve the default audio endpoint for the specified data-flow
  // direction and role.
  HRESULT hr = device_enumerator->GetDefaultAudioEndpoint(
      data_flow, role, endpoint_device.Receive());

  if (FAILED(hr)) {
    DVLOG(1) << "IMMDeviceEnumerator::GetDefaultAudioEndpoint: "
             << std::hex << hr;
    return endpoint_device;
  }

  // Verify that the audio endpoint device is active, i.e., that the audio
  // adapter that connects to the endpoint device is present and enabled.
  DWORD state = DEVICE_STATE_DISABLED;
  hr = endpoint_device->GetState(&state);
  if (SUCCEEDED(hr)) {
    if (!(state & DEVICE_STATE_ACTIVE)) {
      DVLOG(1) << "Selected endpoint device is not active";
      endpoint_device.Release();
    }
  }
  return endpoint_device;
}

std::string CoreAudioUtil::GetDefaultOutputDeviceID() {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device(CreateDefaultDevice(eRender, eConsole));
  return device ? GetDeviceID(device) : std::string();
}

ScopedComPtr<IMMDevice> CoreAudioUtil::CreateDevice(
    const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> endpoint_device;

  // Create the IMMDeviceEnumerator interface.
  ScopedComPtr<IMMDeviceEnumerator> device_enumerator =
      CreateDeviceEnumerator();
  if (!device_enumerator)
    return endpoint_device;

  // Retrieve an audio device specified by an endpoint device-identification
  // string.
  HRESULT hr = device_enumerator->GetDevice(
      base::UTF8ToUTF16(device_id).c_str(), endpoint_device.Receive());
  DVLOG_IF(1, FAILED(hr)) << "IMMDeviceEnumerator::GetDevice: "
                          << std::hex << hr;
  return endpoint_device;
}

HRESULT CoreAudioUtil::GetDeviceName(IMMDevice* device, AudioDeviceName* name) {
  DCHECK(IsSupported());

  // Retrieve unique name of endpoint device.
  // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}".
  AudioDeviceName device_name;
  device_name.unique_id = GetDeviceID(device);
  if (device_name.unique_id.empty())
    return E_FAIL;

  // Retrieve user-friendly name of endpoint device.
  // Example: "Microphone (Realtek High Definition Audio)".
  ScopedComPtr<IPropertyStore> properties;
  HRESULT hr = device->OpenPropertyStore(STGM_READ, properties.Receive());
  if (FAILED(hr))
    return hr;
  base::win::ScopedPropVariant friendly_name;
  hr = properties->GetValue(PKEY_Device_FriendlyName, friendly_name.Receive());
  if (FAILED(hr))
    return hr;
  if (friendly_name.get().vt == VT_LPWSTR && friendly_name.get().pwszVal) {
    base::WideToUTF8(friendly_name.get().pwszVal,
                     wcslen(friendly_name.get().pwszVal),
                     &device_name.device_name);
  }

  *name = device_name;
  DVLOG(2) << "friendly name: " << device_name.device_name;
  DVLOG(2) << "unique id    : " << device_name.unique_id;
  return hr;
}

std::string CoreAudioUtil::GetAudioControllerID(IMMDevice* device,
    IMMDeviceEnumerator* enumerator) {
  DCHECK(IsSupported());

  // Fetching the controller device id could be as simple as fetching the value
  // of the "{B3F8FA53-0004-438E-9003-51A46E139BFC},2" property in the property
  // store of the |device|, but that key isn't defined in any header and
  // according to MS should not be relied upon.
  // So, instead, we go deeper, look at the device topology and fetch the
  // PKEY_Device_InstanceId of the associated physical audio device.
  ScopedComPtr<IDeviceTopology> topology;
  ScopedComPtr<IConnector> connector;
  ScopedCoMem<WCHAR> filter_id;
  if (FAILED(device->Activate(__uuidof(IDeviceTopology), CLSCTX_ALL, NULL,
                              topology.ReceiveVoid())) ||
      // For our purposes, checking the first connected device should be
      // enough, and if more than one device is connected we're not sure how
      // to handle that anyway. So we pass 0.
      FAILED(topology->GetConnector(0, connector.Receive())) ||
      FAILED(connector->GetDeviceIdConnectedTo(&filter_id))) {
    DLOG(ERROR) << "Failed to get the device identifier of the audio device";
    return std::string();
  }

  // Now look at the properties of the connected device node and fetch the
  // instance id (PKEY_Device_InstanceId) of the device node that uniquely
  // identifies the controller.
  ScopedComPtr<IMMDevice> device_node;
  ScopedComPtr<IPropertyStore> properties;
  base::win::ScopedPropVariant instance_id;
  if (FAILED(enumerator->GetDevice(filter_id, device_node.Receive())) ||
      FAILED(device_node->OpenPropertyStore(STGM_READ, properties.Receive())) ||
      FAILED(properties->GetValue(PKEY_Device_InstanceId,
                                  instance_id.Receive())) ||
      instance_id.get().vt != VT_LPWSTR) {
    DLOG(ERROR) << "Failed to get instance id of the audio device node";
    return std::string();
  }

  std::string controller_id;
  base::WideToUTF8(instance_id.get().pwszVal,
                   wcslen(instance_id.get().pwszVal),
                   &controller_id);

  return controller_id;
}

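// Maps an input (capture) device id to the id of an active output (render)
// device that is connected to the same audio controller, if one exists.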
std::string CoreAudioUtil::GetMatchingOutputDeviceID(
    const std::string& input_device_id) {
  ScopedComPtr<IMMDevice> input_device(CreateDevice(input_device_id));
  if (!input_device)
    return std::string();

  // See if we can get id of the associated controller.
  ScopedComPtr<IMMDeviceEnumerator> enumerator(CreateDeviceEnumerator());
  std::string controller_id(GetAudioControllerID(input_device, enumerator));
  if (controller_id.empty())
    return std::string();

  // Now enumerate the available (and active) output devices and see if any of
  // them is associated with the same controller.
  ScopedComPtr<IMMDeviceCollection> collection;
  enumerator->EnumAudioEndpoints(eRender, DEVICE_STATE_ACTIVE,
      collection.Receive());
  if (!collection)
    return std::string();

  UINT count = 0;
  collection->GetCount(&count);
  ScopedComPtr<IMMDevice> output_device;
  for (UINT i = 0; i < count; ++i) {
    collection->Item(i, output_device.Receive());
    std::string output_controller_id(GetAudioControllerID(
        output_device, enumerator));
    if (output_controller_id == controller_id)
      break;
    output_device = NULL;
  }

  return output_device ? GetDeviceID(output_device) : std::string();
}

std::string CoreAudioUtil::GetFriendlyName(const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> audio_device = CreateDevice(device_id);
  if (!audio_device)
    return std::string();

  AudioDeviceName device_name;
  HRESULT hr = GetDeviceName(audio_device, &device_name);
  if (FAILED(hr))
    return std::string();

  return device_name.device_name;
}

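// Returns true if |device_id| identifies the default device for the given
// data-flow direction and role.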
bool CoreAudioUtil::DeviceIsDefault(EDataFlow flow,
                                    ERole role,
                                    const std::string& device_id) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device = CreateDefaultDevice(flow, role);
  if (!device)
    return false;

  std::string str_default(GetDeviceID(device));
  return device_id.compare(str_default) == 0;
}

EDataFlow CoreAudioUtil::GetDataFlow(IMMDevice* device) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMEndpoint> endpoint;
  HRESULT hr = device->QueryInterface(endpoint.Receive());
  if (FAILED(hr)) {
    DVLOG(1) << "IMMDevice::QueryInterface: " << std::hex << hr;
    return eAll;
  }

  EDataFlow data_flow;
  hr = endpoint->GetDataFlow(&data_flow);
  if (FAILED(hr)) {
    DVLOG(1) << "IMMEndpoint::GetDataFlow: " << std::hex << hr;
    return eAll;
  }
  return data_flow;
}

ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
    IMMDevice* audio_device) {
  DCHECK(IsSupported());

  // Creates and activates an IAudioClient COM object given the selected
  // endpoint device.
  ScopedComPtr<IAudioClient> audio_client;
  HRESULT hr = audio_device->Activate(__uuidof(IAudioClient),
                                      CLSCTX_INPROC_SERVER,
                                      NULL,
                                      audio_client.ReceiveVoid());
  DVLOG_IF(1, FAILED(hr)) << "IMMDevice::Activate: " << std::hex << hr;
  return audio_client;
}

ScopedComPtr<IAudioClient> CoreAudioUtil::CreateDefaultClient(
    EDataFlow data_flow, ERole role) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> default_device(CreateDefaultDevice(data_flow, role));
  return (default_device ? CreateClient(default_device) :
      ScopedComPtr<IAudioClient>());
}

ScopedComPtr<IAudioClient> CoreAudioUtil::CreateClient(
    const std::string& device_id, EDataFlow data_flow, ERole role) {
  if (device_id.empty())
    return CreateDefaultClient(data_flow, role);

  ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
  if (!device)
    return ScopedComPtr<IAudioClient>();

  return CreateClient(device);
}

HRESULT CoreAudioUtil::GetSharedModeMixFormat(
    IAudioClient* client, WAVEFORMATPCMEX* format) {
  DCHECK(IsSupported());
  ScopedCoMem<WAVEFORMATPCMEX> format_pcmex;
  HRESULT hr = client->GetMixFormat(
      reinterpret_cast<WAVEFORMATEX**>(&format_pcmex));
  if (FAILED(hr))
    return hr;

  size_t bytes = sizeof(WAVEFORMATEX) + format_pcmex->Format.cbSize;
  DCHECK_EQ(bytes, sizeof(WAVEFORMATPCMEX));

  memcpy(format, format_pcmex, bytes);
  DVLOG(2) << *format;

  return hr;
}

bool CoreAudioUtil::IsFormatSupported(IAudioClient* client,
                                      AUDCLNT_SHAREMODE share_mode,
                                      const WAVEFORMATPCMEX* format) {
  DCHECK(IsSupported());
  ScopedCoMem<WAVEFORMATEXTENSIBLE> closest_match;
  HRESULT hr = client->IsFormatSupported(
      share_mode, reinterpret_cast<const WAVEFORMATEX*>(format),
      reinterpret_cast<WAVEFORMATEX**>(&closest_match));

  // This log can only be triggered for shared mode.
  DLOG_IF(ERROR, hr == S_FALSE) << "Format is not supported "
                                << "but a closest match exists.";
  // This log can be triggered both for shared and exclusive modes.
  DLOG_IF(ERROR, hr == AUDCLNT_E_UNSUPPORTED_FORMAT) << "Unsupported format.";
  if (hr == S_FALSE) {
    DVLOG(2) << *closest_match;
  }

  return (hr == S_OK);
}

bool CoreAudioUtil::IsChannelLayoutSupported(const std::string& device_id,
                                             EDataFlow data_flow,
                                             ERole role,
                                             ChannelLayout channel_layout) {
  DCHECK(IsSupported());

  // First, get the preferred mixing format for shared mode streams.

  ScopedComPtr<IAudioClient> client(CreateClient(device_id, data_flow, role));
  if (!client)
    return false;

  WAVEFORMATPCMEX format;
  HRESULT hr = GetSharedModeMixFormat(client, &format);
  if (FAILED(hr))
    return false;

  // Next, check if it is possible to use an alternative format where the
  // channel layout (and possibly number of channels) is modified.

  // Convert generic channel layout into Windows-specific channel configuration.
  ChannelConfig new_config = ChannelLayoutToChannelConfig(channel_layout);
  if (new_config == KSAUDIO_SPEAKER_UNSUPPORTED) {
    return false;
  }
  format.dwChannelMask = new_config;

  // Modify the format if the new channel layout has changed the number of
  // utilized channels.
  const int channels = ChannelLayoutToChannelCount(channel_layout);
  if (channels != format.Format.nChannels) {
    format.Format.nChannels = channels;
    format.Format.nBlockAlign = (format.Format.wBitsPerSample / 8) * channels;
    format.Format.nAvgBytesPerSec = format.Format.nSamplesPerSec *
                                    format.Format.nBlockAlign;
  }
  DVLOG(2) << format;

  // Some devices can initialize a shared-mode stream with a format that is
  // not identical to the mix format obtained from the GetMixFormat() method.
  // However, the chances of succeeding increase if we use the same number of
  // channels and the same sample rate as the mix format. I.e., this call will
  // return true only in those cases where the audio engine is able to support
  // an even wider range of shared-mode formats, e.g., where the installation
  // package for the audio device includes a local effects (LFX) audio
  // processing object (APO) that can handle format conversions.
  return CoreAudioUtil::IsFormatSupported(client, AUDCLNT_SHAREMODE_SHARED,
                                          &format);
}

HRESULT CoreAudioUtil::GetDevicePeriod(IAudioClient* client,
                                       AUDCLNT_SHAREMODE share_mode,
                                       REFERENCE_TIME* device_period) {
  DCHECK(IsSupported());

  // Get the period of the engine thread.
  REFERENCE_TIME default_period = 0;
  REFERENCE_TIME minimum_period = 0;
  HRESULT hr = client->GetDevicePeriod(&default_period, &minimum_period);
  if (FAILED(hr))
    return hr;

  *device_period = (share_mode == AUDCLNT_SHAREMODE_SHARED) ? default_period :
      minimum_period;
  DVLOG(2) << "device_period: "
           << RefererenceTimeToTimeDelta(*device_period).InMillisecondsF()
           << " [ms]";
  return hr;
}

HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    IAudioClient* client, AudioParameters* params) {
  DCHECK(IsSupported());
  WAVEFORMATPCMEX mix_format;
  HRESULT hr = GetSharedModeMixFormat(client, &mix_format);
  if (FAILED(hr))
    return hr;

  REFERENCE_TIME default_period = 0;
  hr = GetDevicePeriod(client, AUDCLNT_SHAREMODE_SHARED, &default_period);
  if (FAILED(hr))
    return hr;

  // Get the integer mask which corresponds to the channel layout the
  // audio engine uses for its internal processing/mixing of shared-mode
  // streams. This mask indicates which channels are present in the multi-
  // channel stream. The least significant bit corresponds with the Front Left
  // speaker, the next least significant bit corresponds to the Front Right
  // speaker, and so on, continuing in the order defined in KsMedia.h.
  // See http://msdn.microsoft.com/en-us/library/windows/hardware/ff537083.aspx
  // for more details.
  ChannelConfig channel_config = mix_format.dwChannelMask;

  // Convert Microsoft's channel configuration to generic ChannelLayout.
  ChannelLayout channel_layout = ChannelConfigToChannelLayout(channel_config);

  // Some devices don't appear to set a valid channel layout, so guess based on
  // the number of channels.  See http://crbug.com/311906.
  if (channel_layout == CHANNEL_LAYOUT_UNSUPPORTED) {
    VLOG(1) << "Unsupported channel config: "
            << std::hex << channel_config
            << ".  Guessing layout by channel count: "
            << std::dec << mix_format.Format.nChannels;
    channel_layout = GuessChannelLayout(mix_format.Format.nChannels);
  }

  // Preferred sample rate.
  int sample_rate = mix_format.Format.nSamplesPerSec;

  // TODO(henrika): possibly use format.Format.wBitsPerSample here instead.
  // We use a hard-coded value of 16 bits per sample today even if most audio
  // engines do the actual mixing in 32 bits per sample.
  int bits_per_sample = 16;

  // We are using the native device period to derive the smallest possible
  // buffer size in shared mode. Note that the actual endpoint buffer will be
  // larger than this size but it will be possible to fill it up in two calls.
  // TODO(henrika): ensure that this scheme works for capturing as well.
  int frames_per_buffer = static_cast<int>(sample_rate *
      RefererenceTimeToTimeDelta(default_period).InSecondsF() + 0.5);

  DVLOG(1) << "channel_layout   : " << channel_layout;
  DVLOG(1) << "sample_rate      : " << sample_rate;
  DVLOG(1) << "bits_per_sample  : " << bits_per_sample;
  DVLOG(1) << "frames_per_buffer: " << frames_per_buffer;

  AudioParameters audio_params(AudioParameters::AUDIO_PCM_LOW_LATENCY,
                               channel_layout,
                               sample_rate,
                               bits_per_sample,
                               frames_per_buffer);

  *params = audio_params;
  return hr;
}

HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    EDataFlow data_flow, ERole role, AudioParameters* params) {
  DCHECK(IsSupported());
  ScopedComPtr<IAudioClient> client(CreateDefaultClient(data_flow, role));
  if (!client) {
    // Map NULL-pointer to new error code which can be different from the
    // actual error code. The exact value is not important here.
    return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
  }
  return GetPreferredAudioParameters(client, params);
}

HRESULT CoreAudioUtil::GetPreferredAudioParameters(
    const std::string& device_id, AudioParameters* params) {
  DCHECK(IsSupported());
  ScopedComPtr<IMMDevice> device(CreateDevice(device_id));
  if (!device) {
    // Map NULL-pointer to new error code which can be different from the
    // actual error code. The exact value is not important here.
    return AUDCLNT_E_DEVICE_INVALIDATED;
  }

  ScopedComPtr<IAudioClient> client(CreateClient(device));
  if (!client) {
    // Map NULL-pointer to new error code which can be different from the
    // actual error code. The exact value is not important here.
    return AUDCLNT_E_ENDPOINT_CREATE_FAILED;
  }
  return GetPreferredAudioParameters(client, params);
}

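// Returns the channel mask (dwChannelMask) of the shared-mode mix format for
// the given device and data-flow direction, or 0 on failure.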
ChannelConfig CoreAudioUtil::GetChannelConfig(const std::string& device_id,
                                              EDataFlow data_flow) {
  ScopedComPtr<IAudioClient> client(
      CreateClient(device_id, data_flow, eConsole));

  WAVEFORMATPCMEX format = {0};
  if (!client || FAILED(GetSharedModeMixFormat(client, &format)))
    return 0;

  return static_cast<ChannelConfig>(format.dwChannelMask);
}

HRESULT CoreAudioUtil::SharedModeInitialize(IAudioClient* client,
                                            const WAVEFORMATPCMEX* format,
                                            HANDLE event_handle,
                                            uint32* endpoint_buffer_size) {
  DCHECK(IsSupported());

  // Use default flags (i.e., don't set AUDCLNT_STREAMFLAGS_NOPERSIST) to
  // ensure that the volume level and muting state for a rendering session
  // are persistent across system restarts. The volume level and muting
  // state for a capture session are never persistent.
  DWORD stream_flags = 0;

  // Enable event-driven streaming if a valid event handle is provided.
  // After the stream starts, the audio engine will signal the event handle
  // to notify the client each time a buffer becomes ready to process.
  // Event-driven buffering is supported for both rendering and capturing.
  // Both shared-mode and exclusive-mode streams can use event-driven buffering.
  bool use_event = (event_handle != NULL &&
                    event_handle != INVALID_HANDLE_VALUE);
  if (use_event)
    stream_flags |= AUDCLNT_STREAMFLAGS_EVENTCALLBACK;
  DVLOG(2) << "stream_flags: 0x" << std::hex << stream_flags;

  // Initialize the shared mode client for minimal delay.
  HRESULT hr = client->Initialize(AUDCLNT_SHAREMODE_SHARED,
                                  stream_flags,
                                  0,
                                  0,
                                  reinterpret_cast<const WAVEFORMATEX*>(format),
                                  NULL);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::Initialize: " << std::hex << hr;
    return hr;
  }

  if (use_event) {
    hr = client->SetEventHandle(event_handle);
    if (FAILED(hr)) {
      DVLOG(1) << "IAudioClient::SetEventHandle: " << std::hex << hr;
      return hr;
    }
  }

  UINT32 buffer_size_in_frames = 0;
  hr = client->GetBufferSize(&buffer_size_in_frames);
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetBufferSize: " << std::hex << hr;
    return hr;
  }

  *endpoint_buffer_size = buffer_size_in_frames;
  DVLOG(2) << "endpoint buffer size: " << buffer_size_in_frames;

  // TODO(henrika): utilize when delay measurements are added.
  REFERENCE_TIME latency = 0;
  hr = client->GetStreamLatency(&latency);
  DVLOG(2) << "stream latency: "
           << RefererenceTimeToTimeDelta(latency).InMillisecondsF() << " [ms]";
  return hr;
}

ScopedComPtr<IAudioRenderClient> CoreAudioUtil::CreateRenderClient(
    IAudioClient* client) {
  DCHECK(IsSupported());

  // Get access to the IAudioRenderClient interface. This interface
  // enables us to write output data to a rendering endpoint buffer.
  ScopedComPtr<IAudioRenderClient> audio_render_client;
  HRESULT hr = client->GetService(__uuidof(IAudioRenderClient),
                                  audio_render_client.ReceiveVoid());
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
    return ScopedComPtr<IAudioRenderClient>();
  }
  return audio_render_client;
}

ScopedComPtr<IAudioCaptureClient> CoreAudioUtil::CreateCaptureClient(
    IAudioClient* client) {
  DCHECK(IsSupported());

  // Get access to the IAudioCaptureClient interface. This interface
  // enables us to read input data from a capturing endpoint buffer.
  ScopedComPtr<IAudioCaptureClient> audio_capture_client;
  HRESULT hr = client->GetService(__uuidof(IAudioCaptureClient),
                                  audio_capture_client.ReceiveVoid());
  if (FAILED(hr)) {
    DVLOG(1) << "IAudioClient::GetService: " << std::hex << hr;
    return ScopedComPtr<IAudioCaptureClient>();
  }
  return audio_capture_client;
}

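// Pre-fills the unused part of the render endpoint buffer with silence by
// releasing it with the AUDCLNT_BUFFERFLAGS_SILENT flag set.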
bool CoreAudioUtil::FillRenderEndpointBufferWithSilence(
    IAudioClient* client, IAudioRenderClient* render_client) {
  DCHECK(IsSupported());

  UINT32 endpoint_buffer_size = 0;
  if (FAILED(client->GetBufferSize(&endpoint_buffer_size)))
    return false;

  UINT32 num_queued_frames = 0;
  if (FAILED(client->GetCurrentPadding(&num_queued_frames)))
    return false;

  BYTE* data = NULL;
  int num_frames_to_fill = endpoint_buffer_size - num_queued_frames;
  if (FAILED(render_client->GetBuffer(num_frames_to_fill, &data)))
    return false;

  // Using the AUDCLNT_BUFFERFLAGS_SILENT flag eliminates the need to
  // explicitly write silence data to the rendering buffer.
  DVLOG(2) << "filling up " << num_frames_to_fill << " frames with silence";
  return SUCCEEDED(render_client->ReleaseBuffer(num_frames_to_fill,
                                                AUDCLNT_BUFFERFLAGS_SILENT));
}

}  // namespace media