core_audio_util_win.h revision 58537e28ecd584eab876aee8be7156509866d23a
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// Utility methods for the Core Audio API on Windows.
// Always ensure that Core Audio is supported before using these methods.
// Use media::CoreAudioIsSupported() for this purpose.
// Also, all methods must be called on a valid COM thread. This can be done
// by using the base::win::ScopedCOMInitializer helper class.

11#ifndef MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
12#define MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
13
14#include <audioclient.h>
15#include <mmdeviceapi.h>
16#include <string>
17
18#include "base/basictypes.h"
19#include "base/time/time.h"
20#include "base/win/scoped_comptr.h"
21#include "media/audio/audio_device_name.h"
22#include "media/audio/audio_parameters.h"
23#include "media/base/media_export.h"
24
25using base::win::ScopedComPtr;
26
27namespace media {
28
29class MEDIA_EXPORT CoreAudioUtil {
30 public:
31  // Returns true if Windows Core Audio is supported.
32  // Always verify that this method returns true before using any of the
33  // methods in this class.
34  static bool IsSupported();
35
36  // Converts between reference time to base::TimeDelta.
37  // One reference-time unit is 100 nanoseconds.
38  // Example: double s = RefererenceTimeToTimeDelta(t).InMillisecondsF();
39  static base::TimeDelta RefererenceTimeToTimeDelta(REFERENCE_TIME time);
40
41  // Returns AUDCLNT_SHAREMODE_EXCLUSIVE if --enable-exclusive-mode is used
42  // as command-line flag and AUDCLNT_SHAREMODE_SHARED otherwise (default).
43  static AUDCLNT_SHAREMODE GetShareMode();
44
45  // The Windows Multimedia Device (MMDevice) API enables audio clients to
46  // discover audio endpoint devices and determine their capabilities.
47
48  // Number of active audio devices in the specified flow data flow direction.
49  // Set |data_flow| to eAll to retrieve the total number of active audio
50  // devices.
51  static int NumberOfActiveDevices(EDataFlow data_flow);
52
53  // Creates an IMMDeviceEnumerator interface which provides methods for
54  // enumerating audio endpoint devices.
55  static ScopedComPtr<IMMDeviceEnumerator> CreateDeviceEnumerator();
56
57  // Creates a default endpoint device that is specified by a data-flow
58  // direction and role, e.g. default render device.
59  static ScopedComPtr<IMMDevice> CreateDefaultDevice(
60      EDataFlow data_flow, ERole role);
61
62  // Returns the device id of the default output device or an empty string
63  // if no such device exists or if the default device has been disabled.
64  static std::string GetDefaultOutputDeviceID();
65
66  // Creates an endpoint device that is specified by a unique endpoint device-
67  // identification string.
68  static ScopedComPtr<IMMDevice> CreateDevice(const std::string& device_id);
69
70  // Returns the unique ID and user-friendly name of a given endpoint device.
71  // Example: "{0.0.1.00000000}.{8db6020f-18e3-4f25-b6f5-7726c9122574}", and
72  //          "Microphone (Realtek High Definition Audio)".
73  static HRESULT GetDeviceName(IMMDevice* device, AudioDeviceName* name);
74
75  // Returns the device ID/path of the controller (a.k.a. physical device that
76  // |device| is connected to.  This ID will be the same for all devices from
77  // the same controller so it is useful for doing things like determining
78  // whether a set of output and input devices belong to the same controller.
79  // The device enumerator is required as well as the device itself since
80  // looking at the device topology is required and we need to open up
81  // associated devices to determine the controller id.
82  // If the ID could not be determined for some reason, an empty string is
83  // returned.
84  static std::string GetAudioControllerID(IMMDevice* device,
85      IMMDeviceEnumerator* enumerator);
86
87  // Accepts an id of an input device and finds a matching output device id.
88  // If the associated hardware does not have an audio output device (e.g.
89  // a webcam with a mic), an empty string is returned.
90  static std::string GetMatchingOutputDeviceID(
91      const std::string& input_device_id);
92
93  // Gets the user-friendly name of the endpoint device which is represented
94  // by a unique id in |device_id|.
95  static std::string GetFriendlyName(const std::string& device_id);
96
97  // Returns true if the provided unique |device_id| corresponds to the current
98  // default device for the specified by a data-flow direction and role.
99  static bool DeviceIsDefault(
100      EDataFlow flow, ERole role, const std::string& device_id);
101
102  // Query if the audio device is a rendering device or a capture device.
103  static EDataFlow GetDataFlow(IMMDevice* device);
104
105  // The Windows Audio Session API (WASAPI) enables client applications to
106  // manage the flow of audio data between the application and an audio endpoint
107  // device.
108
109  // Create an IAudioClient interface for the default IMMDevice where
110  // flow direction and role is define by |data_flow| and |role|.
111  // The IAudioClient interface enables a client to create and initialize an
112  // audio stream between an audio application and the audio engine (for a
113  // shared-mode stream) or the hardware buffer of an audio endpoint device
114  // (for an exclusive-mode stream).
115  static ScopedComPtr<IAudioClient> CreateDefaultClient(EDataFlow data_flow,
116                                                        ERole role);
117
118  // Create an IAudioClient interface for an existing IMMDevice given by
119  // |audio_device|. Flow direction and role is define by the |audio_device|.
120  static ScopedComPtr<IAudioClient> CreateClient(IMMDevice* audio_device);
121
122  // Get the mix format that the audio engine uses internally for processing
123  // of shared-mode streams. This format is not necessarily a format that the
124  // audio endpoint device supports. Thus, the caller might not succeed in
125  // creating an exclusive-mode stream with a format obtained by this method.
126  static HRESULT GetSharedModeMixFormat(IAudioClient* client,
127                                        WAVEFORMATPCMEX* format);
128
129  // Get the mix format that the audio engine uses internally for processing
130  // of shared-mode streams using the default IMMDevice where flow direction
131  // and role is define by |data_flow| and |role|.
132  static HRESULT GetDefaultSharedModeMixFormat(EDataFlow data_flow,
133                                               ERole role,
134                                               WAVEFORMATPCMEX* format);
135
136  // Returns true if the specified |client| supports the format in |format|
137  // for the given |share_mode| (shared or exclusive).
138  static bool IsFormatSupported(IAudioClient* client,
139                                AUDCLNT_SHAREMODE share_mode,
140                                const WAVEFORMATPCMEX* format);
141
142  // Returns true if the specified |channel_layout| is supported for the
143  // default IMMDevice where flow direction and role is define by |data_flow|
144  // and |role|. If this method returns true for a certain channel layout, it
145  // means that SharedModeInitialize() will succeed using a format based on
146  // the preferred format where the channel layout has been modified.
147  static bool IsChannelLayoutSupported(EDataFlow data_flow, ERole role,
148                                       ChannelLayout channel_layout);
149
150  // For a shared-mode stream, the audio engine periodically processes the
151  // data in the endpoint buffer at the period obtained in |device_period|.
152  // For an exclusive mode stream, |device_period| corresponds to the minimum
153  // time interval between successive processing by the endpoint device.
154  // This period plus the stream latency between the buffer and endpoint device
155  // represents the minimum possible latency that an audio application can
156  // achieve. The time in |device_period| is expressed in 100-nanosecond units.
157  static HRESULT GetDevicePeriod(IAudioClient* client,
158                                 AUDCLNT_SHAREMODE share_mode,
159                                 REFERENCE_TIME* device_period);
160
161  // Get the preferred audio parameters for the specified |client| or the
162  // given direction and role is define by |data_flow| and |role|, or the
163  // unique device id given by |device_id|.
164  // The acquired values should only be utilized for shared mode streamed since
165  // there are no preferred settings for an exclusive mode stream.
166  static HRESULT GetPreferredAudioParameters(IAudioClient* client,
167                                             AudioParameters* params);
168  static HRESULT GetPreferredAudioParameters(EDataFlow data_flow, ERole role,
169                                             AudioParameters* params);
170  static HRESULT GetPreferredAudioParameters(const std::string& device_id,
171                                             AudioParameters* params);
172
173  // After activating an IAudioClient interface on an audio endpoint device,
174  // the client must initialize it once, and only once, to initialize the audio
175  // stream between the client and the device. In shared mode, the client
176  // connects indirectly through the audio engine which does the mixing.
177  // In exclusive mode, the client connects directly to the audio hardware.
178  // If a valid event is provided in |event_handle|, the client will be
179  // initialized for event-driven buffer handling. If |event_handle| is set to
180  // NULL, event-driven buffer handling is not utilized.
181  static HRESULT SharedModeInitialize(IAudioClient* client,
182                                      const WAVEFORMATPCMEX* format,
183                                      HANDLE event_handle,
184                                      uint32* endpoint_buffer_size);
185  // TODO(henrika): add ExclusiveModeInitialize(...)
186
187  // Create an IAudioRenderClient client for an existing IAudioClient given by
188  // |client|. The IAudioRenderClient interface enables a client to write
189  // output data to a rendering endpoint buffer.
190  static ScopedComPtr<IAudioRenderClient> CreateRenderClient(
191      IAudioClient* client);
192
193  // Create an IAudioCaptureClient client for an existing IAudioClient given by
194  // |client|. The IAudioCaptureClient interface enables a client to read
195  // input data from a capture endpoint buffer.
196  static ScopedComPtr<IAudioCaptureClient> CreateCaptureClient(
197      IAudioClient* client);
198
199  // Fills up the endpoint rendering buffer with silence for an existing
200  // IAudioClient given by |client| and a corresponding IAudioRenderClient
201  // given by |render_client|.
202  static bool FillRenderEndpointBufferWithSilence(
203      IAudioClient* client, IAudioRenderClient* render_client);
204
205 private:
206  CoreAudioUtil() {}
207  ~CoreAudioUtil() {}
208  DISALLOW_COPY_AND_ASSIGN(CoreAudioUtil);
209};
210
211}  // namespace media
212
213#endif  // MEDIA_AUDIO_WIN_CORE_AUDIO_UTIL_WIN_H_
214