Searched refs:audio (Results 226 - 250 of 296) sorted by relevance


/external/qemu/audio/
paaudio.c
3 #include "audio.h"
18 # define D(...) VERBOSE_PRINT(audio,__VA_ARGS__)
19 # define D_ACTIVE VERBOSE_CHECK(audio)
295 dolog ("Internal logic error: Bad audio format %d\n", afmt);
528 D("%s: error opening open pulse audio library: %s",
/external/chromium_org/content/renderer/pepper/
pepper_media_stream_audio_track_host.cc
143 // Clear |buffers_|, so the audio thread will drop all incoming audio data.
162 // we just cleared |buffers_| , so the audio thread will drop all incoming
163 // audio data, and not use buffers in |host_|.
172 // Fill the |buffers_|, so the audio thread can continue receiving audio data.
188 // If |InitBuffers()| is called after this task being posted from the audio
205 // the incomming audio buffer. However, this doesn't necessarily equal
236 ->audio);
/external/chromium_org/third_party/webrtc/modules/rtp_rtcp/interface/
rtp_rtcp.h
33 * audio - True for a audio version of the RTP/RTCP module
58 bool audio; member in struct:webrtc::RtpRtcp::Configuration
315 * Used by the codec module to deliver a video or audio frame for
622 * set audio packet size, used to determine when it's time to send a DTMF
667 * Store the audio level in dBov for header-extension-for-audio-level-
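The rtp_rtcp.h hits above show how the audio flag threads through this module: callers fill in an RtpRtcp::Configuration, set audio to true for the audio flavour of the module, and the flag then governs things like DTMF packetization and the audio-level header extension. A minimal sketch of that setup, assuming the RtpRtcp::CreateRtpRtcp factory and the Configuration members other than audio (id, clock, outgoing_transport) as they appear in this snapshot of WebRTC:

#include "webrtc/common_types.h"
#include "webrtc/modules/rtp_rtcp/interface/rtp_rtcp.h"
#include "webrtc/system_wrappers/interface/clock.h"

// Sketch only: build the audio version of the RTP/RTCP module, mirroring
// the test setup seen in test_api_audio.cc (configuration.audio = true).
// Field names other than `audio` are assumptions based on this tree.
webrtc::RtpRtcp* CreateAudioRtpRtcp(webrtc::Transport* transport) {
  webrtc::RtpRtcp::Configuration configuration;
  configuration.id = 1;                                     // module id
  configuration.audio = true;                               // audio, not video
  configuration.clock = webrtc::Clock::GetRealTimeClock();  // system clock
  configuration.outgoing_transport = transport;             // where RTP/RTCP is sent
  return webrtc::RtpRtcp::CreateRtpRtcp(configuration);
}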
/external/chromium_org/third_party/webrtc/modules/rtp_rtcp/source/
nack_rtx_unittest.cc
181 configuration.audio = false;
rtcp_sender.h
72 RTCPSender(const int32_t id, const bool audio,
rtp_rtcp_impl.cc
29 audio(false),
61 configuration.audio,
70 configuration.audio,
76 audio_(configuration.audio),
1013 // Set audio packet size, used to determine when it's time to send a DTMF
rtp_sender_audio.cc
76 // set audio packet size, used to determine when it's time to send a DTMF packet in silence (CNG)
124 payload->audio = true;
136 // for audio true for first packet in a speech burst
271 // A source MAY send events and coded audio packets for the same time
338 // we don't send empty audio RTP packets
438 // Update audio level extension, if included.
rtp_sender_video.cc
98 payload->audio = false;
rtp_sender.h
70 RTPSender(const int32_t id, const bool audio, Clock *clock,
235 // Set audio packet size, used to determine when it's time to send a DTMF
239 // Store the audio level in d_bov for
240 // header-extension-for-audio-level-indication.
/external/chromium_org/third_party/webrtc/modules/video_coding/main/test/
mt_rx_tx_test.cc
156 configuration.audio = false;
media_opt_test.cc
205 configuration.audio = false;
rtp_player.cc
221 configuration.audio = false;
/external/libvorbis/doc/
05-comment.tex
114 The artist generally considered responsible for the work. In popular music this is usually the performing band or singer. For classical music it would be the composer. For an audio book it would be the author of the original text.
117 The artist(s) who performed the work. In classical music this would be the conductor, orchestra, soloists. In an audio book it would be the actor who did the reading. In popular music this is typically the same as the ARTIST and is omitted.
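As a concrete illustration of the ARTIST/PERFORMER split described in these two fields (the tag values and the helper below are invented for illustration; vorbis_comment_add_tag is the libvorbis call for writing such tags):

#include <vorbis/codec.h>

/* Sketch: tag an audio-book encode so that ARTIST names the original
 * author and PERFORMER names the reader, per the field descriptions above. */
static void tag_audiobook(vorbis_comment *vc) {
  vorbis_comment_init(vc);
  vorbis_comment_add_tag(vc, "ARTIST", "Jane Austen");         /* author of the text */
  vorbis_comment_add_tag(vc, "PERFORMER", "Example Narrator"); /* who did the reading */
}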
/external/chromium_org/chrome/browser/media/
media_capture_devices_dispatcher.cc
47 #include "media/audio/audio_manager_base.h"
170 // Use the special loopback device ID for system audio capture.
260 // AVFoundation is used for video/audio device monitoring and video capture in
520 // Currently loopback audio capture is supported only on Windows and ChromeOS.
550 // Currently loopback audio capture is supported only on Windows and ChromeOS.
733 // Set an initial error result. If neither audio or video is allowed, we'll
739 // result + a dcheck to ensure at least one of audio or video types is
745 // Get the exact audio or video device if an id is specified.
765 // If either or both audio and video devices were requested but not
872 bool audio,
870 GetDefaultDevicesForProfile( Profile* profile, bool audio, bool video, content::MediaStreamDevices* devices) argument
[all...]
/external/chromium_org/content/browser/media/
webrtc_internals_browsertest.cc
194 << request.origin << "', audio:'" << request.audio_constraints
250 std::string origin, audio, video; local
254 ASSERT_TRUE(dict->GetString("audio", &audio));
259 EXPECT_EQ(requests[i].audio_constraints, audio);
/external/chromium_org/content/renderer/media/
peer_connection_tracker.cc
104 result += ", audio: [";
524 user_media_request.audio(),
/external/chromium_org/third_party/webrtc/modules/rtp_rtcp/test/testAPI/
test_api_audio.cc
131 configuration.audio = true;
test_api_rtcp.cc
111 configuration.audio = true;
/external/sepolicy/
file_contexts
40 /dev/audio.* u:object_r:audio_device:s0
200 /data/misc/audio(/.*)? u:object_r:audio_data_file:s0
/external/wpa_supplicant_8/hostapd/src/utils/
http_curl.c
329 STACK_OF(LogotypeAudio) *audio;
402 ASN1_IMP_SEQUENCE_OF_OPT(LogotypeData, audio, LogotypeAudio, 1)
660 num = data->audio ? sk_LogotypeAudio_num(data->audio) : 0;
/external/wpa_supplicant_8/src/utils/
http_curl.c
329 STACK_OF(LogotypeAudio) *audio;
402 ASN1_IMP_SEQUENCE_OF_OPT(LogotypeData, audio, LogotypeAudio, 1)
660 num = data->audio ? sk_LogotypeAudio_num(data->audio) : 0;
/external/wpa_supplicant_8/wpa_supplicant/src/utils/
http_curl.c
329 STACK_OF(LogotypeAudio) *audio;
402 ASN1_IMP_SEQUENCE_OF_OPT(LogotypeData, audio, LogotypeAudio, 1)
660 num = data->audio ? sk_LogotypeAudio_num(data->audio) : 0;
/external/bluetooth/bluedroid/audio_a2dp_hw/
audio_a2dp_hw.c
23 * Description: Implements hal for bluedroid a2dp audio device
42 #include <system/audio.h>
43 #include <hardware/audio.h>
503 /* disconnect audio path */
528 /* disconnect audio path */
539 ** audio output callbacks
/external/chromium_org/third_party/libjingle/source/talk/session/media/
channel_unittest.cc
470 // kPcmuCodec is used as audio codec and kH264Codec is used as video codec.
1850 cricket::AudioContentDescription* audio) {
1851 audio->AddCodec(audio_codec);
1852 audio->set_rtcp_mux((flags & RTCP_MUX) != 0);
1854 audio->AddCrypto(cricket::CryptoParams(
1863 cricket::AudioContentDescription* audio) {
1864 *audio = source;
1876 uint32 ssrc, int flags, cricket::AudioContentDescription* audio) {
1877 audio->AddLegacyStream(ssrc);
1846 CreateContent( int flags, const cricket::AudioCodec& audio_codec, const cricket::VideoCodec& video_codec, cricket::AudioContentDescription* audio) argument
1861 CopyContent( const cricket::AudioContentDescription& source, cricket::AudioContentDescription* audio) argument
1875 AddLegacyStreamInContent( uint32 ssrc, int flags, cricket::AudioContentDescription* audio) argument
/external/chromium_org/third_party/webrtc/voice_engine/
transmit_mixer.cc
335 // --- Resample input audio and create/store the initial audio frame
352 // --- Near-end audio processing.
409 // --- Measure audio level of speech after all processing.
1137 void TransmitMixer::GenerateAudioFrame(const int16_t* audio, argument
1160 DownConvertToCodecFormat(audio,
1235 // Replace ACM audio with file.

Completed in 2880 milliseconds
