Searched defs:audio (Results 1 - 23 of 23) sorted by relevance

/frameworks/base/media/tests/MediaFrameworkTest/src/com/android/mediaframeworktest/functional/audio/
SimTonesTest.java
17 package com.android.mediaframeworktest.functional.audio;
MediaBassBoostTest.java
17 package com.android.mediaframeworktest.functional.audio;
MediaPresetReverbTest.java
17 package com.android.mediaframeworktest.functional.audio;
210 // creating a volume controller on output mix ensures that ro.audio.silent mutes
211 // audio after the effects and not before
281 // creating a volume controller on output mix ensures that ro.audio.silent mutes
282 // audio after the effects and not before
MediaVirtualizerTest.java
17 package com.android.mediaframeworktest.functional.audio;
MediaAudioManagerTest.java
17 package com.android.mediaframeworktest.functional.audio;
155 // the audio focus request is async, so wait a bit to verify it had the expected effect
175 //Test case 1: test audio focus listener loses audio focus:
186 //Test case 2: test audio focus listener loses audio focus:
198 //Test case 3: test audio focus listener loses audio focus:
210 //Test case 4: test audio focus registering and use over 3000 iterations
220 assertTrue("audio focu
[all...]
MediaEnvReverbTest.java
17 package com.android.mediaframeworktest.functional.audio;
365 // creating a volume controller on output mix ensures that ro.audio.silent mutes
366 // audio after the effects and not before
438 // creating a volume controller on output mix ensures that ro.audio.silent mutes
439 // audio after the effects and not before
MediaEqualizerTest.java
17 package com.android.mediaframeworktest.functional.audio;
MediaVisualizerTest.java
17 package com.android.mediaframeworktest.functional.audio;
242 // creating a volume controller on output mix ensures that ro.audio.silent mutes
243 // audio after the effects and not before
321 // creating a volume controller on output mix ensures that ro.audio.silent mutes
322 // audio after the effects and not before
454 // creating a volume controller on output mix ensures that ro.audio.silent mutes
455 // audio after the effects and not before
MediaAudioEffectTest.java
17 package com.android.mediaframeworktest.functional.audio;
276 //Test case 1.4: test constructor on mediaPlayer audio session
MediaAudioTrackTest.java
17 package com.android.mediaframeworktest.functional.audio;
752 // count (given the audio track properties), and add 77.
/frameworks/av/media/libmediaplayerservice/nuplayer/
StreamingSource.cpp
141 sp<MetaData> NuPlayer::StreamingSource::getFormatMeta(bool audio) { argument
143 audio ? ATSParser::AUDIO : ATSParser::VIDEO;
156 bool audio, sp<ABuffer> *accessUnit) {
158 audio ? ATSParser::AUDIO : ATSParser::VIDEO;
155 dequeueAccessUnit( bool audio, sp<ABuffer> *accessUnit) argument
NuPlayerDecoder.cpp
199 bool audio = !strncasecmp(oldMime.c_str(), "audio/", strlen("audio/")); local
201 if (audio) {
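
Several of the hits (NuPlayerDecoder above, GenericSource and PlaylistFetcher below) derive the flag from a MIME-type prefix rather than receiving it as a parameter. A one-function C++ sketch of that idiom:

    #include <strings.h>   // strncasecmp (POSIX)

    // Classify a track as audio by its MIME prefix, matching the
    // !strncasecmp(mime, "audio/", 6) checks in the hits.
    static bool isAudioMime(const char *mime) {
        return mime != nullptr && strncasecmp(mime, "audio/", 6) == 0;
    }
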
NuPlayerSource.h
66 virtual sp<AMessage> getFormat(bool audio);
69 bool audio, sp<ABuffer> *accessUnit) = 0;
96 virtual sp<MetaData> getFormatMeta(bool audio) { return NULL; } argument
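
The NuPlayer Source hits all share one shape: a virtual entry point takes a bool audio flag and routes to either the audio or the video stream. Below is a minimal, self-contained sketch of that pattern using plain standard C++ stand-ins (std::string and std::shared_ptr instead of AMessage, MetaData and sp<ABuffer>), so the names are illustrative rather than the real stagefright types.

    #include <cstdint>
    #include <deque>
    #include <memory>
    #include <string>
    #include <vector>

    struct AccessUnit {                    // stand-in for sp<ABuffer>
        std::vector<uint8_t> data;
        int64_t timeUs = 0;
    };

    class Source {                         // stand-in for NuPlayer::Source
    public:
        virtual ~Source() = default;
        virtual std::string getFormat(bool audio) = 0;
        // The real interface returns a status_t (e.g. -EWOULDBLOCK when dry);
        // here a plain bool signals whether a unit was produced.
        virtual bool dequeueAccessUnit(bool audio,
                                       std::shared_ptr<AccessUnit> *accessUnit) = 0;
    };

    class DemoSource : public Source {
    public:
        std::string getFormat(bool audio) override {
            return audio ? "audio/mp4a-latm" : "video/avc";
        }
        bool dequeueAccessUnit(bool audio,
                               std::shared_ptr<AccessUnit> *accessUnit) override {
            auto &queue = audio ? mAudioQueue : mVideoQueue;  // per-stream state
            if (queue.empty()) return false;
            *accessUnit = queue.front();
            queue.pop_front();
            return true;
        }
    private:
        std::deque<std::shared_ptr<AccessUnit>> mAudioQueue, mVideoQueue;
    };
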
GenericSource.cpp
79 if (!strncasecmp(mime, "audio/", 6)) {
138 readBuffer(true /* audio */);
147 readBuffer(false /* audio */);
155 sp<MetaData> NuPlayer::GenericSource::getFormatMeta(bool audio) { argument
156 sp<MediaSource> source = audio ? mAudioTrack.mSource : mVideoTrack.mSource;
166 bool audio, sp<ABuffer> *accessUnit) {
167 Track *track = audio ? &mAudioTrack : &mVideoTrack;
180 readBuffer(audio, -1ll);
193 readBuffer(false /* audio */, seekTimeUs, &actualTimeUs);
199 readBuffer(true /* audio */, seekTimeU
165 dequeueAccessUnit( bool audio, sp<ABuffer> *accessUnit) argument
205 readBuffer( bool audio, int64_t seekTimeUs, int64_t *actualTimeUs) argument
[all...]
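
GenericSource applies the same flag one level down: the bool picks which Track record to read, and the snippets at lines 193/199 suggest a seek reads video first and then reads audio at the time the video read actually landed on. A hedged sketch of that bookkeeping with invented field values:

    #include <cstdint>
    #include <cstdio>

    struct Track {                   // stand-in for GenericSource's per-stream state
        const char *mime;
        int64_t lastReadTimeUs;
    };

    struct GenericSourceSketch {
        Track mAudioTrack{"audio/mp4a-latm", -1};
        Track mVideoTrack{"video/avc", -1};

        // readBuffer(bool audio, int64_t seekTimeUs, int64_t *actualTimeUs)
        void readBuffer(bool audio, int64_t seekTimeUs = -1,
                        int64_t *actualTimeUs = nullptr) {
            Track *track = audio ? &mAudioTrack : &mVideoTrack;  // line 167 of the hit
            if (seekTimeUs >= 0) track->lastReadTimeUs = seekTimeUs;
            if (actualTimeUs) *actualTimeUs = track->lastReadTimeUs;
            std::printf("read %s buffer at %lld us\n", track->mime,
                        (long long)track->lastReadTimeUs);
        }

        void seekTo(int64_t seekTimeUs) {
            int64_t actualTimeUs = 0;
            readBuffer(false /* audio */, seekTimeUs, &actualTimeUs);  // video first
            readBuffer(true /* audio */, actualTimeUs);  // then audio at the video time
        }
    };
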
HTTPLiveSource.cpp
94 sp<AMessage> NuPlayer::HTTPLiveSource::getFormat(bool audio) { argument
97 audio ? LiveSession::STREAMTYPE_AUDIO
113 bool audio, sp<ABuffer> *accessUnit) {
115 audio ? LiveSession::STREAMTYPE_AUDIO
205 sp<AMessage> format = getFormat(false /* audio */);
247 bool audio = changedMask & LiveSession::STREAMTYPE_AUDIO; local
255 notify->setInt32("audio", audio);
112 dequeueAccessUnit( bool audio, sp<ABuffer> *accessUnit) argument
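
HTTPLiveSource translates the same bool into LiveSession stream-type flags, and goes the other way when a change notification arrives (line 247 masks changedMask with STREAMTYPE_AUDIO). A small self-contained sketch of that bitmask round trip; the flag values here are hypothetical, not LiveSession's:

    #include <cstdint>
    #include <cstdio>

    enum StreamType : uint32_t {         // hypothetical values standing in for
        STREAMTYPE_AUDIO     = 1u << 0,  // LiveSession::STREAMTYPE_*
        STREAMTYPE_VIDEO     = 1u << 1,
        STREAMTYPE_SUBTITLES = 1u << 2,
    };

    static StreamType streamTypeFor(bool audio) {
        return audio ? STREAMTYPE_AUDIO : STREAMTYPE_VIDEO;  // lines 97/115 of the hit
    }

    int main() {
        uint32_t changedMask = STREAMTYPE_AUDIO | STREAMTYPE_SUBTITLES;
        bool audio = (changedMask & STREAMTYPE_AUDIO) != 0;  // line 247 of the hit
        std::printf("audio changed: %d, fetch type: %u\n",
                    audio, (unsigned)streamTypeFor(audio));
        return 0;
    }
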
NuPlayerRenderer.cpp
63 bool audio,
67 msg->setInt32("audio", static_cast<int32_t>(audio));
73 void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) { argument
77 msg->setInt32("audio", static_cast<int32_t>(audio));
82 void NuPlayer::Renderer::flush(bool audio) { argument
85 if (audio) {
95 msg->setInt32("audio", static_cast<int32_t>(audio));
62 queueBuffer( bool audio, const sp<ABuffer> &buffer, const sp<AMessage> &notifyConsumed) argument
440 notifyEOS(bool audio, status_t finalResult) argument
449 int32_t audio; local
535 int32_t audio; local
565 int32_t audio; local
617 notifyFlushComplete(bool audio) argument
624 dropBufferWhileFlushing( bool audio, const sp<AMessage> &msg) argument
[all...]
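
The Renderer hits show a third recurring shape: the flag is not passed as an argument but stowed in an AMessage as an int32 named "audio" (setInt32 on the way in, findInt32 in the handler). The sketch below imitates that round trip with a std::map standing in for AMessage, so none of the types are the real ones:

    #include <cstdint>
    #include <cstdio>
    #include <map>
    #include <string>

    using Message = std::map<std::string, int32_t>;  // toy stand-in for AMessage

    static Message makeQueueEOS(bool audio, int32_t finalResult) {
        Message msg;
        msg["audio"] = static_cast<int32_t>(audio);  // msg->setInt32("audio", ...)
        msg["finalResult"] = finalResult;
        return msg;
    }

    static void onQueueEOS(const Message &msg) {
        int32_t audio = msg.at("audio");             // findInt32("audio", &audio)
        std::printf("EOS on %s stream, result %d\n",
                    audio ? "audio" : "video", msg.at("finalResult"));
    }

    int main() {
        onQueueEOS(makeQueueEOS(true /* audio */, 0));
        return 0;
    }
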
RTSPSource.cpp
147 sp<MetaData> NuPlayer::RTSPSource::getFormatMeta(bool audio) { argument
148 sp<AnotherPacketSource> source = getSource(audio);
176 ALOGV("audio track doesn't have enough data yet. (%.2f secs buffered)",
194 bool audio, sp<ABuffer> *accessUnit) {
207 sp<AnotherPacketSource> source = getSource(audio);
218 sp<AnotherPacketSource> otherSource = getSource(!audio);
232 int64_t eosTimeout = audio ? mEOSTimeoutAudio : mEOSTimeoutVideo;
234 setEOSTimeout(audio, ALooper::GetNowUs());
236 setEOSTimeout(audio, 0);
258 setEOSTimeout(audio,
193 dequeueAccessUnit( bool audio, sp<ABuffer> *accessUnit) argument
263 getSource(bool audio) argument
274 setEOSTimeout(bool audio, int64_t timeout) argument
[all...]
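
RTSPSource adds per-stream EOS timeout bookkeeping on top of the flag: line 232 picks mEOSTimeoutAudio or mEOSTimeoutVideo, a timestamp that is set on the first dry dequeue and cleared when data arrives. A sketch of that bookkeeping in the same style; the timeout constant is an assumption, not the real value:

    #include <chrono>
    #include <cstdint>

    class EosTimeoutTracker {
    public:
        // Returns true once the dry stream should be reported as EOS.
        bool onEmptyDequeue(bool audio) {
            int64_t &eosTimeout = audio ? mEOSTimeoutAudio : mEOSTimeoutVideo;  // line 232
            const int64_t nowUs = nowMicros();
            if (eosTimeout == 0) {
                eosTimeout = nowUs;        // setEOSTimeout(audio, ALooper::GetNowUs())
                return false;
            }
            return nowUs - eosTimeout >= kEosTimeoutUs;
        }

        void onDataDequeued(bool audio) {
            (audio ? mEOSTimeoutAudio : mEOSTimeoutVideo) = 0;  // setEOSTimeout(audio, 0)
        }

    private:
        static int64_t nowMicros() {
            using namespace std::chrono;
            return duration_cast<microseconds>(
                    steady_clock::now().time_since_epoch()).count();
        }
        static constexpr int64_t kEosTimeoutUs = 2000000;  // assumed: 2 seconds
        int64_t mEOSTimeoutAudio = 0;
        int64_t mEOSTimeoutVideo = 0;
    };
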
NuPlayer.cpp
93 ShutdownDecoderAction(bool audio, bool video) argument
94 : mAudio(audio),
411 false /* audio */, true /* video */));
512 // We're not currently decoding anything (no audio or
535 bool audio = msg->what() == kWhatAudioNotify; local
545 audio, codecRequest);
557 ALOGV("got %s decoder EOS", audio ? "audio" : "video");
560 audio ? "audio"
726 int32_t audio; local
768 int32_t audio; local
887 instantiateDecoder(bool audio, sp<Decoder> *decoder) argument
917 feedDecoderInputData(bool audio, const sp<AMessage> &msg) argument
1049 renderBuffer(bool audio, const sp<AMessage> &msg) argument
1109 flushDecoder(bool audio, bool needShutdown) argument
1153 getFormat(bool audio) argument
1283 performDecoderShutdown(bool audio, bool video) argument
1471 int32_t audio, video; local
1515 queueDecoderShutdown( bool audio, bool video, const sp<AMessage> &reply) argument
[all...]
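
Inside NuPlayer itself the flag is recovered from which decoder posted the notification (line 535: audio is true when the message is the audio decoder's notify). A minimal sketch of that dispatch with hypothetical message codes:

    #include <cstdio>

    enum What { kWhatAudioNotify = 1, kWhatVideoNotify = 2 };  // hypothetical codes

    static void onDecoderNotify(What what, bool isEOS) {
        bool audio = (what == kWhatAudioNotify);  // line 535 of the hit
        if (isEOS) {
            std::printf("got %s decoder EOS\n", audio ? "audio" : "video");  // line 557
        }
    }

    int main() {
        onDecoderNotify(kWhatAudioNotify, true);
        onDecoderNotify(kWhatVideoNotify, true);
        return 0;
    }
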
/frameworks/av/media/libmediaplayerservice/nuplayer/mp4/
MP4Source.cpp
135 sp<AMessage> MP4Source::getFormat(bool audio) { argument
136 return mParser->getFormat(audio);
140 bool audio, sp<ABuffer> *accessUnit) {
141 return mParser->dequeueAccessUnit(audio, accessUnit);
139 dequeueAccessUnit( bool audio, sp<ABuffer> *accessUnit) argument
/frameworks/av/media/libstagefright/httplive/
PlaylistFetcher.cpp
954 srcType == ATSParser::VIDEO ? "video" : "audio");
1210 // This better be an ISO 13818-7 (AAC) or ISO 13818-1 (MPEG) audio
1217 // Make sure to skip all ID3 tags preceding the audio data.
1273 ALOGW("This stream only contains audio data!");
1387 bool audio = !strncasecmp(mime, "audio/", 6); local
1388 if (audio) {
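
The PlaylistFetcher hits around lines 1210-1217 deal with HLS segments that carry raw AAC or MPEG audio prefixed by ID3 tags, which must be skipped before the audio data. The sketch below follows the standard ID3v2 header layout (10 bytes: "ID3", version, flags, 4-byte syncsafe size); it illustrates the general rule rather than reproducing the PlaylistFetcher code:

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Skip any ID3v2 tags that precede the raw audio data and return the
    // offset of the first audio byte.
    static size_t skipID3Tags(const uint8_t *data, size_t size) {
        size_t offset = 0;
        while (size - offset >= 10 && memcmp(&data[offset], "ID3", 3) == 0) {
            const uint8_t *h = &data[offset];
            // Syncsafe size: 4 bytes, 7 significant bits each.
            uint32_t tagSize = ((h[6] & 0x7f) << 21) | ((h[7] & 0x7f) << 14)
                             | ((h[8] & 0x7f) << 7)  |  (h[9] & 0x7f);
            size_t total = 10 + tagSize;
            if (h[5] & 0x10) total += 10;      // footer-present flag adds 10 bytes
            if (total > size - offset) break;  // truncated tag: stop, don't overrun
            offset += total;                   // skip this tag, check for another
        }
        return offset;
    }
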
/frameworks/base/core/java/android/speech/srec/
Recognizer.java
55 * // create and start audio input
56 * InputStream audio = new MicrophoneInputStream(11025, 11025*5);
87 * // put more audio in the Recognizer
88 * recognizer.putAudio(audio);
100 * // stop the audio device
101 * audio.close();
159 * A separate config file is needed for each audio sample rate.
265 * Process some audio and return the current status.
288 * Put audio samples into the <code>Recognizer</code>.
289 * @param buf holds the audio sample
304 putAudio(InputStream audio) argument
[all...]
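
The Recognizer javadoc excerpted above describes a feed loop: open an audio input, repeatedly hand buffers to the recognizer, then close the input. The same loop shape is sketched below with entirely hypothetical C++ stand-ins, since the real API is the Java android.speech.srec.Recognizer:

    #include <cstdint>
    #include <vector>

    struct AudioInput {                  // stand-in for MicrophoneInputStream
        bool read(std::vector<int16_t> &buf) { (void)buf; return false; }
        void close() {}
    };

    struct RecognizerSketch {            // stand-in for the Java Recognizer
        void putAudio(const std::vector<int16_t> &buf) { (void)buf; }
        bool advance() { return false; } // false once recognition has finished
    };

    int main() {
        AudioInput audio;                // "create and start audio input"
        RecognizerSketch recognizer;
        std::vector<int16_t> buf(512);
        while (audio.read(buf)) {        // "put more audio in the Recognizer"
            recognizer.putAudio(buf);
            if (!recognizer.advance()) break;
        }
        audio.close();                   // "stop the audio device"
        return 0;
    }
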
/frameworks/native/include/media/openmax/
OMX_Component.h
96 OMX_AUDIO_PORTDEFINITIONTYPE audio; member in union:OMX_PARAM_PORTDEFINITIONTYPE::__anon1420
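
The lone OMX hit is the audio member of the format union inside OMX_PARAM_PORTDEFINITIONTYPE. A hedged sketch of how that union is typically consumed, assuming the standard OpenMAX IL 1.1 headers and an already-obtained component handle; error handling is reduced to an early return:

    #include <cstdio>
    #include <cstring>
    #include <OMX_Core.h>
    #include <OMX_Component.h>

    static void dumpAudioPort(OMX_HANDLETYPE comp, OMX_U32 portIndex) {
        OMX_PARAM_PORTDEFINITIONTYPE def;
        std::memset(&def, 0, sizeof(def));
        def.nSize = sizeof(def);
        def.nVersion.s.nVersionMajor = 1;
        def.nVersion.s.nVersionMinor = 1;
        def.nPortIndex = portIndex;

        if (OMX_GetParameter(comp, OMX_IndexParamPortDefinition, &def) != OMX_ErrorNone) {
            return;
        }
        if (def.eDomain == OMX_PortDomainAudio) {
            // The `audio` branch of the union from the search hit.
            std::printf("audio port %u, encoding %d, mime %s\n",
                        (unsigned)portIndex,
                        (int)def.format.audio.eEncoding,
                        def.format.audio.cMIMEType ? def.format.audio.cMIMEType : "(none)");
        }
    }
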
/frameworks/av/media/libstagefright/mp4/
FragmentedMP4Parser.cpp
258 sp<AMessage> FragmentedMP4Parser::getFormat(bool audio, bool synchronous) { argument
263 msg->setInt32("audio", audio);
294 msg->setInt32("audio", wantAudio);
347 status_t FragmentedMP4Parser::dequeueAccessUnit(bool audio, sp<ABuffer> *accessUnit, argument
352 msg->setInt32("audio", audio);
485 CHECK(msg->findInt32("audio", &wantAudio));
523 CHECK(msg->findInt32("audio", &wantAudio));
554 CHECK(msg->findInt32("audio",
[all...]
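
FragmentedMP4Parser carries the same "audio" int32 across a thread boundary: the caller posts a request (and, when synchronous is true, waits for the reply) while the looper thread reads the flag back with findInt32. A toy sketch of that synchronous round trip using a promise/future in place of AMessage/ALooper; all names here are stand-ins:

    #include <cstdint>
    #include <cstdio>
    #include <future>
    #include <map>
    #include <string>
    #include <thread>

    struct Request {
        std::map<std::string, int32_t> ints;  // stand-in for AMessage int32 fields
        std::promise<std::string> reply;      // stand-in for the response message
    };

    static void handleGetFormat(Request &req) {        // runs on the "looper" thread
        int32_t wantAudio = req.ints.at("audio");       // findInt32("audio", &wantAudio)
        req.reply.set_value(wantAudio ? "audio/mp4a-latm" : "video/avc");
    }

    static std::string getFormat(bool audio) {          // the synchronous == true path
        Request req;
        req.ints["audio"] = audio;                      // setInt32("audio", audio)
        std::thread worker(handleGetFormat, std::ref(req));
        std::string format = req.reply.get_future().get();  // block for the reply
        worker.join();
        return format;
    }

    int main() {
        std::printf("%s\n", getFormat(true /* audio */).c_str());
        return 0;
    }
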

Completed in 256 milliseconds