AudioSource.cpp revision a5750e0dad9e90f2195ce36f2c4457fa04b2b83e
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <inttypes.h>
#include <stdlib.h>

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioSource"
#include <utils/Log.h>

#include <media/AudioRecord.h>
#include <media/stagefright/AudioSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <cutils/properties.h>

namespace android {

// Trampoline used as the AudioRecord C-style callback: forwards EVENT_MORE_DATA
// payloads to the owning AudioSource and logs overruns. All other events are
// ignored. `user` is the AudioSource* registered in the constructor.
static void AudioRecordCallbackFunction(int event, void *user, void *info) {
    AudioSource *source = (AudioSource *) user;
    switch (event) {
        case AudioRecord::EVENT_MORE_DATA: {
            source->dataCallback(*((AudioRecord::Buffer *) info));
            break;
        }
        case AudioRecord::EVENT_OVERRUN: {
            ALOGW("AudioRecord reported overrun!");
            break;
        }
        default:
            // does nothing
            break;
    }
}

// Constructs the capture source and the underlying AudioRecord.
// Only mono and stereo 16-bit PCM are supported (CHECK below aborts otherwise).
// On any failure the error is latched into mInitCheck; callers must consult
// initCheck() before use.
AudioSource::AudioSource(
        audio_source_t inputSource, uint32_t sampleRate, uint32_t channelCount)
    : mStarted(false),
      mSampleRate(sampleRate),
      mPrevSampleTimeUs(0),
      mNumFramesReceived(0),
      mNumClientOwnedBuffers(0) {
    ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
    CHECK(channelCount == 1 || channelCount == 2);

    size_t minFrameCount;
    status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
                                           sampleRate,
                                           AUDIO_FORMAT_PCM_16_BIT,
                                           audio_channel_in_mask_from_count(channelCount));
    if (status == OK) {
        // make sure that the AudioRecord callback never returns more than the maximum
        // buffer size: kMaxBufferSize bytes of 16-bit samples across all channels.
        uint32_t frameCount = kMaxBufferSize / sizeof(int16_t) / channelCount;

        // make sure that the AudioRecord total buffer size is large enough:
        // use the smallest multiple of frameCount (at least 2) that covers
        // the HAL-reported minimum.
        size_t bufCount = 2;
        while ((bufCount * frameCount) < minFrameCount) {
            bufCount++;
        }

        mRecord = new AudioRecord(
                    inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
                    audio_channel_in_mask_from_count(channelCount),
                    (size_t) (bufCount * frameCount),
                    AudioRecordCallbackFunction,
                    this,
                    frameCount /*notificationFrames*/);
        mInitCheck = mRecord->initCheck();
    } else {
        mInitCheck = status;
    }
}

// Stops capture and releases all queued/outstanding buffers if the source is
// still running; reset() blocks until clients return their buffers.
AudioSource::~AudioSource() {
    if (mStarted) {
        reset();
    }
}

// Returns the latched construction status (OK, or the AudioRecord /
// getMinFrameCount failure code).
status_t AudioSource::initCheck() const {
    return mInitCheck;
}

// Starts audio capture. An optional kKeyTime in `params` seeds mStartTimeUs
// (later reinterpreted as the initial delay on first data — see dataCallback).
// Returns UNKNOWN_ERROR if already started, NO_INIT if construction failed,
// otherwise the AudioRecord::start() status.
status_t AudioSource::start(MetaData *params) {
    Mutex::Autolock autoLock(mLock);
    if (mStarted) {
        return UNKNOWN_ERROR;
    }

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    mTrackMaxAmplitude = false;
    mMaxAmplitude = 0;
    mInitialReadTimeUs = 0;
    mStartTimeUs = 0;
    int64_t startTimeUs;
    if (params && params->findInt64(kKeyTime, &startTimeUs)) {
        mStartTimeUs = startTimeUs;
    }
    status_t err = mRecord->start();
    if (err == OK) {
        mStarted = true;
    } else {
        // NOTE(review): clearing mRecord here leaves it null, so a later
        // getFormat()/reset() after a failed start would dereference null —
        // presumably callers discard the source on start failure; confirm.
        mRecord.clear();
    }

    return err;
}

// Drops every buffer still queued for read(). Caller must hold mLock
// (the "_l" suffix convention).
void AudioSource::releaseQueuedFrames_l() {
    ALOGV("releaseQueuedFrames_l");
    List<MediaBuffer *>::iterator it;
    while (!mBuffersReceived.empty()) {
        it = mBuffersReceived.begin();
        (*it)->release();
        mBuffersReceived.erase(it);
    }
}

// Blocks until every buffer handed out via read() has been returned through
// signalBufferReturned(). Caller must hold mLock; the condition wait releases
// it while sleeping.
void AudioSource::waitOutstandingEncodingFrames_l() {
    ALOGV("waitOutstandingEncodingFrames_l: %" PRId64, mNumClientOwnedBuffers);
    while (mNumClientOwnedBuffers > 0) {
        mFrameEncodingCompletionCondition.wait(mLock);
    }
}

// Stops capture. Order matters: clear mStarted first so the AudioRecord
// callback drops late data, stop the record, wait for client-owned buffers
// to come back, then free anything still queued.
status_t AudioSource::reset() {
    Mutex::Autolock autoLock(mLock);
    if (!mStarted) {
        return UNKNOWN_ERROR;
    }

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    mStarted = false;
    mRecord->stop();
    waitOutstandingEncodingFrames_l();
    releaseQueuedFrames_l();

    return OK;
}

// Describes the output: raw PCM with this source's rate/channel count and
// kMaxBufferSize as the per-buffer upper bound. Returns NULL (0) if
// construction failed.
sp<MetaData> AudioSource::getFormat() {
    Mutex::Autolock autoLock(mLock);
    if (mInitCheck != OK) {
        return 0;
    }

    sp<MetaData> meta = new MetaData;
    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
    meta->setInt32(kKeySampleRate, mSampleRate);
    meta->setInt32(kKeyChannelCount, mRecord->channelCount());
    meta->setInt32(kKeyMaxInputSize, kMaxBufferSize);

    return meta;
}

// Applies a linear fade-in to `bytes` of 16-bit PCM in `data`, scaling each
// sample by startFrame/rampDurationFrames in Q14 fixed point (kShift).
// The multiplier is refreshed every 4 frames rather than per sample.
// NOTE(review): in the stereo branch startFrame advances by 2 per frame while
// rampDurationFrames is frame-denominated, so units appear to mix frames and
// samples for stereo — behavior kept as-is; confirm against upstream intent.
void AudioSource::rampVolume(
    int32_t startFrame, int32_t rampDurationFrames,
    uint8_t *data, size_t bytes) {

    const int32_t kShift = 14;
    int32_t fixedMultiplier = (startFrame << kShift) / rampDurationFrames;
    const int32_t nChannels = mRecord->channelCount();
    int32_t stopFrame = startFrame + bytes / sizeof(int16_t);
    int16_t *frame = (int16_t *) data;
    if (stopFrame > rampDurationFrames) {
        stopFrame = rampDurationFrames;
    }

    while (startFrame < stopFrame) {
        if (nChannels == 1) { // mono
            frame[0] = (frame[0] * fixedMultiplier) >> kShift;
            ++frame;
            ++startFrame;
        } else { // stereo
            frame[0] = (frame[0] * fixedMultiplier) >> kShift;
            frame[1] = (frame[1] * fixedMultiplier) >> kShift;
            frame += 2;
            startFrame += 2;
        }

        // Update the multiplier every 4 frames
        if ((startFrame & 3) == 0) {
            fixedMultiplier = (startFrame << kShift) / rampDurationFrames;
        }
    }
}

// Blocking MediaSource::read(): waits for the callback thread to queue a
// buffer, transfers one to the caller (who must release it; this object is
// the observer so signalBufferReturned() is invoked), and applies the
// start-of-recording mute/fade-in.
// NOTE(review): after reset() this returns OK with *out left NULL —
// presumably callers treat that as end-of-stream; verify.
status_t AudioSource::read(
        MediaBuffer **out, const ReadOptions * /* options */) {
    Mutex::Autolock autoLock(mLock);
    *out = NULL;

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    while (mStarted && mBuffersReceived.empty()) {
        mFrameAvailableCondition.wait(mLock);
    }
    if (!mStarted) {
        return OK;
    }
    MediaBuffer *buffer = *mBuffersReceived.begin();
    mBuffersReceived.erase(mBuffersReceived.begin());
    ++mNumClientOwnedBuffers;
    buffer->setObserver(this);
    buffer->add_ref();

    // Mute/suppress the recording sound: fully silence audio within
    // kAutoRampStartUs of the start, then ramp the volume up over
    // kAutoRampDurationUs.
    int64_t timeUs;
    CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
    int64_t elapsedTimeUs = timeUs - mStartTimeUs;
    if (elapsedTimeUs < kAutoRampStartUs) {
        memset((uint8_t *) buffer->data(), 0, buffer->range_length());
    } else if (elapsedTimeUs < kAutoRampStartUs + kAutoRampDurationUs) {
        // The (int64_t) casts keep the us * sampleRate products from
        // overflowing 32 bits; +500000 rounds to the nearest frame.
        int32_t autoRampDurationFrames =
                    ((int64_t)kAutoRampDurationUs * mSampleRate + 500000LL) / 1000000LL;

        int32_t autoRampStartFrames =
                    ((int64_t)kAutoRampStartUs * mSampleRate + 500000LL) / 1000000LL;

        int32_t nFrames = mNumFramesReceived - autoRampStartFrames;
        rampVolume(nFrames, autoRampDurationFrames,
                (uint8_t *) buffer->data(), buffer->range_length());
    }

    // Track the max recording signal amplitude.
    if (mTrackMaxAmplitude) {
        trackMaxAmplitude(
            (int16_t *) buffer->data(), buffer->range_length() >> 1);
    }

    *out = buffer;
    return OK;
}

// MediaBufferObserver hook: a client finished with a buffer obtained from
// read(). Drops the extra reference taken in read() and wakes anyone blocked
// in waitOutstandingEncodingFrames_l().
void AudioSource::signalBufferReturned(MediaBuffer *buffer) {
    ALOGV("signalBufferReturned: %p", buffer->data());
    Mutex::Autolock autoLock(mLock);
    --mNumClientOwnedBuffers;
    buffer->setObserver(0);
    buffer->release();
    mFrameEncodingCompletionCondition.signal();
    return;
}

// Runs on the AudioRecord callback thread with fresh capture data.
// Responsibilities: drop data that predates the requested start time,
// establish the timestamp origin on the first accepted buffer, substitute
// zero-filled buffers for frames the HAL reports as lost, then queue the
// real data for read().
status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
    int64_t timeUs = systemTime() / 1000ll;

    ALOGV("dataCallbackTimestamp: %" PRId64 " us", timeUs);
    Mutex::Autolock autoLock(mLock);
    if (!mStarted) {
        ALOGW("Spurious callback from AudioRecord. Drop the audio data.");
        return OK;
    }

    // Drop retrieved and previously lost audio data.
    if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) {
        // Reading the lost-frame counter here resets it so the dropped span
        // is not later synthesized as silence.
        (void) mRecord->getInputFramesLost();
        ALOGV("Drop audio data at %" PRId64 "/%" PRId64 " us", timeUs, mStartTimeUs);
        return OK;
    }

    if (mNumFramesReceived == 0 && mPrevSampleTimeUs == 0) {
        mInitialReadTimeUs = timeUs;
        // Initial delay: mStartTimeUs is re-based from the caller-supplied
        // start time to the delay observed before the first data arrived;
        // without a caller start time, fall back to the record latency.
        if (mStartTimeUs > 0) {
            mStartTimeUs = timeUs - mStartTimeUs;
        } else {
            // Assume latency is constant.
            mStartTimeUs += mRecord->latency() * 1000;
        }

        mPrevSampleTimeUs = mStartTimeUs;
    }

    size_t numLostBytes = 0;
    if (mNumFramesReceived > 0) {  // Ignore earlier frame lost
        // getInputFramesLost() returns the number of lost frames.
        // Convert number of frames lost to number of bytes lost.
        numLostBytes = mRecord->getInputFramesLost() * mRecord->frameSize();
    }

    // 16-bit PCM: byte counts must be even.
    CHECK_EQ(numLostBytes & 1, 0u);
    CHECK_EQ(audioBuffer.size & 1, 0u);
    if (numLostBytes > 0) {
        // Loss of audio frames should happen rarely; thus the LOGW should
        // not cause a logging spam
        ALOGW("Lost audio record data: %zu bytes", numLostBytes);
    }

    // Backfill the lost span with silence, in chunks of at most
    // kMaxBufferSize so downstream buffer-size assumptions hold.
    while (numLostBytes > 0) {
        size_t bufferSize = numLostBytes;
        if (numLostBytes > kMaxBufferSize) {
            numLostBytes -= kMaxBufferSize;
            bufferSize = kMaxBufferSize;
        } else {
            numLostBytes = 0;
        }
        MediaBuffer *lostAudioBuffer = new MediaBuffer(bufferSize);
        memset(lostAudioBuffer->data(), 0, bufferSize);
        lostAudioBuffer->set_range(0, bufferSize);
        queueInputBuffer_l(lostAudioBuffer, timeUs);
    }

    if (audioBuffer.size == 0) {
        ALOGW("Nothing is available from AudioRecord callback buffer");
        return OK;
    }

    const size_t bufferSize = audioBuffer.size;
    MediaBuffer *buffer = new MediaBuffer(bufferSize);
    memcpy((uint8_t *) buffer->data(),
            audioBuffer.i16, audioBuffer.size);

    buffer->set_range(0, bufferSize);
    queueInputBuffer_l(buffer, timeUs);
    return OK;
}

// Stamps `buffer` and appends it to the read() queue. Caller must hold mLock.
// The buffer carries the timestamp of its *first* sample (mPrevSampleTimeUs);
// the advance to the next buffer's start is computed from the frame count with
// round-to-nearest division ((mSampleRate >> 1) is the rounding term).
// kKeyDriftTime records wall-clock drift relative to the first callback, and
// the first buffer additionally carries the anchor time.
void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) {
    const size_t bufferSize = buffer->range_length();
    const size_t frameSize = mRecord->frameSize();
    const int64_t timestampUs =
                mPrevSampleTimeUs +
                    ((1000000LL * (bufferSize / frameSize)) +
                        (mSampleRate >> 1)) / mSampleRate;

    if (mNumFramesReceived == 0) {
        buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs);
    }

    buffer->meta_data()->setInt64(kKeyTime, mPrevSampleTimeUs);
    buffer->meta_data()->setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
    mPrevSampleTimeUs = timestampUs;
    mNumFramesReceived += bufferSize / frameSize;
    mBuffersReceived.push_back(buffer);
    mFrameAvailableCondition.signal();
}

// Folds `nSamples` 16-bit samples into the running peak-absolute-amplitude.
// (Note -value overflows for INT16_MIN, but the comparison still behaves as
// a no-op update in that case since the result wraps negative.)
void AudioSource::trackMaxAmplitude(int16_t *data, int nSamples) {
    for (int i = nSamples; i > 0; --i) {
        int16_t value = *data++;
        if (value < 0) {
            value = -value;
        }
        if (mMaxAmplitude < value) {
            mMaxAmplitude = value;
        }
    }
}

// Returns the peak amplitude observed since the previous call and resets it.
// The first call only arms tracking, so it always returns 0.
int16_t AudioSource::getMaxAmplitude() {
    // First call activates the tracking.
    if (!mTrackMaxAmplitude) {
        mTrackMaxAmplitude = true;
    }
    int16_t value = mMaxAmplitude;
    mMaxAmplitude = 0;
    ALOGV("max amplitude since last call: %d", value);
    return value;
}

}  // namespace android