// AudioSource.cpp — revision e33054eb968cbf8ccaee1b0ff0301403902deed6
1/* 2 * Copyright (C) 2010 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17//#define LOG_NDEBUG 0 18#define LOG_TAG "AudioSource" 19#include <utils/Log.h> 20 21#include <media/AudioRecord.h> 22#include <media/stagefright/AudioSource.h> 23#include <media/stagefright/MediaBuffer.h> 24#include <media/stagefright/MediaDefs.h> 25#include <media/stagefright/MetaData.h> 26#include <media/stagefright/foundation/ADebug.h> 27#include <media/stagefright/foundation/ALooper.h> 28#include <cutils/properties.h> 29#include <stdlib.h> 30 31namespace android { 32 33static void AudioRecordCallbackFunction(int event, void *user, void *info) { 34 AudioSource *source = (AudioSource *) user; 35 switch (event) { 36 case AudioRecord::EVENT_MORE_DATA: { 37 source->dataCallback(*((AudioRecord::Buffer *) info)); 38 break; 39 } 40 case AudioRecord::EVENT_OVERRUN: { 41 ALOGW("AudioRecord reported overrun!"); 42 break; 43 } 44 default: 45 // does nothing 46 break; 47 } 48} 49 50AudioSource::AudioSource( 51 audio_source_t inputSource, uint32_t sampleRate, uint32_t channelCount) 52 : mRecord(NULL), 53 mStarted(false), 54 mSampleRate(sampleRate), 55 mPrevSampleTimeUs(0), 56 mNumFramesReceived(0), 57 mNumClientOwnedBuffers(0) { 58 ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount); 59 CHECK(channelCount == 1 || channelCount == 2); 60 61 size_t minFrameCount; 62 status_t status = AudioRecord::getMinFrameCount(&minFrameCount, 63 
sampleRate, 64 AUDIO_FORMAT_PCM_16_BIT, 65 audio_channel_in_mask_from_count(channelCount)); 66 if (status == OK) { 67 // make sure that the AudioRecord callback never returns more than the maximum 68 // buffer size 69 int frameCount = kMaxBufferSize / sizeof(int16_t) / channelCount; 70 71 // make sure that the AudioRecord total buffer size is large enough 72 int bufCount = 2; 73 while ((bufCount * frameCount) < minFrameCount) { 74 bufCount++; 75 } 76 77 mRecord = new AudioRecord( 78 inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT, 79 audio_channel_in_mask_from_count(channelCount), 80 bufCount * frameCount, 81 AudioRecordCallbackFunction, 82 this, 83 frameCount); 84 mInitCheck = mRecord->initCheck(); 85 } else { 86 mInitCheck = status; 87 } 88} 89 90AudioSource::~AudioSource() { 91 if (mStarted) { 92 reset(); 93 } 94 95 delete mRecord; 96 mRecord = NULL; 97} 98 99status_t AudioSource::initCheck() const { 100 return mInitCheck; 101} 102 103status_t AudioSource::start(MetaData *params) { 104 Mutex::Autolock autoLock(mLock); 105 if (mStarted) { 106 return UNKNOWN_ERROR; 107 } 108 109 if (mInitCheck != OK) { 110 return NO_INIT; 111 } 112 113 mTrackMaxAmplitude = false; 114 mMaxAmplitude = 0; 115 mInitialReadTimeUs = 0; 116 mStartTimeUs = 0; 117 int64_t startTimeUs; 118 if (params && params->findInt64(kKeyTime, &startTimeUs)) { 119 mStartTimeUs = startTimeUs; 120 } 121 status_t err = mRecord->start(); 122 if (err == OK) { 123 mStarted = true; 124 } else { 125 delete mRecord; 126 mRecord = NULL; 127 } 128 129 130 return err; 131} 132 133void AudioSource::releaseQueuedFrames_l() { 134 ALOGV("releaseQueuedFrames_l"); 135 List<MediaBuffer *>::iterator it; 136 while (!mBuffersReceived.empty()) { 137 it = mBuffersReceived.begin(); 138 (*it)->release(); 139 mBuffersReceived.erase(it); 140 } 141} 142 143void AudioSource::waitOutstandingEncodingFrames_l() { 144 ALOGV("waitOutstandingEncodingFrames_l: %lld", mNumClientOwnedBuffers); 145 while (mNumClientOwnedBuffers > 0) { 146 
mFrameEncodingCompletionCondition.wait(mLock); 147 } 148} 149 150status_t AudioSource::reset() { 151 Mutex::Autolock autoLock(mLock); 152 if (!mStarted) { 153 return UNKNOWN_ERROR; 154 } 155 156 if (mInitCheck != OK) { 157 return NO_INIT; 158 } 159 160 mStarted = false; 161 mRecord->stop(); 162 waitOutstandingEncodingFrames_l(); 163 releaseQueuedFrames_l(); 164 165 return OK; 166} 167 168sp<MetaData> AudioSource::getFormat() { 169 Mutex::Autolock autoLock(mLock); 170 if (mInitCheck != OK) { 171 return 0; 172 } 173 174 sp<MetaData> meta = new MetaData; 175 meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW); 176 meta->setInt32(kKeySampleRate, mSampleRate); 177 meta->setInt32(kKeyChannelCount, mRecord->channelCount()); 178 meta->setInt32(kKeyMaxInputSize, kMaxBufferSize); 179 180 return meta; 181} 182 183void AudioSource::rampVolume( 184 int32_t startFrame, int32_t rampDurationFrames, 185 uint8_t *data, size_t bytes) { 186 187 const int32_t kShift = 14; 188 int32_t fixedMultiplier = (startFrame << kShift) / rampDurationFrames; 189 const int32_t nChannels = mRecord->channelCount(); 190 int32_t stopFrame = startFrame + bytes / sizeof(int16_t); 191 int16_t *frame = (int16_t *) data; 192 if (stopFrame > rampDurationFrames) { 193 stopFrame = rampDurationFrames; 194 } 195 196 while (startFrame < stopFrame) { 197 if (nChannels == 1) { // mono 198 frame[0] = (frame[0] * fixedMultiplier) >> kShift; 199 ++frame; 200 ++startFrame; 201 } else { // stereo 202 frame[0] = (frame[0] * fixedMultiplier) >> kShift; 203 frame[1] = (frame[1] * fixedMultiplier) >> kShift; 204 frame += 2; 205 startFrame += 2; 206 } 207 208 // Update the multiplier every 4 frames 209 if ((startFrame & 3) == 0) { 210 fixedMultiplier = (startFrame << kShift) / rampDurationFrames; 211 } 212 } 213} 214 215status_t AudioSource::read( 216 MediaBuffer **out, const ReadOptions *options) { 217 Mutex::Autolock autoLock(mLock); 218 *out = NULL; 219 220 if (mInitCheck != OK) { 221 return NO_INIT; 222 } 223 224 
while (mStarted && mBuffersReceived.empty()) { 225 mFrameAvailableCondition.wait(mLock); 226 } 227 if (!mStarted) { 228 return OK; 229 } 230 MediaBuffer *buffer = *mBuffersReceived.begin(); 231 mBuffersReceived.erase(mBuffersReceived.begin()); 232 ++mNumClientOwnedBuffers; 233 buffer->setObserver(this); 234 buffer->add_ref(); 235 236 // Mute/suppress the recording sound 237 int64_t timeUs; 238 CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs)); 239 int64_t elapsedTimeUs = timeUs - mStartTimeUs; 240 if (elapsedTimeUs < kAutoRampStartUs) { 241 memset((uint8_t *) buffer->data(), 0, buffer->range_length()); 242 } else if (elapsedTimeUs < kAutoRampStartUs + kAutoRampDurationUs) { 243 int32_t autoRampDurationFrames = 244 (kAutoRampDurationUs * mSampleRate + 500000LL) / 1000000LL; 245 246 int32_t autoRampStartFrames = 247 (kAutoRampStartUs * mSampleRate + 500000LL) / 1000000LL; 248 249 int32_t nFrames = mNumFramesReceived - autoRampStartFrames; 250 rampVolume(nFrames, autoRampDurationFrames, 251 (uint8_t *) buffer->data(), buffer->range_length()); 252 } 253 254 // Track the max recording signal amplitude. 255 if (mTrackMaxAmplitude) { 256 trackMaxAmplitude( 257 (int16_t *) buffer->data(), buffer->range_length() >> 1); 258 } 259 260 *out = buffer; 261 return OK; 262} 263 264void AudioSource::signalBufferReturned(MediaBuffer *buffer) { 265 ALOGV("signalBufferReturned: %p", buffer->data()); 266 Mutex::Autolock autoLock(mLock); 267 --mNumClientOwnedBuffers; 268 buffer->setObserver(0); 269 buffer->release(); 270 mFrameEncodingCompletionCondition.signal(); 271 return; 272} 273 274status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) { 275 int64_t timeUs = systemTime() / 1000ll; 276 277 ALOGV("dataCallbackTimestamp: %lld us", timeUs); 278 Mutex::Autolock autoLock(mLock); 279 if (!mStarted) { 280 ALOGW("Spurious callback from AudioRecord. Drop the audio data."); 281 return OK; 282 } 283 284 // Drop retrieved and previously lost audio data. 
285 if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) { 286 mRecord->getInputFramesLost(); 287 ALOGV("Drop audio data at %lld/%lld us", timeUs, mStartTimeUs); 288 return OK; 289 } 290 291 if (mNumFramesReceived == 0 && mPrevSampleTimeUs == 0) { 292 mInitialReadTimeUs = timeUs; 293 // Initial delay 294 if (mStartTimeUs > 0) { 295 mStartTimeUs = timeUs - mStartTimeUs; 296 } else { 297 // Assume latency is constant. 298 mStartTimeUs += mRecord->latency() * 1000; 299 } 300 301 mPrevSampleTimeUs = mStartTimeUs; 302 } 303 304 size_t numLostBytes = 0; 305 if (mNumFramesReceived > 0) { // Ignore earlier frame lost 306 // getInputFramesLost() returns the number of lost frames. 307 // Convert number of frames lost to number of bytes lost. 308 numLostBytes = mRecord->getInputFramesLost() * mRecord->frameSize(); 309 } 310 311 CHECK_EQ(numLostBytes & 1, 0u); 312 CHECK_EQ(audioBuffer.size & 1, 0u); 313 if (numLostBytes > 0) { 314 // Loss of audio frames should happen rarely; thus the LOGW should 315 // not cause a logging spam 316 ALOGW("Lost audio record data: %d bytes", numLostBytes); 317 } 318 319 while (numLostBytes > 0) { 320 size_t bufferSize = numLostBytes; 321 if (numLostBytes > kMaxBufferSize) { 322 numLostBytes -= kMaxBufferSize; 323 bufferSize = kMaxBufferSize; 324 } else { 325 numLostBytes = 0; 326 } 327 MediaBuffer *lostAudioBuffer = new MediaBuffer(bufferSize); 328 memset(lostAudioBuffer->data(), 0, bufferSize); 329 lostAudioBuffer->set_range(0, bufferSize); 330 queueInputBuffer_l(lostAudioBuffer, timeUs); 331 } 332 333 if (audioBuffer.size == 0) { 334 ALOGW("Nothing is available from AudioRecord callback buffer"); 335 return OK; 336 } 337 338 const size_t bufferSize = audioBuffer.size; 339 MediaBuffer *buffer = new MediaBuffer(bufferSize); 340 memcpy((uint8_t *) buffer->data(), 341 audioBuffer.i16, audioBuffer.size); 342 buffer->set_range(0, bufferSize); 343 queueInputBuffer_l(buffer, timeUs); 344 return OK; 345} 346 347void 
AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) { 348 const size_t bufferSize = buffer->range_length(); 349 const size_t frameSize = mRecord->frameSize(); 350 const int64_t timestampUs = 351 mPrevSampleTimeUs + 352 ((1000000LL * (bufferSize / frameSize)) + 353 (mSampleRate >> 1)) / mSampleRate; 354 355 if (mNumFramesReceived == 0) { 356 buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs); 357 } 358 359 buffer->meta_data()->setInt64(kKeyTime, mPrevSampleTimeUs); 360 buffer->meta_data()->setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs); 361 mPrevSampleTimeUs = timestampUs; 362 mNumFramesReceived += bufferSize / frameSize; 363 mBuffersReceived.push_back(buffer); 364 mFrameAvailableCondition.signal(); 365} 366 367void AudioSource::trackMaxAmplitude(int16_t *data, int nSamples) { 368 for (int i = nSamples; i > 0; --i) { 369 int16_t value = *data++; 370 if (value < 0) { 371 value = -value; 372 } 373 if (mMaxAmplitude < value) { 374 mMaxAmplitude = value; 375 } 376 } 377} 378 379int16_t AudioSource::getMaxAmplitude() { 380 // First call activates the tracking. 381 if (!mTrackMaxAmplitude) { 382 mTrackMaxAmplitude = true; 383 } 384 int16_t value = mMaxAmplitude; 385 mMaxAmplitude = 0; 386 ALOGV("max amplitude since last call: %d", value); 387 return value; 388} 389 390} // namespace android 391