// AudioSource.cpp — revision 79e23b41fad961008bfde6e26b3c6f86878ca69d
1/* 2 * Copyright (C) 2010 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17//#define LOG_NDEBUG 0 18#define LOG_TAG "AudioSource" 19#include <utils/Log.h> 20 21#include <media/stagefright/AudioSource.h> 22 23#include <media/AudioRecord.h> 24#include <media/stagefright/MediaBufferGroup.h> 25#include <media/stagefright/MediaDebug.h> 26#include <media/stagefright/MediaDefs.h> 27#include <media/stagefright/MetaData.h> 28#include <cutils/properties.h> 29#include <stdlib.h> 30 31namespace android { 32 33AudioSource::AudioSource( 34 int inputSource, uint32_t sampleRate, uint32_t channels) 35 : mStarted(false), 36 mCollectStats(false), 37 mPrevSampleTimeUs(0), 38 mTotalLostFrames(0), 39 mPrevLostBytes(0), 40 mGroup(NULL) { 41 42 LOGV("sampleRate: %d, channels: %d", sampleRate, channels); 43 CHECK(channels == 1 || channels == 2); 44 uint32_t flags = AudioRecord::RECORD_AGC_ENABLE | 45 AudioRecord::RECORD_NS_ENABLE | 46 AudioRecord::RECORD_IIR_ENABLE; 47 48 mRecord = new AudioRecord( 49 inputSource, sampleRate, AudioSystem::PCM_16_BIT, 50 channels > 1? 
AudioSystem::CHANNEL_IN_STEREO: AudioSystem::CHANNEL_IN_MONO, 51 4 * kMaxBufferSize / sizeof(int16_t), /* Enable ping-pong buffers */ 52 flags); 53 54 mInitCheck = mRecord->initCheck(); 55} 56 57AudioSource::~AudioSource() { 58 if (mStarted) { 59 stop(); 60 } 61 62 delete mRecord; 63 mRecord = NULL; 64} 65 66status_t AudioSource::initCheck() const { 67 return mInitCheck; 68} 69 70status_t AudioSource::start(MetaData *params) { 71 if (mStarted) { 72 return UNKNOWN_ERROR; 73 } 74 75 if (mInitCheck != OK) { 76 return NO_INIT; 77 } 78 79 char value[PROPERTY_VALUE_MAX]; 80 if (property_get("media.stagefright.record-stats", value, NULL) 81 && (!strcmp(value, "1") || !strcasecmp(value, "true"))) { 82 mCollectStats = true; 83 } 84 85 mTrackMaxAmplitude = false; 86 mMaxAmplitude = 0; 87 mInitialReadTimeUs = 0; 88 mStartTimeUs = 0; 89 int64_t startTimeUs; 90 if (params && params->findInt64(kKeyTime, &startTimeUs)) { 91 mStartTimeUs = startTimeUs; 92 } 93 status_t err = mRecord->start(); 94 95 if (err == OK) { 96 mGroup = new MediaBufferGroup; 97 mGroup->add_buffer(new MediaBuffer(kMaxBufferSize)); 98 99 mStarted = true; 100 } 101 102 return err; 103} 104 105status_t AudioSource::stop() { 106 if (!mStarted) { 107 return UNKNOWN_ERROR; 108 } 109 110 if (mInitCheck != OK) { 111 return NO_INIT; 112 } 113 114 mRecord->stop(); 115 116 delete mGroup; 117 mGroup = NULL; 118 119 mStarted = false; 120 121 if (mCollectStats) { 122 LOGI("Total lost audio frames: %lld", 123 mTotalLostFrames + (mPrevLostBytes >> 1)); 124 } 125 126 return OK; 127} 128 129sp<MetaData> AudioSource::getFormat() { 130 if (mInitCheck != OK) { 131 return 0; 132 } 133 134 sp<MetaData> meta = new MetaData; 135 meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW); 136 meta->setInt32(kKeySampleRate, mRecord->getSampleRate()); 137 meta->setInt32(kKeyChannelCount, mRecord->channelCount()); 138 meta->setInt32(kKeyMaxInputSize, kMaxBufferSize); 139 140 return meta; 141} 142 143void AudioSource::rampVolume( 144 
int32_t startFrame, int32_t rampDurationFrames, 145 uint8_t *data, size_t bytes) { 146 147 const int32_t kShift = 14; 148 int32_t fixedMultiplier = (startFrame << kShift) / rampDurationFrames; 149 const int32_t nChannels = mRecord->channelCount(); 150 int32_t stopFrame = startFrame + bytes / sizeof(int16_t); 151 int16_t *frame = (int16_t *) data; 152 if (stopFrame > rampDurationFrames) { 153 stopFrame = rampDurationFrames; 154 } 155 156 while (startFrame < stopFrame) { 157 if (nChannels == 1) { // mono 158 frame[0] = (frame[0] * fixedMultiplier) >> kShift; 159 ++frame; 160 ++startFrame; 161 } else { // stereo 162 frame[0] = (frame[0] * fixedMultiplier) >> kShift; 163 frame[1] = (frame[1] * fixedMultiplier) >> kShift; 164 frame += 2; 165 startFrame += 2; 166 } 167 168 // Update the multiplier every 4 frames 169 if ((startFrame & 3) == 0) { 170 fixedMultiplier = (startFrame << kShift) / rampDurationFrames; 171 } 172 } 173} 174 175status_t AudioSource::read( 176 MediaBuffer **out, const ReadOptions *options) { 177 178 if (mInitCheck != OK) { 179 return NO_INIT; 180 } 181 182 int64_t readTimeUs = systemTime() / 1000; 183 *out = NULL; 184 185 MediaBuffer *buffer; 186 CHECK_EQ(mGroup->acquire_buffer(&buffer), OK); 187 188 int err = 0; 189 if (mStarted) { 190 191 uint32_t numFramesRecorded; 192 mRecord->getPosition(&numFramesRecorded); 193 194 195 if (numFramesRecorded == 0 && mPrevSampleTimeUs == 0) { 196 mInitialReadTimeUs = readTimeUs; 197 // Initial delay 198 if (mStartTimeUs > 0) { 199 mStartTimeUs = readTimeUs - mStartTimeUs; 200 } else { 201 // Assume latency is constant. 202 mStartTimeUs += mRecord->latency() * 1000; 203 } 204 mPrevSampleTimeUs = mStartTimeUs; 205 } 206 207 uint32_t sampleRate = mRecord->getSampleRate(); 208 209 // Insert null frames when lost frames are detected. 
210 int64_t timestampUs = mPrevSampleTimeUs; 211 uint32_t numLostBytes = mRecord->getInputFramesLost() << 1; 212 numLostBytes += mPrevLostBytes; 213#if 0 214 // Simulate lost frames 215 numLostBytes = ((rand() * 1.0 / RAND_MAX)) * 2 * kMaxBufferSize; 216 numLostBytes &= 0xFFFFFFFE; // Alignment requirement 217 218 // Reduce the chance to lose 219 if (rand() * 1.0 / RAND_MAX >= 0.05) { 220 numLostBytes = 0; 221 } 222#endif 223 if (numLostBytes > 0) { 224 if (numLostBytes > kMaxBufferSize) { 225 mPrevLostBytes = numLostBytes - kMaxBufferSize; 226 numLostBytes = kMaxBufferSize; 227 } else { 228 mPrevLostBytes = 0; 229 } 230 231 CHECK_EQ(numLostBytes & 1, 0); 232 timestampUs += ((1000000LL * (numLostBytes >> 1)) + 233 (sampleRate >> 1)) / sampleRate; 234 235 CHECK(timestampUs > mPrevSampleTimeUs); 236 if (mCollectStats) { 237 mTotalLostFrames += (numLostBytes >> 1); 238 } 239 memset(buffer->data(), 0, numLostBytes); 240 buffer->set_range(0, numLostBytes); 241 if (numFramesRecorded == 0) { 242 buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs); 243 } 244 buffer->meta_data()->setInt64(kKeyTime, mStartTimeUs + mPrevSampleTimeUs); 245 buffer->meta_data()->setInt64(kKeyDriftTime, readTimeUs - mInitialReadTimeUs); 246 mPrevSampleTimeUs = timestampUs; 247 *out = buffer; 248 return OK; 249 } 250 251 ssize_t n = mRecord->read(buffer->data(), buffer->size()); 252 if (n < 0) { 253 buffer->release(); 254 return (status_t)n; 255 } 256 257 int64_t recordDurationUs = (1000000LL * n >> 1) / sampleRate; 258 timestampUs += recordDurationUs; 259 260 if (mPrevSampleTimeUs - mStartTimeUs < kAutoRampStartUs) { 261 // Mute the initial video recording signal 262 memset((uint8_t *) buffer->data(), 0, n); 263 } else if (mPrevSampleTimeUs - mStartTimeUs < kAutoRampStartUs + kAutoRampDurationUs) { 264 int32_t autoRampDurationFrames = 265 (kAutoRampDurationUs * sampleRate + 500000LL) / 1000000LL; 266 267 int32_t autoRampStartFrames = 268 (kAutoRampStartUs * sampleRate + 500000LL) / 
1000000LL; 269 270 int32_t nFrames = numFramesRecorded - autoRampStartFrames; 271 rampVolume(nFrames, autoRampDurationFrames, (uint8_t *) buffer->data(), n); 272 } 273 if (mTrackMaxAmplitude) { 274 trackMaxAmplitude((int16_t *) buffer->data(), n >> 1); 275 } 276 277 if (numFramesRecorded == 0) { 278 buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs); 279 } 280 281 buffer->meta_data()->setInt64(kKeyTime, mStartTimeUs + mPrevSampleTimeUs); 282 buffer->meta_data()->setInt64(kKeyDriftTime, readTimeUs - mInitialReadTimeUs); 283 CHECK(timestampUs > mPrevSampleTimeUs); 284 mPrevSampleTimeUs = timestampUs; 285 LOGV("initial delay: %lld, sample rate: %d, timestamp: %lld", 286 mStartTimeUs, sampleRate, timestampUs); 287 288 buffer->set_range(0, n); 289 290 *out = buffer; 291 return OK; 292 } 293 294 return OK; 295} 296 297void AudioSource::trackMaxAmplitude(int16_t *data, int nSamples) { 298 for (int i = nSamples; i > 0; --i) { 299 int16_t value = *data++; 300 if (value < 0) { 301 value = -value; 302 } 303 if (mMaxAmplitude < value) { 304 mMaxAmplitude = value; 305 } 306 } 307} 308 309int16_t AudioSource::getMaxAmplitude() { 310 // First call activates the tracking. 311 if (!mTrackMaxAmplitude) { 312 mTrackMaxAmplitude = true; 313 } 314 int16_t value = mMaxAmplitude; 315 mMaxAmplitude = 0; 316 LOGV("max amplitude since last call: %d", value); 317 return value; 318} 319 320} // namespace android 321