AudioSource.cpp revision 082830f92373a1b9e512dbbfb940187ffa1c2c6f
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioSource"
#include <utils/Log.h>

#include <media/AudioRecord.h>
#include <media/stagefright/AudioSource.h>
#include <media/stagefright/MediaBuffer.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/ALooper.h>
#include <cutils/properties.h>
#include <stdlib.h>

namespace android {

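// C-style trampoline registered with AudioRecord: forwards EVENT_MORE_DATA to
// the owning AudioSource instance and logs overruns.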
static void AudioRecordCallbackFunction(int event, void *user, void *info) {
    AudioSource *source = (AudioSource *) user;
    switch (event) {
        case AudioRecord::EVENT_MORE_DATA: {
            source->dataCallback(*((AudioRecord::Buffer *) info));
            break;
        }
        case AudioRecord::EVENT_OVERRUN: {
            ALOGW("AudioRecord reported overrun!");
            break;
        }
        default:
            // does nothing
            break;
    }
}

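// Size the AudioRecord so that each callback delivers at most kMaxBufferSize
// bytes of 16-bit PCM while the total buffer still covers the platform's
// minimum frame count.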
AudioSource::AudioSource(
        audio_source_t inputSource, uint32_t sampleRate, uint32_t channelCount)
    : mRecord(NULL),
      mStarted(false),
      mSampleRate(sampleRate),
      mPrevSampleTimeUs(0),
      mNumFramesReceived(0),
      mNumClientOwnedBuffers(0),
      mUseLooperTime(false) {

    ALOGV("sampleRate: %d, channelCount: %d", sampleRate, channelCount);
    CHECK(channelCount == 1 || channelCount == 2);

    int minFrameCount;
    status_t status = AudioRecord::getMinFrameCount(&minFrameCount,
                                           sampleRate,
                                           AUDIO_FORMAT_PCM_16_BIT,
                                           audio_channel_in_mask_from_count(channelCount));
    if (status == OK) {
        // make sure that the AudioRecord callback never returns more than the maximum
        // buffer size
        int frameCount = kMaxBufferSize / sizeof(int16_t) / channelCount;

        // make sure that the AudioRecord total buffer size is large enough
        int bufCount = 2;
        while ((bufCount * frameCount) < minFrameCount) {
            bufCount++;
        }

        mRecord = new AudioRecord(
                    inputSource, sampleRate, AUDIO_FORMAT_PCM_16_BIT,
                    audio_channel_in_mask_from_count(channelCount),
                    bufCount * frameCount,
                    AudioRecordCallbackFunction,
                    this,
                    frameCount);
        mInitCheck = mRecord->initCheck();
    } else {
        mInitCheck = status;
    }
}

AudioSource::~AudioSource() {
    if (mStarted) {
        reset();
    }

    delete mRecord;
    mRecord = NULL;
}

status_t AudioSource::initCheck() const {
    return mInitCheck;
}

void AudioSource::setUseLooperTime(bool useLooperTime) {
    CHECK(!mStarted);

    mUseLooperTime = useLooperTime;
}

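// Start capturing. An optional kKeyTime entry in |params| supplies the
// recorder's start time, which dataCallback() uses to derive the initial
// capture delay.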
status_t AudioSource::start(MetaData *params) {
    Mutex::Autolock autoLock(mLock);
    if (mStarted) {
        return UNKNOWN_ERROR;
    }

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    mTrackMaxAmplitude = false;
    mMaxAmplitude = 0;
    mInitialReadTimeUs = 0;
    mStartTimeUs = 0;
    int64_t startTimeUs;
    if (params && params->findInt64(kKeyTime, &startTimeUs)) {
        mStartTimeUs = startTimeUs;
    }
    status_t err = mRecord->start();
    if (err == OK) {
        mStarted = true;
    } else {
        delete mRecord;
        mRecord = NULL;
    }

    return err;
}

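// Discard any buffers that the callback queued but no client ever read.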
void AudioSource::releaseQueuedFrames_l() {
    ALOGV("releaseQueuedFrames_l");
    List<MediaBuffer *>::iterator it;
    while (!mBuffersReceived.empty()) {
        it = mBuffersReceived.begin();
        (*it)->release();
        mBuffersReceived.erase(it);
    }
}

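// Block until every MediaBuffer handed out by read() has been returned via
// signalBufferReturned().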
void AudioSource::waitOutstandingEncodingFrames_l() {
    ALOGV("waitOutstandingEncodingFrames_l: %lld", mNumClientOwnedBuffers);
    while (mNumClientOwnedBuffers > 0) {
        mFrameEncodingCompletionCondition.wait(mLock);
    }
}

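// Stop the AudioRecord, wait for any buffers still owned by the client, then
// discard whatever remains queued.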
status_t AudioSource::reset() {
    Mutex::Autolock autoLock(mLock);
    if (!mStarted) {
        return UNKNOWN_ERROR;
    }

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    mStarted = false;
    mRecord->stop();
    waitOutstandingEncodingFrames_l();
    releaseQueuedFrames_l();

    return OK;
}

sp<MetaData> AudioSource::getFormat() {
    Mutex::Autolock autoLock(mLock);
    if (mInitCheck != OK) {
        return 0;
    }

    sp<MetaData> meta = new MetaData;
    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
    meta->setInt32(kKeySampleRate, mSampleRate);
    meta->setInt32(kKeyChannelCount, mRecord->channelCount());
    meta->setInt32(kKeyMaxInputSize, kMaxBufferSize);

    return meta;
}

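// Apply a linear fade-in to the first rampDurationFrames frames of audio.
// The gain startFrame / rampDurationFrames is evaluated in Q14 fixed point
// and refreshed every four frames.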
void AudioSource::rampVolume(
        int32_t startFrame, int32_t rampDurationFrames,
        uint8_t *data, size_t bytes) {

    const int32_t kShift = 14;
    int32_t fixedMultiplier = (startFrame << kShift) / rampDurationFrames;
    const int32_t nChannels = mRecord->channelCount();
    int32_t stopFrame = startFrame + bytes / sizeof(int16_t);
    int16_t *frame = (int16_t *) data;
    if (stopFrame > rampDurationFrames) {
        stopFrame = rampDurationFrames;
    }

    while (startFrame < stopFrame) {
        if (nChannels == 1) {  // mono
            frame[0] = (frame[0] * fixedMultiplier) >> kShift;
            ++frame;
            ++startFrame;
        } else {               // stereo
            frame[0] = (frame[0] * fixedMultiplier) >> kShift;
            frame[1] = (frame[1] * fixedMultiplier) >> kShift;
            frame += 2;
            startFrame += 2;
        }

        // Update the multiplier every 4 frames
        if ((startFrame & 3) == 0) {
            fixedMultiplier = (startFrame << kShift) / rampDurationFrames;
        }
    }
}

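// Return the next captured buffer to the caller. Audio captured during the
// first kAutoRampStartUs is muted and the following kAutoRampDurationUs is
// faded in, suppressing the sound of the recording starting up.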
status_t AudioSource::read(
        MediaBuffer **out, const ReadOptions *options) {
    Mutex::Autolock autoLock(mLock);
    *out = NULL;

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    while (mStarted && mBuffersReceived.empty()) {
        mFrameAvailableCondition.wait(mLock);
    }
    if (!mStarted) {
        return OK;
    }
    MediaBuffer *buffer = *mBuffersReceived.begin();
    mBuffersReceived.erase(mBuffersReceived.begin());
    ++mNumClientOwnedBuffers;
    buffer->setObserver(this);
    buffer->add_ref();

    // Mute/suppress the recording sound
    int64_t timeUs;
    CHECK(buffer->meta_data()->findInt64(kKeyTime, &timeUs));
    int64_t elapsedTimeUs = timeUs - mStartTimeUs;
    if (elapsedTimeUs < kAutoRampStartUs) {
        memset((uint8_t *) buffer->data(), 0, buffer->range_length());
    } else if (elapsedTimeUs < kAutoRampStartUs + kAutoRampDurationUs) {
        int32_t autoRampDurationFrames =
                    (kAutoRampDurationUs * mSampleRate + 500000LL) / 1000000LL;

        int32_t autoRampStartFrames =
                    (kAutoRampStartUs * mSampleRate + 500000LL) / 1000000LL;

        int32_t nFrames = mNumFramesReceived - autoRampStartFrames;
        rampVolume(nFrames, autoRampDurationFrames,
                (uint8_t *) buffer->data(), buffer->range_length());
    }

    // Track the max recording signal amplitude.
    if (mTrackMaxAmplitude) {
        trackMaxAmplitude(
            (int16_t *) buffer->data(), buffer->range_length() >> 1);
    }

    *out = buffer;
    return OK;
}

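// MediaBufferObserver callback: the client has finished with |buffer|, so
// release it and wake up reset() if it is waiting.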
void AudioSource::signalBufferReturned(MediaBuffer *buffer) {
    ALOGV("signalBufferReturned: %p", buffer->data());
    Mutex::Autolock autoLock(mLock);
    --mNumClientOwnedBuffers;
    buffer->setObserver(0);
    buffer->release();
    mFrameEncodingCompletionCondition.signal();
    return;
}

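// Runs on the AudioRecord callback thread. Establishes the start time on the
// first buffer, substitutes silence for any frames AudioRecord reports lost,
// and queues the new PCM data for read().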
status_t AudioSource::dataCallback(const AudioRecord::Buffer& audioBuffer) {
    int64_t timeUs =
        mUseLooperTime ? ALooper::GetNowUs() : (systemTime() / 1000ll);

    ALOGV("dataCallback: %lld us", timeUs);
    Mutex::Autolock autoLock(mLock);
    if (!mStarted) {
        ALOGW("Spurious callback from AudioRecord. Drop the audio data.");
        return OK;
    }

    // Drop retrieved and previously lost audio data.
    if (mNumFramesReceived == 0 && timeUs < mStartTimeUs) {
        mRecord->getInputFramesLost();
        ALOGV("Drop audio data at %lld/%lld us", timeUs, mStartTimeUs);
        return OK;
    }

    if (mNumFramesReceived == 0 && mPrevSampleTimeUs == 0) {
        mInitialReadTimeUs = timeUs;
        // Initial delay
        if (mUseLooperTime) {
            mStartTimeUs = timeUs;
        } else if (mStartTimeUs > 0) {
            mStartTimeUs = timeUs - mStartTimeUs;
        } else {
            // Assume latency is constant.
            mStartTimeUs += mRecord->latency() * 1000;
        }

        mPrevSampleTimeUs = mStartTimeUs;
    }

    size_t numLostBytes = 0;
    if (mNumFramesReceived > 0) {  // Ignore earlier frame lost
        // getInputFramesLost() returns the number of lost frames.
        // Convert number of frames lost to number of bytes lost.
        numLostBytes = mRecord->getInputFramesLost() * mRecord->frameSize();
    }

    CHECK_EQ(numLostBytes & 1, 0u);
    CHECK_EQ(audioBuffer.size & 1, 0u);
    if (numLostBytes > 0) {
        // Loss of audio frames should happen rarely; thus the ALOGW should
        // not cause a logging spam
        ALOGW("Lost audio record data: %zu bytes", numLostBytes);
    }

    while (numLostBytes > 0) {
        size_t bufferSize = numLostBytes;
        if (numLostBytes > kMaxBufferSize) {
            numLostBytes -= kMaxBufferSize;
            bufferSize = kMaxBufferSize;
        } else {
            numLostBytes = 0;
        }
        MediaBuffer *lostAudioBuffer = new MediaBuffer(bufferSize);
        memset(lostAudioBuffer->data(), 0, bufferSize);
        lostAudioBuffer->set_range(0, bufferSize);
        queueInputBuffer_l(lostAudioBuffer, timeUs);
    }

    if (audioBuffer.size == 0) {
        ALOGW("Nothing is available from AudioRecord callback buffer");
        return OK;
    }

    const size_t bufferSize = audioBuffer.size;
    MediaBuffer *buffer = new MediaBuffer(bufferSize);
    memcpy((uint8_t *) buffer->data(),
            audioBuffer.i16, audioBuffer.size);
    buffer->set_range(0, bufferSize);
    queueInputBuffer_l(buffer, timeUs);
    return OK;
}

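// Stamp the buffer with the start time of its first sample (mPrevSampleTimeUs)
// and advance mPrevSampleTimeUs by the buffer's duration, rounded to the
// nearest microsecond.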
void AudioSource::queueInputBuffer_l(MediaBuffer *buffer, int64_t timeUs) {
    const size_t bufferSize = buffer->range_length();
    const size_t frameSize = mRecord->frameSize();
    const int64_t timestampUs =
                mPrevSampleTimeUs +
                    ((1000000LL * (bufferSize / frameSize)) +
                        (mSampleRate >> 1)) / mSampleRate;

    if (mNumFramesReceived == 0) {
        buffer->meta_data()->setInt64(kKeyAnchorTime, mStartTimeUs);
    }

    buffer->meta_data()->setInt64(kKeyTime, mPrevSampleTimeUs);
    buffer->meta_data()->setInt64(kKeyDriftTime, timeUs - mInitialReadTimeUs);
    mPrevSampleTimeUs = timestampUs;
    mNumFramesReceived += bufferSize / frameSize;
    mBuffersReceived.push_back(buffer);
    mFrameAvailableCondition.signal();
}

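// Record the largest absolute sample value seen since the last call to
// getMaxAmplitude().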
void AudioSource::trackMaxAmplitude(int16_t *data, int nSamples) {
    for (int i = nSamples; i > 0; --i) {
        int16_t value = *data++;
        if (value < 0) {
            value = -value;
        }
        if (mMaxAmplitude < value) {
            mMaxAmplitude = value;
        }
    }
}

int16_t AudioSource::getMaxAmplitude() {
    // First call activates the tracking.
    if (!mTrackMaxAmplitude) {
        mTrackMaxAmplitude = true;
    }
    int16_t value = mMaxAmplitude;
    mMaxAmplitude = 0;
    ALOGV("max amplitude since last call: %d", value);
    return value;
}

}  // namespace android