AudioSource.cpp revision d707fcb3e29707ca4a5935c294ef0b38eb5aba5f
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioSource"
#include <utils/Log.h>

#include <media/stagefright/AudioSource.h>

#include <media/AudioRecord.h>
#include <media/stagefright/MediaBufferGroup.h>
#include <media/stagefright/MediaDebug.h>
#include <media/stagefright/MediaDefs.h>
#include <media/stagefright/MetaData.h>
#include <cutils/properties.h>
#include <stdlib.h>

namespace android {

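// AudioSource wraps an AudioRecord capture client and exposes it as a
// MediaSource that delivers raw 16-bit PCM buffers (see getFormat() below).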
AudioSource::AudioSource(
        int inputSource, uint32_t sampleRate, uint32_t channels)
    : mStarted(false),
      mCollectStats(false),
      mPrevSampleTimeUs(0),
      mTotalLostFrames(0),
      mPrevLostBytes(0),
      mGroup(NULL) {

    LOGV("sampleRate: %d, channels: %d", sampleRate, channels);
    CHECK(channels == 1 || channels == 2);
    uint32_t flags = AudioRecord::RECORD_AGC_ENABLE |
                     AudioRecord::RECORD_NS_ENABLE  |
                     AudioRecord::RECORD_IIR_ENABLE;

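    // The AudioRecord client buffer is sized to hold several kMaxBufferSize
    // reads (the "ping-pong" arrangement noted below) so capture can continue
    // while previously delivered data is still being drained.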
    mRecord = new AudioRecord(
                inputSource, sampleRate, AudioSystem::PCM_16_BIT,
                channels > 1 ? AudioSystem::CHANNEL_IN_STEREO : AudioSystem::CHANNEL_IN_MONO,
                4 * kMaxBufferSize / sizeof(int16_t), /* Enable ping-pong buffers */
                flags);

    mInitCheck = mRecord->initCheck();
}

AudioSource::~AudioSource() {
    if (mStarted) {
        stop();
    }

    delete mRecord;
    mRecord = NULL;
}

status_t AudioSource::initCheck() const {
    return mInitCheck;
}

status_t AudioSource::start(MetaData *params) {
    if (mStarted) {
        return UNKNOWN_ERROR;
    }

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    char value[PROPERTY_VALUE_MAX];
    if (property_get("media.stagefright.record-stats", value, NULL)
        && (!strcmp(value, "1") || !strcasecmp(value, "true"))) {
        mCollectStats = true;
    }

    mTrackMaxAmplitude = false;
    mMaxAmplitude = 0;
    mInitialReadTimeUs = 0;
    mStartTimeUs = 0;
    int64_t startTimeUs;
    if (params && params->findInt64(kKeyTime, &startTimeUs)) {
        mStartTimeUs = startTimeUs;
    }
    status_t err = mRecord->start();

    if (err == OK) {
        mGroup = new MediaBufferGroup;
        mGroup->add_buffer(new MediaBuffer(kMaxBufferSize));

        mStarted = true;
    }

    return err;
}

status_t AudioSource::stop() {
    if (!mStarted) {
        return UNKNOWN_ERROR;
    }

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    mRecord->stop();

    delete mGroup;
    mGroup = NULL;

    mStarted = false;

    if (mCollectStats) {
        LOGI("Total lost audio frames: %lld",
            mTotalLostFrames + (mPrevLostBytes >> 1));
    }

    return OK;
}

sp<MetaData> AudioSource::getFormat() {
    if (mInitCheck != OK) {
        return 0;
    }

    sp<MetaData> meta = new MetaData;
    meta->setCString(kKeyMIMEType, MEDIA_MIMETYPE_AUDIO_RAW);
    meta->setInt32(kKeySampleRate, mRecord->getSampleRate());
    meta->setInt32(kKeyChannelCount, mRecord->channelCount());
    meta->setInt32(kKeyMaxInputSize, kMaxBufferSize);

    return meta;
}

/*
 * Returns -1 if the requested frame skip is too long.
 * Returns  0 if there is no need to skip frames.
 * Returns  1 if we need to skip frames.
 */
static int skipFrame(int64_t timestampUs,
        const MediaSource::ReadOptions *options) {

    int64_t skipFrameUs;
    if (!options || !options->getSkipFrame(&skipFrameUs)) {
        return 0;
    }

    if (skipFrameUs <= timestampUs) {
        return 0;
    }

    // Safeguard against abuse of the kSkipFrame_Option.
    if (skipFrameUs - timestampUs >= 1E6) {
        LOGE("Frame skipping requested is way too long: %lld us",
            skipFrameUs - timestampUs);

        return -1;
    }

    LOGV("skipFrame: %lld us > timestamp: %lld us",
        skipFrameUs, timestampUs);

    return 1;
}

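// rampVolume() applies a linear fade-in in Q14 fixed point (kShift == 14):
// the gain is startFrame / rampDurationFrames scaled by 1 << kShift, applied
// to each 16-bit sample in place and refreshed every four frames until
// rampDurationFrames is reached.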
void AudioSource::rampVolume(
        int32_t startFrame, int32_t rampDurationFrames,
        uint8_t *data, size_t bytes) {

    const int32_t kShift = 14;
    int32_t fixedMultiplier = (startFrame << kShift) / rampDurationFrames;
    const int32_t nChannels = mRecord->channelCount();
    int32_t stopFrame = startFrame + bytes / sizeof(int16_t);
    int16_t *frame = (int16_t *) data;
    if (stopFrame > rampDurationFrames) {
        stopFrame = rampDurationFrames;
    }

    while (startFrame < stopFrame) {
        if (nChannels == 1) {  // mono
            frame[0] = (frame[0] * fixedMultiplier) >> kShift;
            ++frame;
            ++startFrame;
        } else {               // stereo
            frame[0] = (frame[0] * fixedMultiplier) >> kShift;
            frame[1] = (frame[1] * fixedMultiplier) >> kShift;
            frame += 2;
            startFrame += 2;
        }

        // Update the multiplier every 4 frames
        if ((startFrame & 3) == 0) {
            fixedMultiplier = (startFrame << kShift) / rampDurationFrames;
        }
    }
}

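// read() hands out one MediaBuffer per call. If the driver reported lost
// input frames, a zero-filled buffer of the corresponding size is returned
// first so the timeline stays continuous; otherwise the buffer is filled via
// AudioRecord::read(). Timestamps advance by the number of 16-bit samples
// delivered divided by the sample rate, and kKeyDriftTime carries the
// wall-clock time elapsed since the first read.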
status_t AudioSource::read(
        MediaBuffer **out, const ReadOptions *options) {

    if (mInitCheck != OK) {
        return NO_INIT;
    }

    int64_t readTimeUs = systemTime() / 1000;
    *out = NULL;

    MediaBuffer *buffer;
    CHECK_EQ(mGroup->acquire_buffer(&buffer), OK);

    int err = 0;
    while (mStarted) {

        uint32_t numFramesRecorded;
        mRecord->getPosition(&numFramesRecorded);

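        // On the very first read, anchor the timeline: if the caller supplied
        // a start time, the initial offset becomes the wall-clock delay
        // between that start time and this read; otherwise the AudioRecord
        // latency is used as the initial offset.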
        if (numFramesRecorded == 0 && mPrevSampleTimeUs == 0) {
            mInitialReadTimeUs = readTimeUs;
            // Initial delay
            if (mStartTimeUs > 0) {
                mStartTimeUs = readTimeUs - mStartTimeUs;
            } else {
                // Assume latency is constant.
                mStartTimeUs += mRecord->latency() * 1000;
            }
            mPrevSampleTimeUs = mStartTimeUs;
        }

        uint32_t sampleRate = mRecord->getSampleRate();

        // Insert null frames when lost frames are detected.
        int64_t timestampUs = mPrevSampleTimeUs;
        uint32_t numLostBytes = mRecord->getInputFramesLost() << 1;
        numLostBytes += mPrevLostBytes;
#if 0
        // Simulate lost frames
        numLostBytes = ((rand() * 1.0 / RAND_MAX)) * 2 * kMaxBufferSize;
        numLostBytes &= 0xFFFFFFFE; // Alignment requirement

        // Reduce the chance to lose
        if (rand() * 1.0 / RAND_MAX >= 0.05) {
            numLostBytes = 0;
        }
#endif
        if (numLostBytes > 0) {
            if (numLostBytes > kMaxBufferSize) {
                mPrevLostBytes = numLostBytes - kMaxBufferSize;
                numLostBytes = kMaxBufferSize;
            }

            CHECK_EQ(numLostBytes & 1, 0);
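            // Convert the lost byte count to a duration: each lost 16-bit
            // sample accounts for 1e6 / sampleRate microseconds, and adding
            // sampleRate / 2 before dividing rounds to the nearest microsecond.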
            timestampUs += ((1000000LL * (numLostBytes >> 1)) +
                    (sampleRate >> 1)) / sampleRate;

            CHECK(timestampUs > mPrevSampleTimeUs);
            if (mCollectStats) {
                mTotalLostFrames += (numLostBytes >> 1);
            }
            if ((err = skipFrame(timestampUs, options)) == -1) {
                buffer->release();
                return UNKNOWN_ERROR;
            } else if (err != 0) {
                continue;
            }
            memset(buffer->data(), 0, numLostBytes);
            buffer->set_range(0, numLostBytes);
            if (numFramesRecorded == 0) {
                buffer->meta_data()->setInt64(kKeyTime, mStartTimeUs);
            }
            buffer->meta_data()->setInt64(kKeyDriftTime, readTimeUs - mInitialReadTimeUs);
            mPrevSampleTimeUs = timestampUs;
            *out = buffer;
            return OK;
        }

        ssize_t n = mRecord->read(buffer->data(), buffer->size());
        if (n < 0) {
            buffer->release();
            return (status_t)n;
        }

        // n is a byte count; the >> 1 converts it to a count of 16-bit samples.
        int64_t recordDurationUs = ((1000000LL * n) >> 1) / sampleRate;
        timestampUs += recordDurationUs;
        if ((err = skipFrame(timestampUs, options)) == -1) {
            buffer->release();
            return UNKNOWN_ERROR;
        } else if (err != 0) {
            continue;
        }

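        // The first kAutoRampStartUs of captured audio is muted and the
        // following kAutoRampDurationUs is faded in via rampVolume(), which
        // suppresses artifacts at the very start of a recording.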
        if (mPrevSampleTimeUs - mStartTimeUs < kAutoRampStartUs) {
            // Mute the audio for the initial portion of the recording.
            memset((uint8_t *) buffer->data(), 0, n);
        } else if (mPrevSampleTimeUs - mStartTimeUs < kAutoRampStartUs + kAutoRampDurationUs) {
            int32_t autoRampDurationFrames =
                    (kAutoRampDurationUs * sampleRate + 500000LL) / 1000000LL;

            int32_t autoRampStartFrames =
                    (kAutoRampStartUs * sampleRate + 500000LL) / 1000000LL;

            int32_t nFrames = numFramesRecorded - autoRampStartFrames;
            rampVolume(nFrames, autoRampDurationFrames, (uint8_t *) buffer->data(), n);
        }
        if (mTrackMaxAmplitude) {
            trackMaxAmplitude((int16_t *) buffer->data(), n >> 1);
        }

        if (numFramesRecorded == 0) {
            buffer->meta_data()->setInt64(kKeyTime, mStartTimeUs);
        }
        buffer->meta_data()->setInt64(kKeyDriftTime, readTimeUs - mInitialReadTimeUs);
        CHECK(timestampUs > mPrevSampleTimeUs);
        mPrevSampleTimeUs = timestampUs;
        LOGV("initial delay: %lld, sample rate: %d, timestamp: %lld",
                mStartTimeUs, sampleRate, timestampUs);

        buffer->set_range(0, n);

        *out = buffer;
        return OK;
    }

    return OK;
}

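// Track the largest absolute sample value seen; the peak is reported and
// then reset by getMaxAmplitude().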
void AudioSource::trackMaxAmplitude(int16_t *data, int nSamples) {
    for (int i = nSamples; i > 0; --i) {
        int16_t value = *data++;
        if (value < 0) {
            value = -value;
        }
        if (mMaxAmplitude < value) {
            mMaxAmplitude = value;
        }
    }
}

int16_t AudioSource::getMaxAmplitude() {
    // First call activates the tracking.
    if (!mTrackMaxAmplitude) {
        mTrackMaxAmplitude = true;
    }
    int16_t value = mMaxAmplitude;
    mMaxAmplitude = 0;
    LOGV("max amplitude since last call: %d", value);
    return value;
}

}  // namespace android