AudioStreamInternalCapture.cpp revision bcc3674648bc6f554d89a2a5d7721ed41c53f83b
/*
 * Copyright (C) 2017 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <algorithm>
#include <aaudio/AAudio.h>

#include "client/AudioStreamInternalCapture.h"
#include "utility/AudioClock.h"

#define ATRACE_TAG ATRACE_TAG_AUDIO
#include <utils/Trace.h>

using android::WrappingBuffer;

using namespace aaudio;

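// Illustrative only: this class is the client-side capture stream that is serviced by
// AAudioService over a shared-memory (MMAP) endpoint. A minimal sketch of the public
// NDK calls that typically end up here, assuming the device routes the input stream to
// this implementation (buffer size, format, and timeout below are hypothetical):
//
//     AAudioStreamBuilder *builder = nullptr;
//     AAudio_createStreamBuilder(&builder);
//     AAudioStreamBuilder_setDirection(builder, AAUDIO_DIRECTION_INPUT);
//     AAudioStreamBuilder_setPerformanceMode(builder, AAUDIO_PERFORMANCE_MODE_LOW_LATENCY);
//
//     AAudioStream *stream = nullptr;
//     AAudioStreamBuilder_openStream(builder, &stream);
//     AAudioStream_requestStart(stream);
//
//     int16_t audioData[1024];                      // hypothetical mono PCM_I16 buffer
//     int64_t timeoutNanos = 100 * 1000 * 1000;     // 100 ms
//     int32_t framesRead = AAudioStream_read(stream, audioData, 1024, timeoutNanos);
//
//     AAudioStream_requestStop(stream);
//     AAudioStream_close(stream);
//     AAudioStreamBuilder_delete(builder);
//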
AudioStreamInternalCapture::AudioStreamInternalCapture(AAudioServiceInterface &serviceInterface,
                                                       bool inService)
    : AudioStreamInternal(serviceInterface, inService) {
}

AudioStreamInternalCapture::~AudioStreamInternalCapture() {}

void AudioStreamInternalCapture::advanceClientToMatchServerPosition() {
    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t offset = readCounter - writeCounter;
    mFramesOffsetFromService += offset;
    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld,"
          " offsetFromService = %lld",
          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);

    // Force readCounter to match writeCounter.
    // This is because we cannot change the write counter in the hardware.
    mAudioEndpoint.setDataReadCounter(writeCounter);
}

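// Worked example of the bookkeeping above (hypothetical counter values): suppose
// readCounter = 0 and writeCounter = 960 when the catch-up runs. Then
// offset = 0 - 960 = -960, mFramesOffsetFromService drops by 960 frames, and the read
// counter is forced forward to 960. getFramesRead(), which reports
// readCounter + mFramesOffsetFromService, still returns 960 + (-960) = 0, so the
// application never sees its read position jump.
//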
// Read the data, blocking if needed when timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternalCapture::read(void *buffer, int32_t numFrames,
                                                 int64_t timeoutNanoseconds)
{
    return processData(buffer, numFrames, timeoutNanoseconds);
}

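// A hypothetical caller can use the timeout to choose between a blocking read and a
// non-blocking poll (buffer size and channel count below are illustrative):
//
//     int16_t audioData[960];    // one hypothetical burst of mono PCM_I16
//     // Blocks until 960 frames arrive or roughly 100 ms pass.
//     aaudio_result_t framesRead = read(audioData, 960, 100 * AAUDIO_NANOS_PER_MILLISECOND);
//     // Returns immediately with however many frames were already available.
//     aaudio_result_t framesPolled = read(audioData, 960, 0 /* timeoutNanoseconds */);
//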
// Read as much data as we can without blocking.
aaudio_result_t AudioStreamInternalCapture::processDataNow(void *buffer, int32_t numFrames,
                                                  int64_t currentNanoTime, int64_t *wakeTimePtr) {
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaRdNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from the server.
        // Keep waiting until we get some valid timestamps, then start reading from the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0;
    }
    // If we have gotten this far then we have at least one timestamp from the server.

    if (mAudioEndpoint.isFreeRunning()) {
        //ALOGD("AudioStreamInternalCapture::processDataNow() - update remote counter");
        // Update data queue based on the timing model.
        int64_t estimatedRemoteCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // TODO refactor, maybe use setRemoteCounter()
        mAudioEndpoint.setDataWriteCounter(estimatedRemoteCounter);
    }

    // This code assumes that we have already received valid timestamps.
    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the write index passed the read index then consider it an overrun.
    if (mAudioEndpoint.getEmptyFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaOverRuns", mXRunCount);
        }
    }

    // Read some data from the buffer.
    //ALOGD("AudioStreamInternalCapture::processDataNow() - readNowWithConversion(%d)", numFrames);
    int32_t framesProcessed = readNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternalCapture::processDataNow() - tried to read %d frames, read %d",
    //    numFrames, framesProcessed);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaRead", framesProcessed);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesProcessed >= 0) {
        // By default wake up about a millisecond from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternalCapture::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next write burst to occur?

                // Calculate frame position based off of the readCounter because
                // the writeCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataReadCounter() + mFramesPerBurst;
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;
    }

    ATRACE_END();
    return framesProcessed;
}
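
// Rough numbers for the AAUDIO_STREAM_STATE_STARTED case above (hypothetical stream):
// at 48 kHz with a 192-frame burst, nextPosition is one burst past the current read
// counter, so mClockModel.convertPositionToTime() schedules the wake-up for the moment
// the timing model expects the capture hardware to have written those extra 192 frames,
// roughly 4 ms later.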

aaudio_result_t AudioStreamInternalCapture::readNowWithConversion(void *buffer,
                                                                  int32_t numFrames) {
    // ALOGD("AudioStreamInternalCapture::readNowWithConversion(%p, %d)",
    //       buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *destination = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getFullFramesAvailable(&wrappingBuffer);

    // Read data in one or two parts.
    for (int partIndex = 0; framesLeft > 0 && partIndex < WrappingBuffer::SIZE; partIndex++) {
        int32_t framesToProcess = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable <= 0) break;

        if (framesToProcess > framesAvailable) {
            framesToProcess = framesAvailable;
        }

        int32_t numBytes = getBytesPerFrame() * framesToProcess;
        int32_t numSamples = framesToProcess * getSamplesPerFrame();

        // TODO factor this out into a utility function
        if (mDeviceFormat == getFormat()) {
            memcpy(destination, wrappingBuffer.data[partIndex], numBytes);
        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16
                   && getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
            AAudioConvert_pcm16ToFloat(
                    (const int16_t *) wrappingBuffer.data[partIndex],
                    (float *) destination,
                    numSamples,
                    1.0f);
        } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT
                   && getFormat() == AAUDIO_FORMAT_PCM_I16) {
            AAudioConvert_floatToPcm16(
                    (const float *) wrappingBuffer.data[partIndex],
                    (int16_t *) destination,
                    numSamples,
                    1.0f);
        } else {
            ALOGE("Format conversion not supported!");
            return AAUDIO_ERROR_INVALID_FORMAT;
        }
        destination += numBytes;
        framesLeft -= framesToProcess;
    }

    int32_t framesProcessed = numFrames - framesLeft;
    mAudioEndpoint.advanceReadIndex(framesProcessed);

    //ALOGD("AudioStreamInternalCapture::readNowWithConversion() returns %d", framesProcessed);
    return framesProcessed;
}
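
// Worked example of the two-part read above (hypothetical FIFO geometry): with a
// 1024-frame buffer, the read index at frame 900 and 200 full frames available, the
// WrappingBuffer exposes part 0 as the 124 frames up to the end of the buffer and
// part 1 as the remaining 76 frames at the start. Assuming the caller asked for at
// least 200 frames, both loop iterations copy data and advanceReadIndex(200) is
// applied once at the end.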

int64_t AudioStreamInternalCapture::getFramesWritten() {
    int64_t framesWrittenHardware;
    if (isActive()) {
        framesWrittenHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
    } else {
        framesWrittenHardware = mAudioEndpoint.getDataWriteCounter();
    }
    // Prevent retrograde motion.
    mLastFramesWritten = std::max(mLastFramesWritten,
                                  framesWrittenHardware + mFramesOffsetFromService);
    //ALOGD("AudioStreamInternalCapture::getFramesWritten() returns %lld",
    //      (long long)mLastFramesWritten);
    return mLastFramesWritten;
}
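
// Example of the retrograde guard above (hypothetical values, ignoring
// mFramesOffsetFromService): if the clock model last estimated 48240 frames while the
// stream was active, and the write counter reads 48192 just after the stream stops,
// std::max() keeps reporting 48240 rather than letting the apparent position move
// backwards.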

int64_t AudioStreamInternalCapture::getFramesRead() {
    int64_t frames = mAudioEndpoint.getDataReadCounter() + mFramesOffsetFromService;
    //ALOGD("AudioStreamInternalCapture::getFramesRead() returns %lld", (long long)frames);
    return frames;
}

// Read data from the stream and pass it to the callback for processing.
void *AudioStreamInternalCapture::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {

        // Read audio data from stream.
        int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

        // This is a BLOCKING READ!
        result = read(mCallbackBuffer, mCallbackFrames, timeoutNanos);
        if (result != mCallbackFrames) {
            ALOGE("AudioStreamInternalCapture(): callbackLoop: read() returned %d", result);
            if (result >= 0) {
                // Only read some of the frames requested. Must have timed out.
                result = AAUDIO_ERROR_TIMEOUT;
            }
            AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
            if (errorCallback != nullptr) {
                (*errorCallback)(
                        (AAudioStream *) this,
                        getErrorCallbackUserData(),
                        result);
            }
            break;
        }

        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternalCapture(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternalCapture(): callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}

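// Illustrative only: callbackLoop() runs when the application registered a data callback
// through the public NDK API instead of calling AAudioStream_read() directly. A minimal
// sketch (the callback name and user data below are hypothetical):
//
//     aaudio_data_callback_result_t myRecorderCallback(AAudioStream *stream, void *userData,
//                                                      void *audioData, int32_t numFrames) {
//         // Consume numFrames of captured audio from audioData here.
//         return AAUDIO_CALLBACK_RESULT_CONTINUE;   // or ..._RESULT_STOP to end the loop
//     }
//
//     AAudioStreamBuilder_setDataCallback(builder, myRecorderCallback, myUserData);
//     AAudioStreamBuilder_setErrorCallback(builder, myErrorCallback, myUserData);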