1/*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG (mInService ? "AAudioService" : "AAudio")
18//#define LOG_NDEBUG 0
19#include <utils/Log.h>
20
21#define ATRACE_TAG ATRACE_TAG_AUDIO
22
23#include <utils/Trace.h>
24
25#include "client/AudioStreamInternalPlay.h"
26#include "utility/AudioClock.h"
27
28using android::WrappingBuffer;
29
30using namespace aaudio;
31
32AudioStreamInternalPlay::AudioStreamInternalPlay(AAudioServiceInterface  &serviceInterface,
33                                                       bool inService)
34        : AudioStreamInternal(serviceInterface, inService) {
35
36}
37
// No playback-specific resources to release; base class handles stream teardown.
AudioStreamInternalPlay::~AudioStreamInternalPlay() {}
39
40
41aaudio_result_t AudioStreamInternalPlay::requestPauseInternal()
42{
43    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
44        ALOGE("AudioStreamInternal::requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
45              mServiceStreamHandle);
46        return AAUDIO_ERROR_INVALID_STATE;
47    }
48
49    mClockModel.stop(AudioClock::getNanoseconds());
50    setState(AAUDIO_STREAM_STATE_PAUSING);
51    mAtomicTimestamp.clear();
52    return mServiceInterface.pauseStream(mServiceStreamHandle);
53}
54
55aaudio_result_t AudioStreamInternalPlay::requestPause()
56{
57    aaudio_result_t result = stopCallback();
58    if (result != AAUDIO_OK) {
59        return result;
60    }
61    result = requestPauseInternal();
62    return result;
63}
64
65aaudio_result_t AudioStreamInternalPlay::requestFlush() {
66    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
67        ALOGE("AudioStreamInternal::requestFlush() mServiceStreamHandle invalid = 0x%08X",
68              mServiceStreamHandle);
69        return AAUDIO_ERROR_INVALID_STATE;
70    }
71
72    setState(AAUDIO_STREAM_STATE_FLUSHING);
73    return mServiceInterface.flushStream(mServiceStreamHandle);
74}
75
76void AudioStreamInternalPlay::advanceClientToMatchServerPosition() {
77    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
78    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
79
80    // Bump offset so caller does not see the retrograde motion in getFramesRead().
81    int64_t offset = writeCounter - readCounter;
82    mFramesOffsetFromService += offset;
83    ALOGD("advanceClientToMatchServerPosition() readN = %lld, writeN = %lld, offset = %lld",
84          (long long)readCounter, (long long)writeCounter, (long long)mFramesOffsetFromService);
85
86    // Force writeCounter to match readCounter.
87    // This is because we cannot change the read counter in the hardware.
88    mAudioEndpoint.setDataWriteCounter(readCounter);
89}
90
// Called when the service reports that it flushed the stream.
// Resynchronize the client's write position with the server's read position.
void AudioStreamInternalPlay::onFlushFromServer() {
    advanceClientToMatchServerPosition();
}
94
95// Write the data, block if needed and timeoutMillis > 0
96aaudio_result_t AudioStreamInternalPlay::write(const void *buffer, int32_t numFrames,
97                                           int64_t timeoutNanoseconds)
98
99{
100    return processData((void *)buffer, numFrames, timeoutNanoseconds);
101}
102
// Write as much data as we can without blocking.
// Returns the number of frames written (>= 0) or a negative AAudio error.
// On return, *wakeTimePtr (if non-null) holds the suggested time to call again.
aaudio_result_t AudioStreamInternalPlay::processDataNow(void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {
    // Handle any pending commands from the service before touching the buffer.
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    const char *traceName = "aaWrNow";
    ATRACE_BEGIN(traceName);

    if (mClockModel.isStarting()) {
        // Still haven't got any timestamps from server.
        // Keep waiting until we get some valid timestamps then start writing to the
        // current buffer position.
        ALOGD("processDataNow() wait for valid timestamps");
        // Sleep very briefly and hope we get a timestamp soon.
        *wakeTimePtr = currentNanoTime + (2000 * AAUDIO_NANOS_PER_MICROSECOND);
        ATRACE_END();
        return 0; // zero frames written; caller should retry at *wakeTimePtr
    }
    // If we have gotten this far then we have at least one timestamp from server.

    // If a DMA channel or DSP is reading the other end then we have to update the readCounter.
    if (mAudioEndpoint.isFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        // ALOGD("AudioStreamInternal::processDataNow() - estimatedReadCounter = %d", (int)estimatedReadCounter);
        mAudioEndpoint.setDataReadCounter(estimatedReadCounter);
    }

    if (mNeedCatchUp.isRequested()) {
        // Catch an MMAP pointer that is already advancing.
        // This will avoid initial underruns caused by a slow cold start.
        advanceClientToMatchServerPosition();
        mNeedCatchUp.acknowledge();
    }

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
        if (ATRACE_ENABLED()) {
            ATRACE_INT("aaUnderRuns", mXRunCount);
        }
    }

    // Write some data to the buffer.
    //ALOGD("AudioStreamInternal::processDataNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD("AudioStreamInternal::processDataNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);
    if (ATRACE_ENABLED()) {
        ATRACE_INT("aaWrote", framesWritten);
    }

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD("AudioStreamInternal::processDataNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:
            {
                // When do we expect the next read burst to occur?

                // Calculate frame position based off of the writeCounter because
                // the readCounter might have just advanced in the background,
                // causing us to sleep until a later burst.
                int64_t nextPosition = mAudioEndpoint.getDataWriteCounter() + mFramesPerBurst
                        - mAudioEndpoint.getBufferSizeInFrames();
                // Convert that frame position into wall-clock time via the clock model.
                wakeTime = mClockModel.convertPositionToTime(nextPosition);
            }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }

    ATRACE_END();
    return framesWritten;
}
195
196
// Convert the application's audio data to the device format, apply the
// volume ramp, and copy it into the endpoint's circular buffer without blocking.
// Returns the number of frames actually written, which may be less than
// numFrames if the buffer fills up.
aaudio_result_t AudioStreamInternalPlay::writeNowWithConversion(const void *buffer,
                                                            int32_t numFrames) {
    // ALOGD("AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    // The circular buffer may expose its free space as up to two contiguous regions.
    mAudioEndpoint.getEmptyFramesAvailable(&wrappingBuffer);

    // Write data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            // ramping is true while the volume is still moving toward its target;
            // the non-ramping converters take a single constant level instead.
            bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
                                                   &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            // TODO factor this out into a utility function
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    // float app -> float device: just apply the volume ramp.
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    // float app -> int16 device: convert, with or without a ramp.
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    // int16 app -> float device: convert, with or without a ramp.
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    // int16 app -> int16 device: just apply the volume ramp.
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            // Advance the source pointer by the bytes consumed from the app buffer.
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            // No room in this part; the second part will also be empty.
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    // ALOGD("AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}
292
293int64_t AudioStreamInternalPlay::getFramesRead()
294{
295    int64_t framesReadHardware;
296    if (isActive()) {
297        framesReadHardware = mClockModel.convertTimeToPosition(AudioClock::getNanoseconds());
298    } else {
299        framesReadHardware = mAudioEndpoint.getDataReadCounter();
300    }
301    int64_t framesRead = framesReadHardware + mFramesOffsetFromService;
302    // Prevent retrograde motion.
303    if (framesRead < mLastFramesRead) {
304        framesRead = mLastFramesRead;
305    } else {
306        mLastFramesRead = framesRead;
307    }
308    //ALOGD("AudioStreamInternalPlay::getFramesRead() returns %lld", (long long)framesRead);
309    return framesRead;
310}
311
312int64_t AudioStreamInternalPlay::getFramesWritten()
313{
314    int64_t framesWritten = mAudioEndpoint.getDataWriteCounter()
315                               + mFramesOffsetFromService;
316    //ALOGD("AudioStreamInternalPlay::getFramesWritten() returns %lld", (long long)framesWritten);
317    return framesWritten;
318}
319
320
// Render audio in the application callback and then write the data to the stream.
// Runs on the callback thread until the callback is disabled, the stream goes
// inactive, a write fails, or the app returns AAUDIO_CALLBACK_RESULT_STOP.
// Always returns NULL (pthread-style thread entry signature).
void *AudioStreamInternalPlay::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;
    // Allow generous time for the blocking write of one callback burst.
    int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

    // result might be a frame count
    while (mCallbackEnabled.load() && isActive() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream. This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if ((result != mCallbackFrames)) {
                ALOGE("AudioStreamInternalPlay(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                // Report the failure to the application's error callback, if registered.
                AAudioStream_errorCallback errorCallback = getErrorCallbackProc();
                if (errorCallback != nullptr) {
                    (*errorCallback)(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternalPlay(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternalPlay(): callbackLoop() exiting, result = %d, isActive() = %d",
          result, (int) isActive());
    return NULL;
}
366
367//------------------------------------------------------------------------------
368// Implementation of PlayerBase
369status_t AudioStreamInternalPlay::doSetVolume() {
370    mVolumeRamp.setTarget(mStreamVolume * getDuckAndMuteVolume());
371    return android::NO_ERROR;
372}
373