AudioStreamInternal.cpp revision 5204d315c6c6f53188f8d1414dd1b55b6c90142b
1/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "AAudio"
18//#define LOG_NDEBUG 0
19#include <utils/Log.h>
20
21#include <stdint.h>
22#include <assert.h>
23
24#include <binder/IServiceManager.h>
25
26#include <aaudio/AAudio.h>
27#include <utils/String16.h>
28
29#include "AudioClock.h"
30#include "AudioEndpointParcelable.h"
31#include "binding/AAudioStreamRequest.h"
32#include "binding/AAudioStreamConfiguration.h"
33#include "binding/IAAudioService.h"
34#include "binding/AAudioServiceMessage.h"
35#include "core/AudioStreamBuilder.h"
36#include "fifo/FifoBuffer.h"
37#include "utility/LinearRamp.h"
38
39#include "AudioStreamInternal.h"
40
41#define LOG_TIMESTAMPS   0
42
43using android::String16;
44using android::Mutex;
45using android::WrappingBuffer;
46
47using namespace aaudio;
48
49#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)
50
51// Wait at least this many times longer than the operation should take.
52#define MIN_TIMEOUT_OPERATIONS    4
53
54//static int64_t s_logCounter = 0;
55//#define MYLOG_CONDITION   (mInService == true && s_logCounter++ < 500)
56//#define MYLOG_CONDITION   (s_logCounter++ < 500000)
57#define MYLOG_CONDITION   (1)
58
// Client-side stream implementation that talks to the AAudio service and
// exchanges audio data with it through a shared-memory endpoint.
// mFramesPerBurst starts with a small placeholder until the server reports
// the real burst size in open().
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mFramesPerBurst(16)
        , mServiceInterface(serviceInterface) // Binder proxy, or local object when running in the service
        , mInService(inService) {
}
68
// Resources (service stream, callback buffer, parcelable) are released in close().
AudioStreamInternal::~AudioStreamInternal() {
}
71
72aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
73
74    aaudio_result_t result = AAUDIO_OK;
75    AAudioStreamRequest request;
76    AAudioStreamConfiguration configuration;
77
78    result = AudioStream::open(builder);
79    if (result < 0) {
80        return result;
81    }
82
83    // We have to do volume scaling. So we prefer FLOAT format.
84    if (getFormat() == AAUDIO_UNSPECIFIED) {
85        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
86    }
87    // Request FLOAT for the shared mixer.
88    request.getConfiguration().setAudioFormat(AAUDIO_FORMAT_PCM_FLOAT);
89
90    // Build the request to send to the server.
91    request.setUserId(getuid());
92    request.setProcessId(getpid());
93    request.setDirection(getDirection());
94    request.setSharingModeMatchRequired(isSharingModeMatchRequired());
95
96    request.getConfiguration().setDeviceId(getDeviceId());
97    request.getConfiguration().setSampleRate(getSampleRate());
98    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
99    request.getConfiguration().setSharingMode(getSharingMode());
100
101    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
102
103    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
104    if (mServiceStreamHandle < 0) {
105        result = mServiceStreamHandle;
106        ALOGE("AudioStreamInternal.open(): %s openStream() returned %d", getLocationName(), result);
107    } else {
108        result = configuration.validate();
109        if (result != AAUDIO_OK) {
110            close();
111            return result;
112        }
113        // Save results of the open.
114        setSampleRate(configuration.getSampleRate());
115        setSamplesPerFrame(configuration.getSamplesPerFrame());
116        setDeviceId(configuration.getDeviceId());
117
118        // Save device format so we can do format conversion and volume scaling together.
119        mDeviceFormat = configuration.getAudioFormat();
120
121        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
122        if (result != AAUDIO_OK) {
123            ALOGE("AudioStreamInternal.open(): %s getStreamDescriptor returns %d",
124                  getLocationName(), result);
125            mServiceInterface.closeStream(mServiceStreamHandle);
126            return result;
127        }
128
129        // resolve parcelable into a descriptor
130        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
131        if (result != AAUDIO_OK) {
132            ALOGE("AudioStreamInternal.open(): resolve() returns %d", result);
133            mServiceInterface.closeStream(mServiceStreamHandle);
134            return result;
135        }
136
137        // Configure endpoint based on descriptor.
138        mAudioEndpoint.configure(&mEndpointDescriptor);
139
140        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
141        int32_t capacity = mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames;
142
143        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.open() %s framesPerBurst = %d, capacity = %d",
144                 getLocationName(), mFramesPerBurst, capacity);
145        // Validate result from server.
146        if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
147            ALOGE("AudioStream::open(): framesPerBurst out of range = %d", mFramesPerBurst);
148            return AAUDIO_ERROR_OUT_OF_RANGE;
149        }
150        if (capacity < mFramesPerBurst || capacity > 32 * 1024) {
151            ALOGE("AudioStream::open(): bufferCapacity out of range = %d", capacity);
152            return AAUDIO_ERROR_OUT_OF_RANGE;
153        }
154
155        mClockModel.setSampleRate(getSampleRate());
156        mClockModel.setFramesPerBurst(mFramesPerBurst);
157
158        if (getDataCallbackProc()) {
159            mCallbackFrames = builder.getFramesPerDataCallback();
160            if (mCallbackFrames > getBufferCapacity() / 2) {
161                ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
162                      mCallbackFrames, getBufferCapacity());
163                mServiceInterface.closeStream(mServiceStreamHandle);
164                return AAUDIO_ERROR_OUT_OF_RANGE;
165
166            } else if (mCallbackFrames < 0) {
167                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
168                mServiceInterface.closeStream(mServiceStreamHandle);
169                return AAUDIO_ERROR_OUT_OF_RANGE;
170
171            }
172            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
173                mCallbackFrames = mFramesPerBurst;
174            }
175
176            int32_t bytesPerFrame = getSamplesPerFrame()
177                                    * AAudioConvert_formatToSizeInBytes(getFormat());
178            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
179            mCallbackBuffer = new uint8_t[callbackBufferSize];
180        }
181
182        setState(AAUDIO_STREAM_STATE_OPEN);
183    }
184    return result;
185}
186
187aaudio_result_t AudioStreamInternal::close() {
188    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
189             mServiceStreamHandle);
190    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
191        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
192        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
193
194        mServiceInterface.closeStream(serviceStreamHandle);
195        delete[] mCallbackBuffer;
196        return mEndPointParcelable.close();
197    } else {
198        return AAUDIO_ERROR_INVALID_HANDLE;
199    }
200}
201
202
// Render audio in the application callback and then write the data to the stream.
// Runs on the dedicated thread launched by requestStart(). Exits when the callback
// is disabled, the stream is no longer playing, or a write error occurs.
void *AudioStreamInternal::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream.
            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

            // This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if ((result != mCallbackFrames)) {
                ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                // Report the failure to the application before leaving the loop.
                if (getErrorCallbackProc() != nullptr) {
                    (*getErrorCallbackProc())(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
          result, (int) isPlaying());
    return NULL;
}
249
250static void *aaudio_callback_thread_proc(void *context)
251{
252    AudioStreamInternal *stream = (AudioStreamInternal *)context;
253    //LOGD("AudioStreamInternal(): oboe_callback_thread, stream = %p", stream);
254    if (stream != NULL) {
255        return stream->callbackLoop();
256    } else {
257        return NULL;
258    }
259}
260
261aaudio_result_t AudioStreamInternal::requestStart()
262{
263    int64_t startTime;
264    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): start()");
265    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
266        return AAUDIO_ERROR_INVALID_STATE;
267    }
268
269    startTime = AudioClock::getNanoseconds();
270    mClockModel.start(startTime);
271    processTimestamp(0, startTime);
272    setState(AAUDIO_STREAM_STATE_STARTING);
273    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);;
274
275    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
276        // Launch the callback loop thread.
277        int64_t periodNanos = mCallbackFrames
278                              * AAUDIO_NANOS_PER_SECOND
279                              / getSampleRate();
280        mCallbackEnabled.store(true);
281        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
282    }
283    return result;
284}
285
286int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
287
288    // Wait for at least a second or some number of callbacks to join the thread.
289    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
290                                  * framesPerOperation
291                                  * AAUDIO_NANOS_PER_SECOND)
292                                  / getSampleRate();
293    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
294        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
295    }
296    return timeoutNanoseconds;
297}
298
299aaudio_result_t AudioStreamInternal::stopCallback()
300{
301    if (isDataCallbackActive()) {
302        mCallbackEnabled.store(false);
303        return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
304    } else {
305        return AAUDIO_OK;
306    }
307}
308
// Ask the service to pause the stream. Does not stop the callback thread;
// callers must do that first (see requestPause()).
aaudio_result_t AudioStreamInternal::requestPauseInternal()
{
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Freeze the clock model so position estimates stop advancing while paused.
    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    return mServiceInterface.pauseStream(mServiceStreamHandle);
}
321
322aaudio_result_t AudioStreamInternal::requestPause()
323{
324    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestPause()", getLocationName());
325    aaudio_result_t result = stopCallback();
326    if (result != AAUDIO_OK) {
327        return result;
328    }
329    result = requestPauseInternal();
330    ALOGD("AudioStreamInternal(): requestPause() returns %d", result);
331    return result;
332}
333
// Ask the service to flush the stream. The local counters are adjusted later
// when the server confirms with AAUDIO_SERVICE_EVENT_FLUSHED (see onFlushFromServer()).
aaudio_result_t AudioStreamInternal::requestFlush() {
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): requestFlush()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return mServiceInterface.flushStream(mServiceStreamHandle);
}
345
346void AudioStreamInternal::onFlushFromServer() {
347    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
348    int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
349    int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
350
351    // Bump offset so caller does not see the retrograde motion in getFramesRead().
352    int64_t framesFlushed = writeCounter - readCounter;
353    mFramesOffsetFromService += framesFlushed;
354
355    // Flush written frames by forcing writeCounter to readCounter.
356    // This is because we cannot move the read counter in the hardware.
357    mAudioEndpoint.setDownDataWriteCounter(readCounter);
358}
359
// Ask the service to stop the stream. Does not stop the callback thread;
// callers must do that first (see requestStop()).
aaudio_result_t AudioStreamInternal::requestStopInternal()
{
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
              mServiceStreamHandle);
        return AAUDIO_ERROR_INVALID_STATE;
    }

    // Freeze the clock model so position estimates stop advancing once stopped.
    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_STOPPING);
    return mServiceInterface.stopStream(mServiceStreamHandle);
}
372
373aaudio_result_t AudioStreamInternal::requestStop()
374{
375    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestStop()", getLocationName());
376    aaudio_result_t result = stopCallback();
377    if (result != AAUDIO_OK) {
378        return result;
379    }
380    result = requestStopInternal();
381    ALOGD("AudioStreamInternal(): requestStop() returns %d", result);
382    return result;
383}
384
385aaudio_result_t AudioStreamInternal::registerThread() {
386    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
387        return AAUDIO_ERROR_INVALID_STATE;
388    }
389    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
390                                              getpid(),
391                                              gettid(),
392                                              getPeriodNanoseconds());
393}
394
395aaudio_result_t AudioStreamInternal::unregisterThread() {
396    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
397        return AAUDIO_ERROR_INVALID_STATE;
398    }
399    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
400}
401
402aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
403                           int64_t *framePosition,
404                           int64_t *timeNanoseconds) {
405    // TODO Generate in server and pass to client. Return latest.
406    int64_t time = AudioClock::getNanoseconds();
407    *framePosition = mClockModel.convertTimeToPosition(time);
408    *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
409    return AAUDIO_OK;
410}
411
412aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
413    if (isDataCallbackActive()) {
414        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
415    }
416    return processCommands();
417}
418
#if LOG_TIMESTAMPS
// Debug-only helper: log each timestamp from the service and the frame rate
// measured between consecutive timestamps.
static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
    static int64_t oldPosition = 0;
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    // Fixed format specifier: nanoTime is cast to (long long), so use %lld, not %llu.
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %lld",
         (long long) framePosition,
         (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    // Skip the rate calculation on the first call (oldTime == 0) or on bogus deltas.
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
    }
    oldPosition = framePosition;
    oldTime = nanoTime;
}
#endif
440
441aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
442    int64_t framePosition = 0;
443#if LOG_TIMESTAMPS
444    AudioStreamInternal_LogTimestamp(command);
445#endif
446    framePosition = message->timestamp.position;
447    processTimestamp(framePosition, message->timestamp.timestamp);
448    return AAUDIO_OK;
449}
450
// Handle an EVENT message from the service: mirror the server's state machine
// on the client side and apply volume changes.
// Returns AAUDIO_ERROR_DISCONNECTED when the service reports a disconnect.
aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD_IF(MYLOG_CONDITION, "processCommands() got event %d", message->event.event);
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
            setState(AAUDIO_STREAM_STATE_STARTED);
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
            setState(AAUDIO_STREAM_STATE_PAUSED);
            break;
        case AAUDIO_SERVICE_EVENT_STOPPED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
            setState(AAUDIO_STREAM_STATE_STOPPED);
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
            setState(AAUDIO_STREAM_STATE_FLUSHED);
            // Adjust local counters so the flush is not seen as retrograde motion.
            onFlushFromServer();
            break;
        case AAUDIO_SERVICE_EVENT_CLOSED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
            setState(AAUDIO_STREAM_STATE_CLOSED);
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            result = AAUDIO_ERROR_DISCONNECTED;
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
            break;
        case AAUDIO_SERVICE_EVENT_VOLUME:
            // Volume arrives as a double; ramp toward it locally to avoid clicks.
            mVolumeRamp.setTarget((float) message->event.dataDouble);
            ALOGD_IF(MYLOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f",
                     message->event.dataDouble);
            break;
        default:
            ALOGW("WARNING - processCommands() Unrecognized event = %d",
                 (int) message->event.event);
            break;
    }
    return result;
}
493
494// Process all the commands coming from the server.
495aaudio_result_t AudioStreamInternal::processCommands() {
496    aaudio_result_t result = AAUDIO_OK;
497
498    while (result == AAUDIO_OK) {
499        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
500        AAudioServiceMessage message;
501        if (mAudioEndpoint.readUpCommand(&message) != 1) {
502            break; // no command this time, no problem
503        }
504        switch (message.what) {
505        case AAudioServiceMessage::code::TIMESTAMP:
506            result = onTimestampFromServer(&message);
507            break;
508
509        case AAudioServiceMessage::code::EVENT:
510            result = onEventFromServer(&message);
511            break;
512
513        default:
514            ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
515                 (int) message.what);
516            result = AAUDIO_ERROR_UNEXPECTED_VALUE;
517            break;
518        }
519    }
520    return result;
521}
522
523// Write the data, block if needed and timeoutMillis > 0
524aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
525                                         int64_t timeoutNanoseconds)
526{
527    aaudio_result_t result = AAUDIO_OK;
528    int32_t loopCount = 0;
529    uint8_t* source = (uint8_t*)buffer;
530    int64_t currentTimeNanos = AudioClock::getNanoseconds();
531    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
532    int32_t framesLeft = numFrames;
533
534    // Write until all the data has been written or until a timeout occurs.
535    while (framesLeft > 0) {
536        // The call to writeNow() will not block. It will just write as much as it can.
537        int64_t wakeTimeNanos = 0;
538        aaudio_result_t framesWritten = writeNow(source, framesLeft,
539                                               currentTimeNanos, &wakeTimeNanos);
540        if (framesWritten < 0) {
541            ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
542            result = framesWritten;
543            break;
544        }
545        framesLeft -= (int32_t) framesWritten;
546        source += framesWritten * getBytesPerFrame();
547
548        // Should we block?
549        if (timeoutNanoseconds == 0) {
550            break; // don't block
551        } else if (framesLeft > 0) {
552            // clip the wake time to something reasonable
553            if (wakeTimeNanos < currentTimeNanos) {
554                wakeTimeNanos = currentTimeNanos;
555            }
556            if (wakeTimeNanos > deadlineNanos) {
557                // If we time out, just return the framesWritten so far.
558                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
559                      (long long) timeoutNanoseconds);
560                break;
561            }
562
563            int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
564            AudioClock::sleepForNanos(sleepForNanos);
565            currentTimeNanos = AudioClock::getNanoseconds();
566        }
567    }
568
569    // return error or framesWritten
570    (void) loopCount;
571    return (result < 0) ? result : numFrames - framesLeft;
572}
573
// Write as much data as we can without blocking.
// Also services pending server commands, keeps the timing model's read counter
// up to date, counts underruns, and suggests (via *wakeTimePtr) when the caller
// should try writing again.
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                         int64_t currentNanoTime, int64_t *wakeTimePtr) {
    // Process server commands first; they may change state, volume or counters.
    aaudio_result_t result = processCommands();
    if (result != AAUDIO_OK) {
        return result;
    }

    if (mAudioEndpoint.isOutputFreeRunning()) {
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
    }
    // TODO else query from endpoint cuz set by actual reader, maybe

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
    }

    // Write some data to the buffer.
    //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    // Wake when the timing model predicts the next burst will be consumed.
                    uint32_t burstSize = mFramesPerBurst;
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }
//    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
//         (unsigned long long)currentNanoTime,
//         (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}
639
640
// Copy application audio into the shared FIFO, converting between the app
// format (getFormat()) and the device format (mDeviceFormat) and applying the
// volume ramp along the way. Writes into the FIFO's one or two contiguous
// regions (WrappingBuffer) and returns the number of frames written.
aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
                                       int32_t numFrames) {
    // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //              buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);

    // Read data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            // Advance the volume ramp; ramping is true while a volume change is in progress.
            bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
                                    &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    // Same format: just apply the (possibly flat) volume ramp.
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        // Non-ramping overload: sample count plus a single amplitude
                        // (presumably; signature lives in the utility header).
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        // Non-ramping overload: sample count plus a single amplitude
                        // (presumably; signature lives in the utility header).
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    // Same format: just apply the (possibly flat) volume ramp.
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}
738
739void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
740    mClockModel.processTimestamp( position, time);
741}
742
743aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
744    int32_t actualFrames = 0;
745    // Round to the next highest burst size.
746    if (getFramesPerBurst() > 0) {
747        int32_t numBursts = (requestedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
748        requestedFrames = numBursts * getFramesPerBurst();
749    }
750
751    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
752    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::setBufferSize() %s req = %d => %d",
753             getLocationName(), requestedFrames, actualFrames);
754    if (result < 0) {
755        return result;
756    } else {
757        return (aaudio_result_t) actualFrames;
758    }
759}
760
761int32_t AudioStreamInternal::getBufferSize() const
762{
763    return mAudioEndpoint.getBufferSizeInFrames();
764}
765
766int32_t AudioStreamInternal::getBufferCapacity() const
767{
768    return mAudioEndpoint.getBufferCapacityInFrames();
769}
770
771int32_t AudioStreamInternal::getFramesPerBurst() const
772{
773    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
774}
775
776int64_t AudioStreamInternal::getFramesRead()
777{
778    int64_t framesRead =
779            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
780            + mFramesOffsetFromService;
781    // Prevent retrograde motion.
782    if (framesRead < mLastFramesRead) {
783        framesRead = mLastFramesRead;
784    } else {
785        mLastFramesRead = framesRead;
786    }
787    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
788    return framesRead;
789}
790
791int64_t AudioStreamInternal::getFramesWritten()
792{
793    int64_t getFramesWritten = mAudioEndpoint.getDownDataWriteCounter()
794            + mFramesOffsetFromService;
795    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten);
796    return getFramesWritten;
797}
798