AudioStreamInternal.cpp revision 677d7916c0fa6f0955aae8f3ef921383e285beb2
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <assert.h>

#include <binder/IServiceManager.h>
#include <utils/Mutex.h>

#include <aaudio/AAudio.h>
#include <utils/String16.h>

#include "utility/AudioClock.h"
#include "AudioStreamInternal.h"
#include "binding/AAudioServiceMessage.h"

#include "core/AudioStreamBuilder.h"

#define LOG_TIMESTAMPS   0

using android::String16;
using android::IServiceManager;
using android::defaultServiceManager;
using android::interface_cast;
using android::Mutex;

using namespace aaudio;

static android::Mutex gServiceLock;
static sp<IAAudioService>  gAAudioService;

#define AAUDIO_SERVICE_NAME   "AAudioService"

#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)

// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS    4
// Helper function to get access to the "AAudioService" service.
// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
static const sp<IAAudioService> getAAudioService() {
    sp<IBinder> binder;
    Mutex::Autolock _l(gServiceLock);
    if (gAAudioService == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        // Try several times to get the service.
        int retries = 4;
        do {
            binder = sm->getService(String16(AAUDIO_SERVICE_NAME)); // This will wait a while.
            if (binder != 0) {
                break;
            }
        } while (retries-- > 0);

        if (binder != 0) {
            // TODO Add linkToDeath() like in frameworks/av/media/libaudioclient/AudioSystem.cpp
            // TODO Create a DeathRecipient that disconnects all active streams.
            gAAudioService = interface_cast<IAAudioService>(binder);
        } else {
            ALOGE("AudioStreamInternal could not get %s", AAUDIO_SERVICE_NAME);
        }
    }
    return gAAudioService;
}

AudioStreamInternal::AudioStreamInternal()
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mFramesPerBurst(16) // placeholder; replaced by the endpoint's real burst size in open()
{
}

AudioStreamInternal::~AudioStreamInternal() {
}

aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    const sp<IAAudioService>& service = getAAudioService();
    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;

    aaudio_result_t result = AAUDIO_OK;
    AAudioStreamRequest request;
    AAudioStreamConfiguration configuration;

    result = AudioStream::open(builder);
    if (result < 0) {
        return result;
    }

    // Build the request to send to the server.
    request.setUserId(getuid());
    request.setProcessId(getpid());
    request.getConfiguration().setDeviceId(getDeviceId());
    request.getConfiguration().setSampleRate(getSampleRate());
    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
    request.getConfiguration().setAudioFormat(getFormat());
    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
    request.dump();

    mServiceStreamHandle = service->openStream(request, configuration);
    ALOGD("AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
         (unsigned int)mServiceStreamHandle);
    if (mServiceStreamHandle < 0) {
        result = mServiceStreamHandle;
        ALOGE("AudioStreamInternal.open(): openStream failed, aaudio_result_t = 0x%08X", result);
    } else {
        result = configuration.validate();
        if (result != AAUDIO_OK) {
            close();
            return result;
        }
        // Save results of the open.
        setSampleRate(configuration.getSampleRate());
        setSamplesPerFrame(configuration.getSamplesPerFrame());
        setFormat(configuration.getAudioFormat());

        aaudio::AudioEndpointParcelable parcelable;
        result = service->getStreamDescription(mServiceStreamHandle, parcelable);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): getStreamDescription returned %d", result);
            service->closeStream(mServiceStreamHandle);
            return result;
        }
        // Resolve the parcelable into a descriptor.
        parcelable.resolve(&mEndpointDescriptor);

        // Configure endpoint based on descriptor.
        mAudioEndpoint.configure(&mEndpointDescriptor);

        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
        assert(mFramesPerBurst >= 16);
        assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);

        mClockModel.setSampleRate(getSampleRate());
        mClockModel.setFramesPerBurst(mFramesPerBurst);

        if (getDataCallbackProc()) {
            mCallbackFrames = builder.getFramesPerDataCallback();
            if (mCallbackFrames > getBufferCapacity() / 2) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
                service->closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;
            } else if (mCallbackFrames < 0) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
                service->closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;
            }
            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
                mCallbackFrames = mFramesPerBurst;
            }

            int32_t bytesPerFrame = getSamplesPerFrame()
                                    * AAudioConvert_formatToSizeInBytes(getFormat());
            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
            mCallbackBuffer = new uint8_t[callbackBufferSize];
        }

        setState(AAUDIO_STREAM_STATE_OPEN);
    }
    return result;
}

aaudio_result_t AudioStreamInternal::close() {
    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
        const sp<IAAudioService>& aaudioService = getAAudioService();
        if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
        aaudioService->closeStream(serviceStreamHandle);
        delete[] mCallbackBuffer;
        return AAUDIO_OK;
    } else {
        return AAUDIO_ERROR_INVALID_HANDLE;
    }
}

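// A note on how this loop is typically driven: an application opts into callback
// mode through the NDK builder API before the stream is opened, roughly like the
// sketch below (builder entry points are assumed from the public AAudio API and
// are not defined in this file):
//
//     AAudioStreamBuilder_setDataCallback(builder, myDataCallback, myUserData);
//     AAudioStreamBuilder_openStream(builder, &stream);
//     AAudioStream_requestStart(stream);  // spawns the thread that runs callbackLoop()
//
// Each iteration below asks the app to fill mCallbackBuffer with mCallbackFrames
// frames, then pushes that data to the service with a blocking write().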
// Render audio in the application callback and then write the data to the stream.
void *AudioStreamInternal::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream.
            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

            // This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if (result != mCallbackFrames) {
                ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                if (getErrorCallbackProc() != nullptr) {
                    (*getErrorCallbackProc())(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
          result, (int) isPlaying());
    return NULL; // TODO review
}

static void *aaudio_callback_thread_proc(void *context)
{
    AudioStreamInternal *stream = (AudioStreamInternal *)context;
    //LOGD("AudioStreamInternal(): aaudio_callback_thread, stream = %p", stream);
    if (stream != NULL) {
        return stream->callbackLoop();
    } else {
        return NULL;
    }
}

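// Start the stream on the service side and, if a data callback was supplied,
// launch the thread that runs callbackLoop(). The clock model is started first,
// presumably so that writes made while the stream is still starting already have
// a timing reference.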
aaudio_result_t AudioStreamInternal::requestStart()
{
    int64_t startTime;
    ALOGD("AudioStreamInternal(): requestStart()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    const sp<IAAudioService>& aaudioService = getAAudioService();
    if (aaudioService == 0) {
        return AAUDIO_ERROR_NO_SERVICE;
    }
    startTime = AudioClock::getNanoseconds();
    mClockModel.start(startTime);
    processTimestamp(0, startTime);
    setState(AAUDIO_STREAM_STATE_STARTING);
    aaudio_result_t result = aaudioService->startStream(mServiceStreamHandle);

    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
        // Launch the callback loop thread.
        int64_t periodNanos = mCallbackFrames
                              * AAUDIO_NANOS_PER_SECOND
                              / getSampleRate();
        mCallbackEnabled.store(true);
        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
    }
    return result;
}

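// For example, with framesPerOperation = 960 at a 48000 Hz sample rate the
// operation itself should take about 20 msec, so this returns
// max(4 * 20 msec, MIN_TIMEOUT_NANOS) = 1 second; with framesPerOperation = 48000
// it would return 4 seconds. (Illustrative numbers only.)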
int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {

    // Wait for MIN_TIMEOUT_OPERATIONS times as long as the operation itself
    // should take, but never less than MIN_TIMEOUT_NANOS.
    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND)
                         / getSampleRate();
    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) {
        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
    }
    return timeoutNanoseconds;
}

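// Ask the callback thread to exit by clearing mCallbackEnabled, then wait for it
// to join. Returns immediately with AAUDIO_OK if no callback thread is active.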
aaudio_result_t AudioStreamInternal::stopCallback()
{
    if (isDataCallbackActive()) {
        mCallbackEnabled.store(false);
        return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
    } else {
        return AAUDIO_OK;
    }
}

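// Pause the stream on the service side and stop the clock model, presumably so
// that position estimates stop advancing while the stream is paused. Callers are
// expected to stop the callback thread first; see requestPause() below.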
aaudio_result_t AudioStreamInternal::requestPauseInternal()
{
    ALOGD("AudioStreamInternal(): requestPauseInternal()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    const sp<IAAudioService>& aaudioService = getAAudioService();
    if (aaudioService == 0) {
        return AAUDIO_ERROR_NO_SERVICE;
    }
    mClockModel.stop(AudioClock::getNanoseconds());
    setState(AAUDIO_STREAM_STATE_PAUSING);
    return aaudioService->pauseStream(mServiceStreamHandle);
}

aaudio_result_t AudioStreamInternal::requestPause()
{
    aaudio_result_t result = stopCallback();
    if (result != AAUDIO_OK) {
        return result;
    }
    return requestPauseInternal();
}

aaudio_result_t AudioStreamInternal::requestFlush() {
    ALOGD("AudioStreamInternal(): requestFlush()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    const sp<IAAudioService>& aaudioService = getAAudioService();
    if (aaudioService == 0) {
        return AAUDIO_ERROR_NO_SERVICE;
    }
    setState(AAUDIO_STREAM_STATE_FLUSHING);
    return aaudioService->flushStream(mServiceStreamHandle);
}

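// Called when the service reports AAUDIO_SERVICE_EVENT_FLUSHED. Adjusts the
// client-side counters so that getFramesRead() does not appear to run backwards
// after the flush.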
void AudioStreamInternal::onFlushFromServer() {
    ALOGD("AudioStreamInternal(): onFlushFromServer()");
    int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();
    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t framesFlushed = writeCounter - readCounter;
    mFramesOffsetFromService += framesFlushed;
    // Flush written frames by forcing writeCounter to readCounter.
    // This is because we cannot move the read counter in the hardware.
    mAudioEndpoint.setDownDataWriteCounter(readCounter);
}

aaudio_result_t AudioStreamInternal::requestStop()
{
    // TODO better implementation of requestStop()
    aaudio_result_t result = requestPause();
    if (result == AAUDIO_OK) {
        aaudio_stream_state_t state;
        result = waitForStateChange(AAUDIO_STREAM_STATE_PAUSING,
                                    &state,
                                    500 * AAUDIO_NANOS_PER_MILLISECOND); // TODO temporary code
        if (result == AAUDIO_OK) {
            result = requestFlush();
        }
    }
    return result;
}

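// Register the calling thread with the service, presumably so it can be granted
// a real-time scheduling priority for audio work.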
aaudio_result_t AudioStreamInternal::registerThread() {
    ALOGD("AudioStreamInternal(): registerThread()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    const sp<IAAudioService>& aaudioService = getAAudioService();
    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
    return aaudioService->registerAudioThread(mServiceStreamHandle,
                                         gettid(),
                                         getPeriodNanoseconds());
}

aaudio_result_t AudioStreamInternal::unregisterThread() {
    ALOGD("AudioStreamInternal(): unregisterThread()");
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    const sp<IAAudioService>& aaudioService = getAAudioService();
    if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
    return aaudioService->unregisterAudioThread(mServiceStreamHandle, gettid());
}

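// Return a (framePosition, timeNanoseconds) pair describing the stream's timing.
// For now the pair is synthesized from the clock model plus a fixed 10 msec
// offset rather than queried from a real HAL, and clockId is ignored.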
aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                           int64_t *framePosition,
                           int64_t *timeNanoseconds) {
    // TODO implement using real HAL
    int64_t time = AudioClock::getNanoseconds();
    *framePosition = mClockModel.convertTimeToPosition(time);
    *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
    return AAUDIO_OK;
}

aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
    if (isDataCallbackActive()) {
        return AAUDIO_OK; // state is getting updated by the callback thread's read/write calls
    }
    return processCommands();
}

#if LOG_TIMESTAMPS
static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
    static int64_t oldPosition = 0;
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %lld",
         (long long) framePosition,
         (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
        ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
        ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
    }
    oldPosition = framePosition;
    oldTime = nanoTime;
}
#endif

aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
    int64_t framePosition = 0;
#if LOG_TIMESTAMPS
    AudioStreamInternal_LogTimestamp(*message);
#endif
    framePosition = message->timestamp.position;
    processTimestamp(framePosition, message->timestamp.timestamp);
    return AAUDIO_OK;
}

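// Handle a state-change event sent by the service, keeping the client-side
// stream state in sync.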
aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD("processCommands() got event %d", message->event.event);
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
            setState(AAUDIO_STREAM_STATE_STARTED);
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
            setState(AAUDIO_STREAM_STATE_PAUSED);
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
            setState(AAUDIO_STREAM_STATE_FLUSHED);
            onFlushFromServer();
            break;
        case AAUDIO_SERVICE_EVENT_CLOSED:
            ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
            setState(AAUDIO_STREAM_STATE_CLOSED);
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            result = AAUDIO_ERROR_DISCONNECTED;
            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
            break;
        default:
            ALOGW("WARNING - processCommands() Unrecognized event = %d",
                 (int) message->event.event);
            break;
    }
    return result;
}

// Process all the commands coming from the server.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        AAudioServiceMessage message;
        if (mAudioEndpoint.readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
        case AAudioServiceMessage::code::TIMESTAMP:
            result = onTimestampFromServer(&message);
            break;

        case AAudioServiceMessage::code::EVENT:
            result = onEventFromServer(&message);
            break;

        default:
            ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
                 (int) message.what);
            result = AAUDIO_ERROR_UNEXPECTED_VALUE;
            break;
        }
    }
    return result;
}

// Write the data, blocking if needed as long as timeoutNanoseconds > 0.
aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
                                         int64_t timeoutNanoseconds)
{
    aaudio_result_t result = AAUDIO_OK;
    uint8_t* source = (uint8_t*)buffer;
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;
//    ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
//         buffer, numFrames, (unsigned long long) currentTimeNanos, mState);

    // Write until all the data has been written or until a timeout occurs.
    while (framesLeft > 0) {
        // The call to writeNow() will not block. It will just write as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                               currentTimeNanos, &wakeTimeNanos);
//        ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
        if (framesWritten < 0) {
            result = framesWritten;
            break;
        }
        framesLeft -= (int32_t) framesWritten;
        source += framesWritten * getBytesPerFrame();

        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (framesLeft > 0) {
            //ALOGD("AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
            // Clip the wake time to something reasonable.
            if (wakeTimeNanos < currentTimeNanos) {
                wakeTimeNanos = currentTimeNanos;
            }
            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the frames written so far.
                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos", (long long) timeoutNanoseconds);
                break;
            }

            //ALOGD("AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
            //        (long long) (wakeTimeNanos - currentTimeNanos));
            AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    // Return an error or the number of frames actually written.
    return (result < 0) ? result : numFrames - framesLeft;
}

// Write as much data as we can without blocking.
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                         int64_t currentNanoTime, int64_t *wakeTimePtr) {
    {
        aaudio_result_t result = processCommands();
        if (result != AAUDIO_OK) {
            return result;
        }
    }

    if (mAudioEndpoint.isOutputFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
        // If the read index passed the write index then consider it an underrun.
        if (mAudioEndpoint.getFullFramesAvailable() < 0) {
            mXRunCount++;
        }
    }
    // TODO else maybe query the endpoint, since the counter is set by the actual reader.

    // Write some data to the buffer.
    int32_t framesWritten = mAudioEndpoint.writeDataNow(buffer, numFrames);
    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    //ALOGD("AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
        switch (getState()) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    uint32_t burstSize = mFramesPerBurst;
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;
    }
//    ALOGD("AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
//         (unsigned long long)currentNanoTime,
//         (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}

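// Feed a (position, time) pair reported by the service into the clock model,
// which uses such timestamps to estimate the read position between reports
// (see writeNow() above).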
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp(position, time);
}

aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    int32_t actualFrames = 0;
    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
    if (result < 0) {
        return result;
    } else {
        return (aaudio_result_t) actualFrames;
    }
}

int32_t AudioStreamInternal::getBufferSize() const
{
    return mAudioEndpoint.getBufferSizeInFrames();
}

int32_t AudioStreamInternal::getBufferCapacity() const
{
    return mAudioEndpoint.getBufferCapacityInFrames();
}

int32_t AudioStreamInternal::getFramesPerBurst() const
{
    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
}

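// Estimate how many frames the service has consumed so far. The value is derived
// from the clock model plus the offset accumulated by flushes, and is clamped so
// that it never appears to move backwards.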
int64_t AudioStreamInternal::getFramesRead()
{
    int64_t framesRead =
            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
            + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

// TODO implement getTimestamp
664