AudioStreamInternal.cpp revision 71f35bb687476694882a617ba4a810a0bb56fe23
1/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "AAudio"
18//#define LOG_NDEBUG 0
19#include <utils/Log.h>
20
21#include <stdint.h>
22#include <assert.h>
23
24#include <binder/IServiceManager.h>
25
26#include <aaudio/AAudio.h>
27#include <utils/String16.h>
28
29#include "AudioClock.h"
30#include "AudioEndpointParcelable.h"
31#include "binding/AAudioStreamRequest.h"
32#include "binding/AAudioStreamConfiguration.h"
33#include "binding/IAAudioService.h"
34#include "binding/AAudioServiceMessage.h"
35#include "fifo/FifoBuffer.h"
36
37#include "core/AudioStreamBuilder.h"
38#include "AudioStreamInternal.h"
39
40#define LOG_TIMESTAMPS   0
41
42using android::String16;
43using android::Mutex;
44using android::WrappingBuffer;
45
46using namespace aaudio;
47
48#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)
49
50// Wait at least this many times longer than the operation should take.
51#define MIN_TIMEOUT_OPERATIONS    4
52
53//static int64_t s_logCounter = 0;
54//#define MYLOG_CONDITION   (mInService == true && s_logCounter++ < 500)
55//#define MYLOG_CONDITION   (s_logCounter++ < 500000)
56#define MYLOG_CONDITION   (1)
57
// Construct a client-side stream proxy.
// serviceInterface: interface to the AAudio service; held by reference, so it
//                   must outlive this stream object.
// inService: true when this object is running inside the service process itself.
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mFramesPerBurst(16)    // placeholder; replaced by the server's value in open()
        , mServiceInterface(serviceInterface)
        , mInService(inService) {
}
67
// Destructor. Note it does NOT close the stream; callers are expected to
// call close() first (see close() for the handle/buffer teardown).
AudioStreamInternal::~AudioStreamInternal() {
}
70
71aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
72
73    aaudio_result_t result = AAUDIO_OK;
74    AAudioStreamRequest request;
75    AAudioStreamConfiguration configuration;
76
77    result = AudioStream::open(builder);
78    if (result < 0) {
79        return result;
80    }
81
82    // We have to do volume scaling. So we prefer FLOAT format.
83    if (getFormat() == AAUDIO_UNSPECIFIED) {
84        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
85    }
86    // Request FLOAT for the shared mixer.
87    request.getConfiguration().setAudioFormat(AAUDIO_FORMAT_PCM_FLOAT);
88
89    // Build the request to send to the server.
90    request.setUserId(getuid());
91    request.setProcessId(getpid());
92    request.setDirection(getDirection());
93    request.setSharingModeMatchRequired(isSharingModeMatchRequired());
94
95    request.getConfiguration().setDeviceId(getDeviceId());
96    request.getConfiguration().setSampleRate(getSampleRate());
97    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
98    request.getConfiguration().setSharingMode(getSharingMode());
99
100    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
101
102    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
103    if (mServiceStreamHandle < 0) {
104        result = mServiceStreamHandle;
105        ALOGE("AudioStreamInternal.open(): %s openStream() returned %d", getLocationName(), result);
106    } else {
107        result = configuration.validate();
108        if (result != AAUDIO_OK) {
109            close();
110            return result;
111        }
112        // Save results of the open.
113        setSampleRate(configuration.getSampleRate());
114        setSamplesPerFrame(configuration.getSamplesPerFrame());
115        setDeviceId(configuration.getDeviceId());
116
117        // Save device format so we can do format conversion and volume scaling together.
118        mDeviceFormat = configuration.getAudioFormat();
119
120        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
121        if (result != AAUDIO_OK) {
122            ALOGE("AudioStreamInternal.open(): %s getStreamDescriptor returns %d",
123                  getLocationName(), result);
124            mServiceInterface.closeStream(mServiceStreamHandle);
125            return result;
126        }
127
128        // resolve parcelable into a descriptor
129        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
130        if (result != AAUDIO_OK) {
131            ALOGE("AudioStreamInternal.open(): resolve() returns %d", result);
132            mServiceInterface.closeStream(mServiceStreamHandle);
133            return result;
134        }
135
136        // Configure endpoint based on descriptor.
137        mAudioEndpoint.configure(&mEndpointDescriptor);
138
139        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
140        int32_t capacity = mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames;
141
142        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.open() %s framesPerBurst = %d, capacity = %d",
143                 getLocationName(), mFramesPerBurst, capacity);
144        // Validate result from server.
145        if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
146            ALOGE("AudioStream::open(): framesPerBurst out of range = %d", mFramesPerBurst);
147            return AAUDIO_ERROR_OUT_OF_RANGE;
148        }
149        if (capacity < mFramesPerBurst || capacity > 32 * 1024) {
150            ALOGE("AudioStream::open(): bufferCapacity out of range = %d", capacity);
151            return AAUDIO_ERROR_OUT_OF_RANGE;
152        }
153
154        mClockModel.setSampleRate(getSampleRate());
155        mClockModel.setFramesPerBurst(mFramesPerBurst);
156
157        if (getDataCallbackProc()) {
158            mCallbackFrames = builder.getFramesPerDataCallback();
159            if (mCallbackFrames > getBufferCapacity() / 2) {
160                ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
161                      mCallbackFrames, getBufferCapacity());
162                mServiceInterface.closeStream(mServiceStreamHandle);
163                return AAUDIO_ERROR_OUT_OF_RANGE;
164
165            } else if (mCallbackFrames < 0) {
166                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
167                mServiceInterface.closeStream(mServiceStreamHandle);
168                return AAUDIO_ERROR_OUT_OF_RANGE;
169
170            }
171            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
172                mCallbackFrames = mFramesPerBurst;
173            }
174
175            int32_t bytesPerFrame = getSamplesPerFrame()
176                                    * AAudioConvert_formatToSizeInBytes(getFormat());
177            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
178            mCallbackBuffer = new uint8_t[callbackBufferSize];
179        }
180
181        setState(AAUDIO_STREAM_STATE_OPEN);
182    }
183    return result;
184}
185
186aaudio_result_t AudioStreamInternal::close() {
187    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
188             mServiceStreamHandle);
189    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
190        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
191        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
192
193        mServiceInterface.closeStream(serviceStreamHandle);
194        delete[] mCallbackBuffer;
195        return mEndPointParcelable.close();
196    } else {
197        return AAUDIO_ERROR_INVALID_HANDLE;
198    }
199}
200
201
// Render audio in the application callback and then write the data to the stream.
// Runs on the dedicated callback thread launched by requestStart().
// Exits when the callback is disabled (stopCallback), the stream is no longer
// playing, the write fails/times out, or the app returns ..._RESULT_STOP.
void *AudioStreamInternal::callbackLoop() {
    aaudio_result_t result = AAUDIO_OK;
    aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE;
    AAudioStream_dataCallback appCallback = getDataCallbackProc();
    if (appCallback == nullptr) return NULL;

    // result might be a frame count
    while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) {
        // Call application using the AAudio callback interface.
        callbackResult = (*appCallback)(
                (AAudioStream *) this,
                getDataCallbackUserData(),
                mCallbackBuffer,
                mCallbackFrames);

        if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) {
            // Write audio data to stream.
            int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames);

            // This is a BLOCKING WRITE!
            result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos);
            if ((result != mCallbackFrames)) {
                ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result);
                if (result >= 0) {
                    // Only wrote some of the frames requested. Must have timed out.
                    result = AAUDIO_ERROR_TIMEOUT;
                }
                // Report the failure to the application's error callback, if any.
                if (getErrorCallbackProc() != nullptr) {
                    (*getErrorCallbackProc())(
                            (AAudioStream *) this,
                            getErrorCallbackUserData(),
                            result);
                }
                break;
            }
        } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) {
            ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP");
            break;
        }
    }

    ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d",
          result, (int) isPlaying());
    return NULL; // TODO review
}
248
249static void *aaudio_callback_thread_proc(void *context)
250{
251    AudioStreamInternal *stream = (AudioStreamInternal *)context;
252    //LOGD("AudioStreamInternal(): oboe_callback_thread, stream = %p", stream);
253    if (stream != NULL) {
254        return stream->callbackLoop();
255    } else {
256        return NULL;
257    }
258}
259
260aaudio_result_t AudioStreamInternal::requestStart()
261{
262    int64_t startTime;
263    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): start()");
264    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
265        return AAUDIO_ERROR_INVALID_STATE;
266    }
267
268    startTime = AudioClock::getNanoseconds();
269    mClockModel.start(startTime);
270    processTimestamp(0, startTime);
271    setState(AAUDIO_STREAM_STATE_STARTING);
272    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);;
273
274    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
275        // Launch the callback loop thread.
276        int64_t periodNanos = mCallbackFrames
277                              * AAUDIO_NANOS_PER_SECOND
278                              / getSampleRate();
279        mCallbackEnabled.store(true);
280        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
281    }
282    return result;
283}
284
285int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
286
287    // Wait for at least a second or some number of callbacks to join the thread.
288    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
289                                  * framesPerOperation
290                                  * AAUDIO_NANOS_PER_SECOND)
291                                  / getSampleRate();
292    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
293        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
294    }
295    return timeoutNanoseconds;
296}
297
298aaudio_result_t AudioStreamInternal::stopCallback()
299{
300    if (isDataCallbackActive()) {
301        mCallbackEnabled.store(false);
302        return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames));
303    } else {
304        return AAUDIO_OK;
305    }
306}
307
308aaudio_result_t AudioStreamInternal::requestPauseInternal()
309{
310    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
311        ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
312              mServiceStreamHandle);
313        return AAUDIO_ERROR_INVALID_STATE;
314    }
315
316    mClockModel.stop(AudioClock::getNanoseconds());
317    setState(AAUDIO_STREAM_STATE_PAUSING);
318    return mServiceInterface.pauseStream(mServiceStreamHandle);
319}
320
321aaudio_result_t AudioStreamInternal::requestPause()
322{
323    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestPause()", getLocationName());
324    aaudio_result_t result = stopCallback();
325    if (result != AAUDIO_OK) {
326        return result;
327    }
328    result = requestPauseInternal();
329    ALOGD("AudioStreamInternal(): requestPause() returns %d", result);
330    return result;
331}
332
333aaudio_result_t AudioStreamInternal::requestFlush() {
334    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): requestFlush()");
335    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
336        ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
337              mServiceStreamHandle);
338        return AAUDIO_ERROR_INVALID_STATE;
339    }
340
341    setState(AAUDIO_STREAM_STATE_FLUSHING);
342    return mServiceInterface.flushStream(mServiceStreamHandle);
343}
344
// Handle the FLUSHED event from the server: discard frames that were written
// but not yet consumed, without making getFramesRead() appear to move backward.
void AudioStreamInternal::onFlushFromServer() {
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
    int64_t readCounter = mAudioEndpoint.getDownDataReadCounter();
    int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter();

    // Bump offset so caller does not see the retrograde motion in getFramesRead().
    int64_t framesFlushed = writeCounter - readCounter;
    mFramesOffsetFromService += framesFlushed;

    // Flush written frames by forcing writeCounter to readCounter.
    // This is because we cannot move the read counter in the hardware.
    mAudioEndpoint.setDownDataWriteCounter(readCounter);
}
358
359aaudio_result_t AudioStreamInternal::requestStopInternal()
360{
361    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
362        ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
363              mServiceStreamHandle);
364        return AAUDIO_ERROR_INVALID_STATE;
365    }
366
367    mClockModel.stop(AudioClock::getNanoseconds());
368    setState(AAUDIO_STREAM_STATE_STOPPING);
369    return mServiceInterface.stopStream(mServiceStreamHandle);
370}
371
372aaudio_result_t AudioStreamInternal::requestStop()
373{
374    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestStop()", getLocationName());
375    aaudio_result_t result = stopCallback();
376    if (result != AAUDIO_OK) {
377        return result;
378    }
379    result = requestStopInternal();
380    ALOGD("AudioStreamInternal(): requestStop() returns %d", result);
381    return result;
382}
383
// Register the calling thread with the service so it can be granted scheduling
// appropriate for the callback period (getPeriodNanoseconds()).
aaudio_result_t AudioStreamInternal::registerThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
                                              getpid(),
                                              gettid(),
                                              getPeriodNanoseconds());
}
393
// Undo registerThread() for the calling thread.
aaudio_result_t AudioStreamInternal::unregisterThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
}
400
// Return an estimated (framePosition, timeNanoseconds) pair for the stream.
// NOTE(review): clockId is currently ignored; the position comes from the
// clock model and the time includes a fixed fake hardware delay — confirm
// acceptable until the HAL-backed implementation lands.
aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
                           int64_t *framePosition,
                           int64_t *timeNanoseconds) {
    // TODO implement using real HAL
    int64_t time = AudioClock::getNanoseconds();
    *framePosition = mClockModel.convertTimeToPosition(time);
    *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay
    return AAUDIO_OK;
}
410
411aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
412    if (isDataCallbackActive()) {
413        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
414    }
415    return processCommands();
416}
417
#if LOG_TIMESTAMPS
// Debug-only helper: log each timestamp from the server, plus the frame delta,
// time delta, and the sample rate measured between consecutive timestamps.
// Uses function-local statics to remember the previous sample, so it is not
// thread safe — acceptable for this compiled-out diagnostic.
static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) {
    static int64_t oldPosition = 0;
    static int64_t oldTime = 0;
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
         (long long) framePosition,
         (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
    }
    oldPosition = framePosition;
    oldTime = nanoTime;
}
#endif
439
440aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
441    int64_t framePosition = 0;
442#if LOG_TIMESTAMPS
443    AudioStreamInternal_LogTimestamp(command);
444#endif
445    framePosition = message->timestamp.position;
446    processTimestamp(framePosition, message->timestamp.timestamp);
447    return AAUDIO_OK;
448}
449
// Handle an EVENT message from the service: update the local stream state to
// mirror the server's, apply volume changes, and surface disconnects.
// Returns AAUDIO_OK, or AAUDIO_ERROR_DISCONNECTED so callers stop pumping.
aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD_IF(MYLOG_CONDITION, "processCommands() got event %d", message->event.event);
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
            setState(AAUDIO_STREAM_STATE_STARTED);
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
            setState(AAUDIO_STREAM_STATE_PAUSED);
            break;
        case AAUDIO_SERVICE_EVENT_STOPPED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
            setState(AAUDIO_STREAM_STATE_STOPPED);
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
            setState(AAUDIO_STREAM_STATE_FLUSHED);
            // Discard pending frames and adjust counters; see onFlushFromServer().
            onFlushFromServer();
            break;
        case AAUDIO_SERVICE_EVENT_CLOSED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
            setState(AAUDIO_STREAM_STATE_CLOSED);
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            // Propagate as an error so the command-processing loop stops.
            result = AAUDIO_ERROR_DISCONNECTED;
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
            break;
        case AAUDIO_SERVICE_EVENT_VOLUME:
            mVolume = message->event.dataDouble;
            ALOGD_IF(MYLOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", mVolume);
            break;
        default:
            ALOGW("WARNING - processCommands() Unrecognized event = %d",
                 (int) message->event.event);
            break;
    }
    return result;
}
491
// Process all the commands coming from the server.
// Drains the up-queue (non-blocking) and dispatches each message to the
// timestamp or event handler. Stops early if a handler reports an error
// (e.g. AAUDIO_ERROR_DISCONNECTED) and returns that error.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
        AAudioServiceMessage message;
        if (mAudioEndpoint.readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
        case AAudioServiceMessage::code::TIMESTAMP:
            result = onTimestampFromServer(&message);
            break;

        case AAudioServiceMessage::code::EVENT:
            result = onEventFromServer(&message);
            break;

        default:
            ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
                 (int) message.what);
            result = AAUDIO_ERROR_UNEXPECTED_VALUE;
            break;
        }
    }
    return result;
}
520
521// Write the data, block if needed and timeoutMillis > 0
522aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
523                                         int64_t timeoutNanoseconds)
524{
525    aaudio_result_t result = AAUDIO_OK;
526    int32_t loopCount = 0;
527    uint8_t* source = (uint8_t*)buffer;
528    int64_t currentTimeNanos = AudioClock::getNanoseconds();
529    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
530    int32_t framesLeft = numFrames;
531
532    // Write until all the data has been written or until a timeout occurs.
533    while (framesLeft > 0) {
534        // The call to writeNow() will not block. It will just write as much as it can.
535        int64_t wakeTimeNanos = 0;
536        aaudio_result_t framesWritten = writeNow(source, framesLeft,
537                                               currentTimeNanos, &wakeTimeNanos);
538        if (framesWritten < 0) {
539            ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
540            result = framesWritten;
541            break;
542        }
543        framesLeft -= (int32_t) framesWritten;
544        source += framesWritten * getBytesPerFrame();
545
546        // Should we block?
547        if (timeoutNanoseconds == 0) {
548            break; // don't block
549        } else if (framesLeft > 0) {
550            // clip the wake time to something reasonable
551            if (wakeTimeNanos < currentTimeNanos) {
552                wakeTimeNanos = currentTimeNanos;
553            }
554            if (wakeTimeNanos > deadlineNanos) {
555                // If we time out, just return the framesWritten so far.
556                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
557                      (long long) timeoutNanoseconds);
558                break;
559            }
560
561            int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
562            AudioClock::sleepForNanos(sleepForNanos);
563            currentTimeNanos = AudioClock::getNanoseconds();
564        }
565    }
566
567    // return error or framesWritten
568    (void) loopCount;
569    return (result < 0) ? result : numFrames - framesLeft;
570}
571
// Write as much data as we can without blocking.
// Also drains server commands and, when the output side is free-running,
// advances the estimated read counter from the clock model before writing.
// @param buffer          source audio data
// @param numFrames       frames the caller would like written
// @param currentNanoTime caller's notion of "now"
// @param wakeTimePtr     out: suggested time for the caller to retry (may be null)
// @return frames actually written (possibly 0) or a negative error code
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                         int64_t currentNanoTime, int64_t *wakeTimePtr) {

    {
        // Drain pending server commands first; a server error aborts the write.
        aaudio_result_t result = processCommands();
        if (result != AAUDIO_OK) {
            return result;
        }
    }

    if (mAudioEndpoint.isOutputFreeRunning()) {
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
    }
    // TODO else query from endpoint cuz set by actual reader, maybe

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
    }

    // Write some data to the buffer.
    //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now.  // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    uint32_t burstSize = mFramesPerBurst;
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    // Wake when the model predicts the DSP will have read one more burst.
                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }
//    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
//         (unsigned long long)currentNanoTime,
//         (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}
640
641
// TODO this function needs a major cleanup.
// Copy up to numFrames from 'buffer' into the shared FIFO, converting between
// the app format and the device format when they differ (FLOAT<->I16 only).
// Writes in at most two parts because the FIFO region may wrap.
// @return frames written (possibly 0) or AAUDIO_ERROR_UNEXPECTED_VALUE for an
//         unsupported format combination
aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
                                       int32_t numFrames) {
    // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)", buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);

    // Read data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            // TODO handle volume scaling
            if (getFormat() == mDeviceFormat) {
                // Copy straight through.
                memcpy(wrappingBuffer.data[partIndex], source, numBytes);
            } else if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT
                       && mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                // Data conversion.
                AAudioConvert_floatToPcm16(
                        (const float *) source,
                        framesToWrite * getSamplesPerFrame(),
                        (int16_t *) wrappingBuffer.data[partIndex]);
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16
                       && mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                // Data conversion.
                AAudioConvert_pcm16ToFloat(
                        (const int16_t *) source,
                        framesToWrite * getSamplesPerFrame(),
                        (float *) wrappingBuffer.data[partIndex]);
            } else {
                // TODO handle more conversions
                ALOGE("AudioStreamInternal::writeNowWithConversion() unsupported formats: %d, %d",
                      getFormat(), mDeviceFormat);
                return AAUDIO_ERROR_UNEXPECTED_VALUE;
            }

            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    // Publish the frames to the reader and update the client-side counter.
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}
703
// Feed a (position, time) pair from the service into the clock model that
// estimates the DSP read position.
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp( position, time);
}
707
708aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
709    int32_t actualFrames = 0;
710    // Round to the next highest burst size.
711    if (getFramesPerBurst() > 0) {
712        int32_t numBursts = (requestedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
713        requestedFrames = numBursts * getFramesPerBurst();
714    }
715
716    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
717    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::setBufferSize() %s req = %d => %d",
718             getLocationName(), requestedFrames, actualFrames);
719    if (result < 0) {
720        return result;
721    } else {
722        return (aaudio_result_t) actualFrames;
723    }
724}
725
// Current usable buffer size in frames, as reported by the endpoint.
int32_t AudioStreamInternal::getBufferSize() const
{
    return mAudioEndpoint.getBufferSizeInFrames();
}
730
// Maximum buffer capacity in frames, as reported by the endpoint.
int32_t AudioStreamInternal::getBufferCapacity() const
{
    return mAudioEndpoint.getBufferCapacityInFrames();
}
735
// Burst size in frames from the endpoint descriptor supplied by the server.
int32_t AudioStreamInternal::getFramesPerBurst() const
{
    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
}
740
// Estimate the number of frames consumed so far, based on the clock model
// (not a hardware query), plus the offset accumulated across flushes.
// NOTE(review): mLastFramesRead is read-modify-written without synchronization;
// presumably callers invoke this from a single thread — confirm.
int64_t AudioStreamInternal::getFramesRead()
{
    int64_t framesRead =
            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
            + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}
755
// TODO replace the stub getTimestamp() above with a real HAL-backed implementation
757