AudioStreamInternal.cpp revision 17fff38dd9d467bc5fb6cd5b9a6b183951c7750d
1/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#define LOG_TAG "AAudio"
18//#define LOG_NDEBUG 0
19#include <utils/Log.h>
20
21#define ATRACE_TAG ATRACE_TAG_AUDIO
22
23#include <stdint.h>
24#include <assert.h>
25
26#include <binder/IServiceManager.h>
27
28#include <aaudio/AAudio.h>
29#include <utils/String16.h>
30#include <utils/Trace.h>
31
32#include "AudioClock.h"
33#include "AudioEndpointParcelable.h"
34#include "binding/AAudioStreamRequest.h"
35#include "binding/AAudioStreamConfiguration.h"
36#include "binding/IAAudioService.h"
37#include "binding/AAudioServiceMessage.h"
38#include "core/AudioStreamBuilder.h"
39#include "fifo/FifoBuffer.h"
40#include "utility/LinearRamp.h"
41
42#include "AudioStreamInternal.h"
43
44using android::String16;
45using android::Mutex;
46using android::WrappingBuffer;
47
48using namespace aaudio;
49
50#define MIN_TIMEOUT_NANOS        (1000 * AAUDIO_NANOS_PER_MILLISECOND)
51
52// Wait at least this many times longer than the operation should take.
53#define MIN_TIMEOUT_OPERATIONS    4
54
55//static int64_t s_logCounter = 0;
56//#define MYLOG_CONDITION   (mInService == true && s_logCounter++ < 500)
57//#define MYLOG_CONDITION   (s_logCounter++ < 500000)
58#define MYLOG_CONDITION   (1)
59
60#define LOG_TIMESTAMPS   0
61
// Construct the client-side proxy for a service-owned stream.
// serviceInterface: interface used for all calls to the AAudio service.
// inService: true when this object is running inside the service process itself.
AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface  &serviceInterface, bool inService)
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mFramesPerBurst(16) // placeholder; real burst size arrives from the server in open()
        , mServiceInterface(serviceInterface)
        , mInService(inService) {
}
71
// Destructor. Note: does not close the stream; callers are expected to call
// close() first (see close() for the service-handle teardown).
AudioStreamInternal::~AudioStreamInternal() {
}
74
75aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {
76
77    aaudio_result_t result = AAUDIO_OK;
78    AAudioStreamRequest request;
79    AAudioStreamConfiguration configuration;
80
81    result = AudioStream::open(builder);
82    if (result < 0) {
83        return result;
84    }
85
86    // We have to do volume scaling. So we prefer FLOAT format.
87    if (getFormat() == AAUDIO_UNSPECIFIED) {
88        setFormat(AAUDIO_FORMAT_PCM_FLOAT);
89    }
90    // Request FLOAT for the shared mixer.
91    request.getConfiguration().setAudioFormat(AAUDIO_FORMAT_PCM_FLOAT);
92
93    // Build the request to send to the server.
94    request.setUserId(getuid());
95    request.setProcessId(getpid());
96    request.setDirection(getDirection());
97    request.setSharingModeMatchRequired(isSharingModeMatchRequired());
98
99    request.getConfiguration().setDeviceId(getDeviceId());
100    request.getConfiguration().setSampleRate(getSampleRate());
101    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
102    request.getConfiguration().setSharingMode(getSharingMode());
103
104    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
105
106    mServiceStreamHandle = mServiceInterface.openStream(request, configuration);
107    if (mServiceStreamHandle < 0) {
108        result = mServiceStreamHandle;
109        ALOGE("AudioStreamInternal.open(): %s openStream() returned %d", getLocationName(), result);
110    } else {
111        result = configuration.validate();
112        if (result != AAUDIO_OK) {
113            close();
114            return result;
115        }
116        // Save results of the open.
117        setSampleRate(configuration.getSampleRate());
118        setSamplesPerFrame(configuration.getSamplesPerFrame());
119        setDeviceId(configuration.getDeviceId());
120
121        // Save device format so we can do format conversion and volume scaling together.
122        mDeviceFormat = configuration.getAudioFormat();
123
124        result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable);
125        if (result != AAUDIO_OK) {
126            ALOGE("AudioStreamInternal.open(): %s getStreamDescriptor returns %d",
127                  getLocationName(), result);
128            mServiceInterface.closeStream(mServiceStreamHandle);
129            return result;
130        }
131
132        // resolve parcelable into a descriptor
133        result = mEndPointParcelable.resolve(&mEndpointDescriptor);
134        if (result != AAUDIO_OK) {
135            ALOGE("AudioStreamInternal.open(): resolve() returns %d", result);
136            mServiceInterface.closeStream(mServiceStreamHandle);
137            return result;
138        }
139
140        // Configure endpoint based on descriptor.
141        mAudioEndpoint.configure(&mEndpointDescriptor);
142
143        mFramesPerBurst = mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
144        int32_t capacity = mEndpointDescriptor.dataQueueDescriptor.capacityInFrames;
145
146        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.open() %s framesPerBurst = %d, capacity = %d",
147                 getLocationName(), mFramesPerBurst, capacity);
148        // Validate result from server.
149        if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) {
150            ALOGE("AudioStream::open(): framesPerBurst out of range = %d", mFramesPerBurst);
151            return AAUDIO_ERROR_OUT_OF_RANGE;
152        }
153        if (capacity < mFramesPerBurst || capacity > 32 * 1024) {
154            ALOGE("AudioStream::open(): bufferCapacity out of range = %d", capacity);
155            return AAUDIO_ERROR_OUT_OF_RANGE;
156        }
157
158        mClockModel.setSampleRate(getSampleRate());
159        mClockModel.setFramesPerBurst(mFramesPerBurst);
160
161        if (getDataCallbackProc()) {
162            mCallbackFrames = builder.getFramesPerDataCallback();
163            if (mCallbackFrames > getBufferCapacity() / 2) {
164                ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d",
165                      mCallbackFrames, getBufferCapacity());
166                mServiceInterface.closeStream(mServiceStreamHandle);
167                return AAUDIO_ERROR_OUT_OF_RANGE;
168
169            } else if (mCallbackFrames < 0) {
170                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
171                mServiceInterface.closeStream(mServiceStreamHandle);
172                return AAUDIO_ERROR_OUT_OF_RANGE;
173
174            }
175            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
176                mCallbackFrames = mFramesPerBurst;
177            }
178
179            int32_t bytesPerFrame = getSamplesPerFrame()
180                                    * AAudioConvert_formatToSizeInBytes(getFormat());
181            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
182            mCallbackBuffer = new uint8_t[callbackBufferSize];
183        }
184
185        setState(AAUDIO_STREAM_STATE_OPEN);
186    }
187    return result;
188}
189
190aaudio_result_t AudioStreamInternal::close() {
191    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X",
192             mServiceStreamHandle);
193    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
194        // Don't close a stream while it is running.
195        aaudio_stream_state_t currentState = getState();
196        if (isActive()) {
197            requestStop();
198            aaudio_stream_state_t nextState;
199            int64_t timeoutNanoseconds = MIN_TIMEOUT_NANOS;
200            aaudio_result_t result = waitForStateChange(currentState, &nextState,
201                                                       timeoutNanoseconds);
202            if (result != AAUDIO_OK) {
203                ALOGE("AudioStreamInternal::close() waitForStateChange() returned %d %s",
204                result, AAudio_convertResultToText(result));
205            }
206        }
207        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
208        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
209
210        mServiceInterface.closeStream(serviceStreamHandle);
211        delete[] mCallbackBuffer;
212        mCallbackBuffer = nullptr;
213        return mEndPointParcelable.close();
214    } else {
215        return AAUDIO_ERROR_INVALID_HANDLE;
216    }
217}
218
219
220static void *aaudio_callback_thread_proc(void *context)
221{
222    AudioStreamInternal *stream = (AudioStreamInternal *)context;
223    //LOGD("AudioStreamInternal(): oboe_callback_thread, stream = %p", stream);
224    if (stream != NULL) {
225        return stream->callbackLoop();
226    } else {
227        return NULL;
228    }
229}
230
231aaudio_result_t AudioStreamInternal::requestStart()
232{
233    int64_t startTime;
234    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): start()");
235    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
236        return AAUDIO_ERROR_INVALID_STATE;
237    }
238
239    startTime = AudioClock::getNanoseconds();
240    mClockModel.start(startTime);
241    setState(AAUDIO_STREAM_STATE_STARTING);
242    aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);;
243
244    if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) {
245        // Launch the callback loop thread.
246        int64_t periodNanos = mCallbackFrames
247                              * AAUDIO_NANOS_PER_SECOND
248                              / getSampleRate();
249        mCallbackEnabled.store(true);
250        result = createThread(periodNanos, aaudio_callback_thread_proc, this);
251    }
252    return result;
253}
254
255int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) {
256
257    // Wait for at least a second or some number of callbacks to join the thread.
258    int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS
259                                  * framesPerOperation
260                                  * AAUDIO_NANOS_PER_SECOND)
261                                  / getSampleRate();
262    if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds
263        timeoutNanoseconds = MIN_TIMEOUT_NANOS;
264    }
265    return timeoutNanoseconds;
266}
267
// Convenience overload: timeout sized for one burst of frames.
int64_t AudioStreamInternal::calculateReasonableTimeout() {
    return calculateReasonableTimeout(getFramesPerBurst());
}
271
272aaudio_result_t AudioStreamInternal::stopCallback()
273{
274    if (isDataCallbackActive()) {
275        mCallbackEnabled.store(false);
276        return joinThread(NULL);
277    } else {
278        return AAUDIO_OK;
279    }
280}
281
282aaudio_result_t AudioStreamInternal::requestPauseInternal()
283{
284    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
285        ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X",
286              mServiceStreamHandle);
287        return AAUDIO_ERROR_INVALID_STATE;
288    }
289
290    mClockModel.stop(AudioClock::getNanoseconds());
291    setState(AAUDIO_STREAM_STATE_PAUSING);
292    return mServiceInterface.pauseStream(mServiceStreamHandle);
293}
294
295aaudio_result_t AudioStreamInternal::requestPause()
296{
297    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestPause()", getLocationName());
298    aaudio_result_t result = stopCallback();
299    if (result != AAUDIO_OK) {
300        return result;
301    }
302    result = requestPauseInternal();
303    ALOGD("AudioStreamInternal(): requestPause() returns %d", result);
304    return result;
305}
306
307aaudio_result_t AudioStreamInternal::requestFlush() {
308    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): requestFlush()");
309    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
310        ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X",
311              mServiceStreamHandle);
312        return AAUDIO_ERROR_INVALID_STATE;
313    }
314
315    setState(AAUDIO_STREAM_STATE_FLUSHING);
316    return mServiceInterface.flushStream(mServiceStreamHandle);
317}
318
319// TODO for Play only
320void AudioStreamInternal::onFlushFromServer() {
321    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): onFlushFromServer()");
322    int64_t readCounter = mAudioEndpoint.getDataReadCounter();
323    int64_t writeCounter = mAudioEndpoint.getDataWriteCounter();
324
325    // Bump offset so caller does not see the retrograde motion in getFramesRead().
326    int64_t framesFlushed = writeCounter - readCounter;
327    mFramesOffsetFromService += framesFlushed;
328
329    // Flush written frames by forcing writeCounter to readCounter.
330    // This is because we cannot move the read counter in the hardware.
331    mAudioEndpoint.setDataWriteCounter(readCounter);
332}
333
334aaudio_result_t AudioStreamInternal::requestStopInternal()
335{
336    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
337        ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X",
338              mServiceStreamHandle);
339        return AAUDIO_ERROR_INVALID_STATE;
340    }
341
342    mClockModel.stop(AudioClock::getNanoseconds());
343    setState(AAUDIO_STREAM_STATE_STOPPING);
344    return mServiceInterface.stopStream(mServiceStreamHandle);
345}
346
347aaudio_result_t AudioStreamInternal::requestStop()
348{
349    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestStop()", getLocationName());
350    aaudio_result_t result = stopCallback();
351    if (result != AAUDIO_OK) {
352        return result;
353    }
354    result = requestStopInternal();
355    ALOGD("AudioStreamInternal(): requestStop() returns %d", result);
356    return result;
357}
358
// Register the calling thread with the service (e.g. so it can be given
// elevated scheduling for the callback period). Returns
// AAUDIO_ERROR_INVALID_STATE if the stream is not open.
aaudio_result_t AudioStreamInternal::registerThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.registerAudioThread(mServiceStreamHandle,
                                              getpid(),
                                              gettid(),
                                              getPeriodNanoseconds());
}
368
// Undo registerThread() for the calling thread. Returns
// AAUDIO_ERROR_INVALID_STATE if the stream is not open.
aaudio_result_t AudioStreamInternal::unregisterThread() {
    if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) {
        return AAUDIO_ERROR_INVALID_STATE;
    }
    return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid());
}
375
376aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId,
377                           int64_t *framePosition,
378                           int64_t *timeNanoseconds) {
379    // TODO Generate in server and pass to client. Return latest.
380    int64_t time = AudioClock::getNanoseconds();
381    *framePosition = mClockModel.convertTimeToPosition(time);
382    // TODO Get a more accurate timestamp from the service. This code just adds a fudge factor.
383    *timeNanoseconds = time + (6 * AAUDIO_NANOS_PER_MILLISECOND);
384    return AAUDIO_OK;
385}
386
387aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() {
388    if (isDataCallbackActive()) {
389        return AAUDIO_OK; // state is getting updated by the callback thread read/write call
390    }
391    return processCommands();
392}
393
#if LOG_TIMESTAMPS
// Debug-only helper (compiled only when LOG_TIMESTAMPS is nonzero): log each
// timestamp from the service and the frame rate measured between consecutive
// timestamps. Uses function-local statics, so it is not thread-safe.
static void AudioStreamInternal_logTimestamp(AAudioServiceMessage &command) {
    static int64_t oldPosition = 0;  // frame position from the previous call
    static int64_t oldTime = 0;      // nanoTime from the previous call
    int64_t framePosition = command.timestamp.position;
    int64_t nanoTime = command.timestamp.timestamp;
    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu",
         (long long) framePosition,
         (long long) nanoTime);
    int64_t nanosDelta = nanoTime - oldTime;
    // Skip the rate computation on the first call (oldTime == 0).
    if (nanosDelta > 0 && oldTime > 0) {
        int64_t framesDelta = framePosition - oldPosition;
        int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta;
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta);
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta);
        ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate);
    }
    oldPosition = framePosition;
    oldTime = nanoTime;
}
#endif
415
// Handle a TIMESTAMP message from the service: feed it to the clock model
// (optionally logging it when LOG_TIMESTAMPS is enabled).
aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) {
#if LOG_TIMESTAMPS
    AudioStreamInternal_logTimestamp(*message);
#endif
    processTimestamp(message->timestamp.position, message->timestamp.timestamp);
    return AAUDIO_OK;
}
423
// Handle an EVENT message from the service. Most events confirm a state
// transition this client previously requested; the state is only advanced
// when the stream is still in the matching *ING state, so stale events are
// ignored. Returns AAUDIO_OK, or AAUDIO_ERROR_DISCONNECTED on a disconnect.
aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) {
    aaudio_result_t result = AAUDIO_OK;
    ALOGD_IF(MYLOG_CONDITION, "processCommands() got event %d", message->event.event);
    switch (message->event.event) {
        case AAUDIO_SERVICE_EVENT_STARTED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED");
            if (getState() == AAUDIO_STREAM_STATE_STARTING) {
                setState(AAUDIO_STREAM_STATE_STARTED);
            }
            break;
        case AAUDIO_SERVICE_EVENT_PAUSED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED");
            if (getState() == AAUDIO_STREAM_STATE_PAUSING) {
                setState(AAUDIO_STREAM_STATE_PAUSED);
            }
            break;
        case AAUDIO_SERVICE_EVENT_STOPPED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STOPPED");
            if (getState() == AAUDIO_STREAM_STATE_STOPPING) {
                setState(AAUDIO_STREAM_STATE_STOPPED);
            }
            break;
        case AAUDIO_SERVICE_EVENT_FLUSHED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED");
            if (getState() == AAUDIO_STREAM_STATE_FLUSHING) {
                setState(AAUDIO_STREAM_STATE_FLUSHED);
                // Discard unplayed frames on the client side of the FIFO.
                onFlushFromServer();
            }
            break;
        case AAUDIO_SERVICE_EVENT_CLOSED:
            ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED");
            setState(AAUDIO_STREAM_STATE_CLOSED);
            break;
        case AAUDIO_SERVICE_EVENT_DISCONNECTED:
            // Propagate the disconnect to the caller via the return value.
            result = AAUDIO_ERROR_DISCONNECTED;
            setState(AAUDIO_STREAM_STATE_DISCONNECTED);
            ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED");
            break;
        case AAUDIO_SERVICE_EVENT_VOLUME:
            // Volume changes ramp to the new target rather than jumping.
            mVolumeRamp.setTarget((float) message->event.dataDouble);
            ALOGD_IF(MYLOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f",
                     message->event.dataDouble);
            break;
        default:
            ALOGW("WARNING - processCommands() Unrecognized event = %d",
                 (int) message->event.event);
            break;
    }
    return result;
}
474
475// Process all the commands coming from the server.
476aaudio_result_t AudioStreamInternal::processCommands() {
477    aaudio_result_t result = AAUDIO_OK;
478
479    while (result == AAUDIO_OK) {
480        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
481        AAudioServiceMessage message;
482        if (mAudioEndpoint.readUpCommand(&message) != 1) {
483            break; // no command this time, no problem
484        }
485        switch (message.what) {
486        case AAudioServiceMessage::code::TIMESTAMP:
487            result = onTimestampFromServer(&message);
488            break;
489
490        case AAudioServiceMessage::code::EVENT:
491            result = onEventFromServer(&message);
492            break;
493
494        default:
495            ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
496                 (int) message.what);
497            result = AAUDIO_ERROR_INTERNAL;
498            break;
499        }
500    }
501    return result;
502}
503
504// Read or write the data, block if needed and timeoutMillis > 0
505aaudio_result_t AudioStreamInternal::processData(void *buffer, int32_t numFrames,
506                                                 int64_t timeoutNanoseconds)
507{
508    const char * traceName = (mInService) ? "aaWrtS" : "aaWrtC";
509    ATRACE_BEGIN(traceName);
510    aaudio_result_t result = AAUDIO_OK;
511    int32_t loopCount = 0;
512    uint8_t* audioData = (uint8_t*)buffer;
513    int64_t currentTimeNanos = AudioClock::getNanoseconds();
514    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
515    int32_t framesLeft = numFrames;
516
517    int32_t fullFrames = mAudioEndpoint.getFullFramesAvailable();
518    if (ATRACE_ENABLED()) {
519        const char * traceName = (mInService) ? "aaFullS" : "aaFullC";
520        ATRACE_INT(traceName, fullFrames);
521    }
522
523    // Loop until all the data has been processed or until a timeout occurs.
524    while (framesLeft > 0) {
525        // The call to processDataNow() will not block. It will just read as much as it can.
526        int64_t wakeTimeNanos = 0;
527        aaudio_result_t framesProcessed = processDataNow(audioData, framesLeft,
528                                                  currentTimeNanos, &wakeTimeNanos);
529        if (framesProcessed < 0) {
530            ALOGE("AudioStreamInternal::processData() loop: framesProcessed = %d", framesProcessed);
531            result = framesProcessed;
532            break;
533        }
534        framesLeft -= (int32_t) framesProcessed;
535        audioData += framesProcessed * getBytesPerFrame();
536
537        // Should we block?
538        if (timeoutNanoseconds == 0) {
539            break; // don't block
540        } else if (framesLeft > 0) {
541            // clip the wake time to something reasonable
542            if (wakeTimeNanos < currentTimeNanos) {
543                wakeTimeNanos = currentTimeNanos;
544            }
545            if (wakeTimeNanos > deadlineNanos) {
546                // If we time out, just return the framesWritten so far.
547                ALOGE("AudioStreamInternal::processData(): timed out after %lld nanos",
548                      (long long) timeoutNanoseconds);
549                ALOGE("AudioStreamInternal::processData(): wakeTime = %lld, deadline = %lld nanos",
550                      (long long) wakeTimeNanos, (long long) deadlineNanos);
551                ALOGE("AudioStreamInternal::processData(): past deadline by %d micros",
552                      (int)((wakeTimeNanos - deadlineNanos) / AAUDIO_NANOS_PER_MICROSECOND));
553
554                break;
555            }
556
557            int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
558            //ALOGE("AudioStreamInternal::processData(): sleep for %d micros",
559            //      (int)(sleepForNanos / AAUDIO_NANOS_PER_MICROSECOND));
560            AudioClock::sleepForNanos(sleepForNanos);
561            currentTimeNanos = AudioClock::getNanoseconds();
562        }
563    }
564
565    // return error or framesProcessed
566    (void) loopCount;
567    ATRACE_END();
568    return (result < 0) ? result : numFrames - framesLeft;
569}
570
// Feed a (position, time) pair from the service into the clock model.
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp(position, time);
}
574
575aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
576    int32_t actualFrames = 0;
577    // Round to the next highest burst size.
578    if (getFramesPerBurst() > 0) {
579        int32_t numBursts = (requestedFrames + getFramesPerBurst() - 1) / getFramesPerBurst();
580        requestedFrames = numBursts * getFramesPerBurst();
581    }
582
583    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
584    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::setBufferSize() %s req = %d => %d",
585             getLocationName(), requestedFrames, actualFrames);
586    if (result < 0) {
587        return result;
588    } else {
589        return (aaudio_result_t) actualFrames;
590    }
591}
592
// Current endpoint buffer size in frames.
int32_t AudioStreamInternal::getBufferSize() const {
    return mAudioEndpoint.getBufferSizeInFrames();
}
596
// Maximum endpoint buffer capacity in frames.
int32_t AudioStreamInternal::getBufferCapacity() const {
    return mAudioEndpoint.getBufferCapacityInFrames();
}
600
// Burst size in frames, read from the endpoint descriptor (the authoritative
// value received from the server; mFramesPerBurst caches the same number).
int32_t AudioStreamInternal::getFramesPerBurst() const {
    return mEndpointDescriptor.dataQueueDescriptor.framesPerBurst;
}
604
605aaudio_result_t AudioStreamInternal::joinThread(void** returnArg) {
606    return AudioStream::joinThread(returnArg, calculateReasonableTimeout(getFramesPerBurst()));
607}
608