AudioStreamInternal.cpp revision e4d7bb418df0fdc4c708c334ba3601f5ed8d89b3
/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "AAudio"
//#define LOG_NDEBUG 0
#include <utils/Log.h>

#include <assert.h>

#include <binder/IServiceManager.h>
#include <utils/Mutex.h>

#include <aaudio/AAudio.h>
#include <utils/String16.h>

#include "utility/AudioClock.h"
#include "AudioStreamInternal.h"
#include "binding/AAudioServiceMessage.h"

#include "core/AudioStreamBuilder.h"

// Set to 1 to log every timestamp message received from the server.
#define LOG_TIMESTAMPS 0

using android::String16;
using android::IServiceManager;
using android::defaultServiceManager;
using android::interface_cast;
using android::Mutex;

using namespace aaudio;

// Process-wide cached binder proxy to the AAudio service; both are guarded by gServiceLock.
static android::Mutex gServiceLock;
static sp<IAAudioService> gAAudioService;

#define AAUDIO_SERVICE_NAME "AAudioService"

// Lower bound applied to every computed blocking-operation timeout.
#define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND)

// Wait at least this many times longer than the operation should take.
#define MIN_TIMEOUT_OPERATIONS 4

// Helper function to get access to the "AAudioService" service.
// Returns the cached proxy, or looks it up (retrying a few times) on first use.
// Returns nullptr (0) if the service cannot be found.
// This code was modeled after frameworks/av/media/libaudioclient/AudioSystem.cpp
static const sp<IAAudioService> getAAudioService() {
    sp<IBinder> binder;
    Mutex::Autolock _l(gServiceLock);   // serialize lookup and cache access
    if (gAAudioService == 0) {
        sp<IServiceManager> sm = defaultServiceManager();
        // Try several times to get the service.
        int retries = 4;
        do {
            binder = sm->getService(String16(AAUDIO_SERVICE_NAME)); // This will wait a while.
            if (binder != 0) {
                break;
            }
        } while (retries-- > 0);

        if (binder != 0) {
            // TODO Add linkToDeath() like in frameworks/av/media/libaudioclient/AudioSystem.cpp
            // TODO Create a DeathRecipient that disconnects all active streams.
            gAAudioService = interface_cast<IAAudioService>(binder);
        } else {
            ALOGE("AudioStreamInternal could not get %s", AAUDIO_SERVICE_NAME);
        }
    }
    return gAAudioService;
}

AudioStreamInternal::AudioStreamInternal()
        : AudioStream()
        , mClockModel()
        , mAudioEndpoint()
        , mServiceStreamHandle(AAUDIO_HANDLE_INVALID)
        , mFramesPerBurst(16)   // placeholder until the real burst size arrives from the service
{
}

AudioStreamInternal::~AudioStreamInternal() {
    // NOTE(review): mCallbackBuffer is freed only in close(); destroying a stream
    // that was never closed appears to leak the buffer -- TODO confirm ownership.
}

// Open a stream on the AAudio service: send the requested configuration,
// validate what the server actually granted, map the shared endpoint
// descriptor, and (when a data callback is installed) size and allocate the
// callback transfer buffer. Returns AAUDIO_OK or a negative aaudio_result_t.
aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) {

    const sp<IAAudioService>& service = getAAudioService();
    if (service == 0) return AAUDIO_ERROR_NO_SERVICE;

    aaudio_result_t result = AAUDIO_OK;
    AAudioStreamRequest request;
    AAudioStreamConfiguration configuration;

    // Let the base class capture the builder parameters first.
    result = AudioStream::open(builder);
    if (result < 0) {
        return result;
    }

    // Build the request to send to the server.
    request.setUserId(getuid());
    request.setProcessId(getpid());
    request.getConfiguration().setDeviceId(getDeviceId());
    request.getConfiguration().setSampleRate(getSampleRate());
    request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame());
    request.getConfiguration().setAudioFormat(getFormat());
    request.getConfiguration().setBufferCapacity(builder.getBufferCapacity());
    request.dump();

    mServiceStreamHandle = service->openStream(request, configuration);
    ALOGD("AudioStreamInternal.open(): openStream returned mServiceStreamHandle = 0x%08X",
          (unsigned int)mServiceStreamHandle);
    if (mServiceStreamHandle < 0) {
        // A negative handle doubles as the error code.
        result = mServiceStreamHandle;
        ALOGE("AudioStreamInternal.open(): acquireRealtimeStream aaudio_result_t = 0x%08X", result);
    } else {
        result = configuration.validate();
        if (result != AAUDIO_OK) {
            close();
            return result;
        }
        // Save results of the open. The server may have altered our request.
        setSampleRate(configuration.getSampleRate());
        setSamplesPerFrame(configuration.getSamplesPerFrame());
        setFormat(configuration.getAudioFormat());

        // Fetch the shared-memory description of the stream's data/command queues.
        aaudio::AudioEndpointParcelable parcelable;
        result = service->getStreamDescription(mServiceStreamHandle, parcelable);
        if (result != AAUDIO_OK) {
            ALOGE("AudioStreamInternal.open(): getStreamDescriptor returns %d", result);
            service->closeStream(mServiceStreamHandle);
            return result;
        }
        // resolve parcelable into a descriptor
        parcelable.resolve(&mEndpointDescriptor);

        // Configure endpoint based on descriptor.
        mAudioEndpoint.configure(&mEndpointDescriptor);

        mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
        // Sanity-check the values delivered by the server.
        assert(mFramesPerBurst >= 16);
        assert(mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames < 10 * 1024);

        mClockModel.setSampleRate(getSampleRate());
        mClockModel.setFramesPerBurst(mFramesPerBurst);

        if (getDataCallbackProc()) {
            // Validate and size the per-callback transfer buffer.
            mCallbackFrames = builder.getFramesPerDataCallback();
            if (mCallbackFrames > getBufferCapacity() / 2) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback too large");
                service->closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            } else if (mCallbackFrames < 0) {
                ALOGE("AudioStreamInternal.open(): framesPerCallback negative");
                service->closeStream(mServiceStreamHandle);
                return AAUDIO_ERROR_OUT_OF_RANGE;

            }
            if (mCallbackFrames == AAUDIO_UNSPECIFIED) {
                // Default to one burst per callback.
                mCallbackFrames = mFramesPerBurst;
            }

            int32_t bytesPerFrame = getSamplesPerFrame()
                    * AAudioConvert_formatToSizeInBytes(getFormat());
            int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame;
            mCallbackBuffer = new uint8_t[callbackBufferSize];
        }

        setState(AAUDIO_STREAM_STATE_OPEN);
    }
    return result;
}

// Release the server-side stream and the callback buffer.
// A second call returns AAUDIO_ERROR_INVALID_HANDLE because the handle is
// invalidated before the service round-trip.
aaudio_result_t AudioStreamInternal::close() {
    ALOGD("AudioStreamInternal.close(): mServiceStreamHandle = 0x%08X", mServiceStreamHandle);
    if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) {
        aaudio_handle_t serviceStreamHandle = mServiceStreamHandle;
        mServiceStreamHandle = AAUDIO_HANDLE_INVALID;
        const sp<IAAudioService>& aaudioService = getAAudioService();
        if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE;
        aaudioService->closeStream(serviceStreamHandle);
        delete[] mCallbackBuffer;
        return AAUDIO_OK;
    } else {
        return AAUDIO_ERROR_INVALID_HANDLE;
    }
}

// Render audio in the application callback and then write the data to the stream.
199void *AudioStreamInternal::callbackLoop() { 200 aaudio_result_t result = AAUDIO_OK; 201 aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE; 202 int32_t framesWritten = 0; 203 AAudioStream_dataCallback appCallback = getDataCallbackProc(); 204 if (appCallback == nullptr) return NULL; 205 206 while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) { // result might be a frame count 207 // Call application using the AAudio callback interface. 208 callbackResult = (*appCallback)( 209 (AAudioStream *) this, 210 getDataCallbackUserData(), 211 mCallbackBuffer, 212 mCallbackFrames); 213 214 if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) { 215 // Write audio data to stream 216 int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames); 217 result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos); 218 if (result == AAUDIO_ERROR_DISCONNECTED) { 219 if (getErrorCallbackProc() != nullptr) { 220 ALOGD("AudioStreamAAudio(): callbackLoop() stream disconnected"); 221 (*getErrorCallbackProc())( 222 (AAudioStream *) this, 223 getErrorCallbackUserData(), 224 AAUDIO_OK); 225 } 226 break; 227 } else if (result != mCallbackFrames) { 228 ALOGE("AudioStreamAAudio(): callbackLoop() wrote %d / %d", 229 framesWritten, mCallbackFrames); 230 break; 231 } 232 } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) { 233 ALOGD("AudioStreamAAudio(): callback returned AAUDIO_CALLBACK_RESULT_STOP"); 234 break; 235 } 236 } 237 238 ALOGD("AudioStreamAAudio(): callbackLoop() exiting, result = %d, isPlaying() = %d", 239 result, (int) isPlaying()); 240 return NULL; // TODO review 241} 242 243static void *aaudio_callback_thread_proc(void *context) 244{ 245 AudioStreamInternal *stream = (AudioStreamInternal *)context; 246 //LOGD("AudioStreamAAudio(): oboe_callback_thread, stream = %p", stream); 247 if (stream != NULL) { 248 return stream->callbackLoop(); 249 } else { 250 return NULL; 251 } 252} 253 254aaudio_result_t 
AudioStreamInternal::requestStart() 255{ 256 int64_t startTime; 257 ALOGD("AudioStreamInternal(): start()"); 258 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 259 return AAUDIO_ERROR_INVALID_STATE; 260 } 261 const sp<IAAudioService>& aaudioService = getAAudioService(); 262 if (aaudioService == 0) { 263 return AAUDIO_ERROR_NO_SERVICE; 264 } 265 startTime = AudioClock::getNanoseconds(); 266 mClockModel.start(startTime); 267 processTimestamp(0, startTime); 268 setState(AAUDIO_STREAM_STATE_STARTING); 269 aaudio_result_t result = aaudioService->startStream(mServiceStreamHandle); 270 271 if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) { 272 // Launch the callback loop thread. 273 int64_t periodNanos = mCallbackFrames 274 * AAUDIO_NANOS_PER_SECOND 275 / getSampleRate(); 276 mCallbackEnabled.store(true); 277 result = createThread(periodNanos, aaudio_callback_thread_proc, this); 278 } 279 return result; 280} 281 282int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) { 283 284 // Wait for at least a second or some number of callbacks to join the thread. 
285 int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS * framesPerOperation * AAUDIO_NANOS_PER_SECOND) 286 / getSampleRate(); 287 if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds 288 timeoutNanoseconds = MIN_TIMEOUT_NANOS; 289 } 290 return timeoutNanoseconds; 291} 292 293aaudio_result_t AudioStreamInternal::stopCallback() 294{ 295 if (isDataCallbackActive()) { 296 mCallbackEnabled.store(false); 297 return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames)); 298 } else { 299 return AAUDIO_OK; 300 } 301} 302 303aaudio_result_t AudioStreamInternal::requestPauseInternal() 304{ 305 ALOGD("AudioStreamInternal(): pause()"); 306 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 307 return AAUDIO_ERROR_INVALID_STATE; 308 } 309 const sp<IAAudioService>& aaudioService = getAAudioService(); 310 if (aaudioService == 0) { 311 return AAUDIO_ERROR_NO_SERVICE; 312 } 313 mClockModel.stop(AudioClock::getNanoseconds()); 314 setState(AAUDIO_STREAM_STATE_PAUSING); 315 return aaudioService->pauseStream(mServiceStreamHandle); 316} 317 318aaudio_result_t AudioStreamInternal::requestPause() 319{ 320 aaudio_result_t result = stopCallback(); 321 if (result != AAUDIO_OK) { 322 return result; 323 } 324 return requestPauseInternal(); 325} 326 327aaudio_result_t AudioStreamInternal::requestFlush() { 328 ALOGD("AudioStreamInternal(): flush()"); 329 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 330 return AAUDIO_ERROR_INVALID_STATE; 331 } 332 const sp<IAAudioService>& aaudioService = getAAudioService(); 333 if (aaudioService == 0) { 334 return AAUDIO_ERROR_NO_SERVICE; 335 } 336 setState(AAUDIO_STREAM_STATE_FLUSHING); 337 return aaudioService->flushStream(mServiceStreamHandle); 338} 339 340void AudioStreamInternal::onFlushFromServer() { 341 ALOGD("AudioStreamInternal(): onFlushFromServer()"); 342 int64_t readCounter = mAudioEndpoint.getDownDataReadCounter(); 343 int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter(); 344 // Bump offset so 
caller does not see the retrograde motion in getFramesRead(). 345 int64_t framesFlushed = writeCounter - readCounter; 346 mFramesOffsetFromService += framesFlushed; 347 // Flush written frames by forcing writeCounter to readCounter. 348 // This is because we cannot move the read counter in the hardware. 349 mAudioEndpoint.setDownDataWriteCounter(readCounter); 350} 351 352aaudio_result_t AudioStreamInternal::requestStop() 353{ 354 // TODO better implementation of requestStop() 355 aaudio_result_t result = requestPause(); 356 if (result == AAUDIO_OK) { 357 aaudio_stream_state_t state; 358 result = waitForStateChange(AAUDIO_STREAM_STATE_PAUSING, 359 &state, 360 500 * AAUDIO_NANOS_PER_MILLISECOND);// TODO temporary code 361 if (result == AAUDIO_OK) { 362 result = requestFlush(); 363 } 364 } 365 return result; 366} 367 368aaudio_result_t AudioStreamInternal::registerThread() { 369 ALOGD("AudioStreamInternal(): registerThread()"); 370 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 371 return AAUDIO_ERROR_INVALID_STATE; 372 } 373 const sp<IAAudioService>& aaudioService = getAAudioService(); 374 if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE; 375 return aaudioService->registerAudioThread(mServiceStreamHandle, 376 gettid(), 377 getPeriodNanoseconds()); 378} 379 380aaudio_result_t AudioStreamInternal::unregisterThread() { 381 ALOGD("AudioStreamInternal(): unregisterThread()"); 382 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 383 return AAUDIO_ERROR_INVALID_STATE; 384 } 385 const sp<IAAudioService>& aaudioService = getAAudioService(); 386 if (aaudioService == 0) return AAUDIO_ERROR_NO_SERVICE; 387 return aaudioService->unregisterAudioThread(mServiceStreamHandle, gettid()); 388} 389 390aaudio_result_t AudioStreamInternal::getTimestamp(clockid_t clockId, 391 int64_t *framePosition, 392 int64_t *timeNanoseconds) { 393 // TODO implement using real HAL 394 int64_t time = AudioClock::getNanoseconds(); 395 *framePosition = 
mClockModel.convertTimeToPosition(time); 396 *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay 397 return AAUDIO_OK; 398} 399 400aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() { 401 if (isDataCallbackActive()) { 402 return AAUDIO_OK; // state is getting updated by the callback thread read/write call 403 } 404 return processCommands(); 405} 406 407#if LOG_TIMESTAMPS 408static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) { 409 static int64_t oldPosition = 0; 410 static int64_t oldTime = 0; 411 int64_t framePosition = command.timestamp.position; 412 int64_t nanoTime = command.timestamp.timestamp; 413 ALOGD("AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu", 414 (long long) framePosition, 415 (long long) nanoTime); 416 int64_t nanosDelta = nanoTime - oldTime; 417 if (nanosDelta > 0 && oldTime > 0) { 418 int64_t framesDelta = framePosition - oldPosition; 419 int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta; 420 ALOGD("AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta); 421 ALOGD("AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta); 422 ALOGD("AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate); 423 } 424 oldPosition = framePosition; 425 oldTime = nanoTime; 426} 427#endif 428 429aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) { 430 int64_t framePosition = 0; 431#if LOG_TIMESTAMPS 432 AudioStreamInternal_LogTimestamp(command); 433#endif 434 framePosition = message->timestamp.position; 435 processTimestamp(framePosition, message->timestamp.timestamp); 436 return AAUDIO_OK; 437} 438 439aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) { 440 aaudio_result_t result = AAUDIO_OK; 441 ALOGD("processCommands() got event %d", message->event.event); 442 switch (message->event.event) { 443 case AAUDIO_SERVICE_EVENT_STARTED: 
444 ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_STARTED"); 445 setState(AAUDIO_STREAM_STATE_STARTED); 446 break; 447 case AAUDIO_SERVICE_EVENT_PAUSED: 448 ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_PAUSED"); 449 setState(AAUDIO_STREAM_STATE_PAUSED); 450 break; 451 case AAUDIO_SERVICE_EVENT_FLUSHED: 452 ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED"); 453 setState(AAUDIO_STREAM_STATE_FLUSHED); 454 onFlushFromServer(); 455 break; 456 case AAUDIO_SERVICE_EVENT_CLOSED: 457 ALOGD("processCommands() got AAUDIO_SERVICE_EVENT_CLOSED"); 458 setState(AAUDIO_STREAM_STATE_CLOSED); 459 break; 460 case AAUDIO_SERVICE_EVENT_DISCONNECTED: 461 result = AAUDIO_ERROR_DISCONNECTED; 462 ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED"); 463 break; 464 default: 465 ALOGW("WARNING - processCommands() Unrecognized event = %d", 466 (int) message->event.event); 467 break; 468 } 469 return result; 470} 471 472// Process all the commands coming from the server. 473aaudio_result_t AudioStreamInternal::processCommands() { 474 aaudio_result_t result = AAUDIO_OK; 475 476 while (result == AAUDIO_OK) { 477 AAudioServiceMessage message; 478 if (mAudioEndpoint.readUpCommand(&message) != 1) { 479 break; // no command this time, no problem 480 } 481 switch (message.what) { 482 case AAudioServiceMessage::code::TIMESTAMP: 483 result = onTimestampFromServer(&message); 484 break; 485 486 case AAudioServiceMessage::code::EVENT: 487 result = onEventFromServer(&message); 488 break; 489 490 default: 491 ALOGW("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d", 492 (int) message.what); 493 result = AAUDIO_ERROR_UNEXPECTED_VALUE; 494 break; 495 } 496 } 497 return result; 498} 499 500// Write the data, block if needed and timeoutMillis > 0 501aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames, 502 int64_t timeoutNanoseconds) 503{ 504 aaudio_result_t result = AAUDIO_OK; 505 uint8_t* source = (uint8_t*)buffer; 506 
    // Snapshot "now" once; refreshed after each sleep below.
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;
//    ALOGD("AudioStreamInternal::write(%p, %d) at time %08llu , mState = %d ------------------",
//          buffer, numFrames, (unsigned long long) currentTimeNanos, mState);

    // Write until all the data has been written or until a timeout occurs.
    while (framesLeft > 0) {
        // The call to writeNow() will not block. It will just write as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                 currentTimeNanos, &wakeTimeNanos);
//        ALOGD("AudioStreamInternal::write() writeNow() framesLeft = %d --> framesWritten = %d", framesLeft, framesWritten);
        if (framesWritten < 0) {
            // A negative return value is an error code, e.g. DISCONNECTED.
            result = framesWritten;
            break;
        }
        framesLeft -= (int32_t) framesWritten;
        source += framesWritten * getBytesPerFrame();

        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (framesLeft > 0) {
            //ALOGD("AudioStreamInternal:: original wakeTimeNanos %lld", (long long) wakeTimeNanos);
            // clip the wake time to something reasonable
            if (wakeTimeNanos < currentTimeNanos) {
                wakeTimeNanos = currentTimeNanos;
            }
            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos", (long long) timeoutNanoseconds);
                break;
            }

            //ALOGD("AudioStreamInternal:: sleep until %lld, dur = %lld", (long long) wakeTimeNanos,
            //      (long long) (wakeTimeNanos - currentTimeNanos));
            AudioClock::sleepForNanos(wakeTimeNanos - currentTimeNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    // return error or framesWritten
    return (result < 0) ? result : numFrames - framesLeft;
}

// Write as much data as we can without blocking.
// On success returns the number of frames written (possibly 0) and, via
// wakeTimePtr, an estimate of when the next write attempt is worthwhile.
// A negative return is an error from processCommands() or the endpoint.
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {
    {
        // Apply any pending server commands before touching the queue.
        aaudio_result_t result = processCommands();
        if (result != AAUDIO_OK) {
            return result;
        }
    }

    if (mAudioEndpoint.isOutputFreeRunning()) {
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
        // If the read index passed the write index then consider it an underrun.
        if (mAudioEndpoint.getFullFramesAvailable() < 0) {
            mXRunCount++;
        }
    }
    // TODO else query from endpoint cuz set by actual reader, maybe

    // Write some data to the buffer.
    int32_t framesWritten = mAudioEndpoint.writeDataNow(buffer, numFrames);
    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    //ALOGD("AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
    //      numFrames, framesWritten);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (2 * AAUDIO_NANOS_PER_MILLISECOND);
        switch (getState()) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    uint32_t burstSize = mFramesPerBurst;
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    // Wake when the model predicts the reader will have consumed
                    // one more burst.
                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }
//    ALOGD("AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
//          (unsigned long long)currentNanoTime,
//          (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
//          (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}

// Forward a (position, time) observation into the clock model.
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp( position, time);
}

// Request a new buffer size from the endpoint; returns the size actually
// granted (in frames) or a negative error code.
aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) {
    int32_t actualFrames = 0;
    aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames);
    if (result < 0) {
        return result;
    } else {
        return (aaudio_result_t) actualFrames;
    }
}

// Current adjustable buffer size, in frames.
int32_t AudioStreamInternal::getBufferSize() const
{
    return mAudioEndpoint.getBufferSizeInFrames();
}

// Maximum buffer capacity, in frames.
int32_t AudioStreamInternal::getBufferCapacity() const
{
    return mAudioEndpoint.getBufferCapacityInFrames();
}

// Burst size reported by the server in the endpoint descriptor.
int32_t AudioStreamInternal::getFramesPerBurst() const
{
    return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst;
}

// Estimate how many frames have been consumed, using the clock model plus the
// offset that hides flush-induced jumps (see onFlushFromServer()).
int64_t AudioStreamInternal::getFramesRead()
{
    int64_t framesRead =
            mClockModel.convertTimeToPosition(AudioClock::getNanoseconds())
            + mFramesOffsetFromService;
    // Prevent retrograde motion.
    if (framesRead < mLastFramesRead) {
        framesRead = mLastFramesRead;
    } else {
        mLastFramesRead = framesRead;
    }
    // NOTE(review): this logs on every call; noisy for a frequently polled getter.
    ALOGD("AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead);
    return framesRead;
}

// TODO implement getTimestamp