AudioStreamInternal.cpp revision 4c5129b410884ec0400cbe65fce56d0ade12d11b
1/* 2 * Copyright (C) 2016 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#define LOG_TAG "AAudio" 18//#define LOG_NDEBUG 0 19#include <utils/Log.h> 20 21#include <stdint.h> 22#include <assert.h> 23 24#include <binder/IServiceManager.h> 25 26#include <aaudio/AAudio.h> 27#include <utils/String16.h> 28 29#include "AudioClock.h" 30#include "AudioEndpointParcelable.h" 31#include "binding/AAudioStreamRequest.h" 32#include "binding/AAudioStreamConfiguration.h" 33#include "binding/IAAudioService.h" 34#include "binding/AAudioServiceMessage.h" 35#include "core/AudioStreamBuilder.h" 36#include "fifo/FifoBuffer.h" 37#include "utility/LinearRamp.h" 38 39#include "AudioStreamInternal.h" 40 41#define LOG_TIMESTAMPS 0 42 43using android::String16; 44using android::Mutex; 45using android::WrappingBuffer; 46 47using namespace aaudio; 48 49#define MIN_TIMEOUT_NANOS (1000 * AAUDIO_NANOS_PER_MILLISECOND) 50 51// Wait at least this many times longer than the operation should take. 
52#define MIN_TIMEOUT_OPERATIONS 4 53 54//static int64_t s_logCounter = 0; 55//#define MYLOG_CONDITION (mInService == true && s_logCounter++ < 500) 56//#define MYLOG_CONDITION (s_logCounter++ < 500000) 57#define MYLOG_CONDITION (1) 58 59AudioStreamInternal::AudioStreamInternal(AAudioServiceInterface &serviceInterface, bool inService) 60 : AudioStream() 61 , mClockModel() 62 , mAudioEndpoint() 63 , mServiceStreamHandle(AAUDIO_HANDLE_INVALID) 64 , mFramesPerBurst(16) 65 , mServiceInterface(serviceInterface) 66 , mInService(inService) { 67} 68 69AudioStreamInternal::~AudioStreamInternal() { 70} 71 72aaudio_result_t AudioStreamInternal::open(const AudioStreamBuilder &builder) { 73 74 aaudio_result_t result = AAUDIO_OK; 75 AAudioStreamRequest request; 76 AAudioStreamConfiguration configuration; 77 78 result = AudioStream::open(builder); 79 if (result < 0) { 80 return result; 81 } 82 83 // We have to do volume scaling. So we prefer FLOAT format. 84 if (getFormat() == AAUDIO_UNSPECIFIED) { 85 setFormat(AAUDIO_FORMAT_PCM_FLOAT); 86 } 87 // Request FLOAT for the shared mixer. 88 request.getConfiguration().setAudioFormat(AAUDIO_FORMAT_PCM_FLOAT); 89 90 // Build the request to send to the server. 
91 request.setUserId(getuid()); 92 request.setProcessId(getpid()); 93 request.setDirection(getDirection()); 94 request.setSharingModeMatchRequired(isSharingModeMatchRequired()); 95 96 request.getConfiguration().setDeviceId(getDeviceId()); 97 request.getConfiguration().setSampleRate(getSampleRate()); 98 request.getConfiguration().setSamplesPerFrame(getSamplesPerFrame()); 99 request.getConfiguration().setSharingMode(getSharingMode()); 100 101 request.getConfiguration().setBufferCapacity(builder.getBufferCapacity()); 102 103 mServiceStreamHandle = mServiceInterface.openStream(request, configuration); 104 if (mServiceStreamHandle < 0) { 105 result = mServiceStreamHandle; 106 ALOGE("AudioStreamInternal.open(): %s openStream() returned %d", getLocationName(), result); 107 } else { 108 result = configuration.validate(); 109 if (result != AAUDIO_OK) { 110 close(); 111 return result; 112 } 113 // Save results of the open. 114 setSampleRate(configuration.getSampleRate()); 115 setSamplesPerFrame(configuration.getSamplesPerFrame()); 116 setDeviceId(configuration.getDeviceId()); 117 118 // Save device format so we can do format conversion and volume scaling together. 119 mDeviceFormat = configuration.getAudioFormat(); 120 121 result = mServiceInterface.getStreamDescription(mServiceStreamHandle, mEndPointParcelable); 122 if (result != AAUDIO_OK) { 123 ALOGE("AudioStreamInternal.open(): %s getStreamDescriptor returns %d", 124 getLocationName(), result); 125 mServiceInterface.closeStream(mServiceStreamHandle); 126 return result; 127 } 128 129 // resolve parcelable into a descriptor 130 result = mEndPointParcelable.resolve(&mEndpointDescriptor); 131 if (result != AAUDIO_OK) { 132 ALOGE("AudioStreamInternal.open(): resolve() returns %d", result); 133 mServiceInterface.closeStream(mServiceStreamHandle); 134 return result; 135 } 136 137 // Configure endpoint based on descriptor. 
138 mAudioEndpoint.configure(&mEndpointDescriptor); 139 140 mFramesPerBurst = mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst; 141 int32_t capacity = mEndpointDescriptor.downDataQueueDescriptor.capacityInFrames; 142 143 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.open() %s framesPerBurst = %d, capacity = %d", 144 getLocationName(), mFramesPerBurst, capacity); 145 // Validate result from server. 146 if (mFramesPerBurst < 16 || mFramesPerBurst > 16 * 1024) { 147 ALOGE("AudioStream::open(): framesPerBurst out of range = %d", mFramesPerBurst); 148 return AAUDIO_ERROR_OUT_OF_RANGE; 149 } 150 if (capacity < mFramesPerBurst || capacity > 32 * 1024) { 151 ALOGE("AudioStream::open(): bufferCapacity out of range = %d", capacity); 152 return AAUDIO_ERROR_OUT_OF_RANGE; 153 } 154 155 mClockModel.setSampleRate(getSampleRate()); 156 mClockModel.setFramesPerBurst(mFramesPerBurst); 157 158 if (getDataCallbackProc()) { 159 mCallbackFrames = builder.getFramesPerDataCallback(); 160 if (mCallbackFrames > getBufferCapacity() / 2) { 161 ALOGE("AudioStreamInternal.open(): framesPerCallback too large = %d, capacity = %d", 162 mCallbackFrames, getBufferCapacity()); 163 mServiceInterface.closeStream(mServiceStreamHandle); 164 return AAUDIO_ERROR_OUT_OF_RANGE; 165 166 } else if (mCallbackFrames < 0) { 167 ALOGE("AudioStreamInternal.open(): framesPerCallback negative"); 168 mServiceInterface.closeStream(mServiceStreamHandle); 169 return AAUDIO_ERROR_OUT_OF_RANGE; 170 171 } 172 if (mCallbackFrames == AAUDIO_UNSPECIFIED) { 173 mCallbackFrames = mFramesPerBurst; 174 } 175 176 int32_t bytesPerFrame = getSamplesPerFrame() 177 * AAudioConvert_formatToSizeInBytes(getFormat()); 178 int32_t callbackBufferSize = mCallbackFrames * bytesPerFrame; 179 mCallbackBuffer = new uint8_t[callbackBufferSize]; 180 } 181 182 setState(AAUDIO_STREAM_STATE_OPEN); 183 } 184 return result; 185} 186 187aaudio_result_t AudioStreamInternal::close() { 188 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal.close(): 
mServiceStreamHandle = 0x%08X", 189 mServiceStreamHandle); 190 if (mServiceStreamHandle != AAUDIO_HANDLE_INVALID) { 191 aaudio_handle_t serviceStreamHandle = mServiceStreamHandle; 192 mServiceStreamHandle = AAUDIO_HANDLE_INVALID; 193 194 mServiceInterface.closeStream(serviceStreamHandle); 195 delete[] mCallbackBuffer; 196 return mEndPointParcelable.close(); 197 } else { 198 return AAUDIO_ERROR_INVALID_HANDLE; 199 } 200} 201 202 203// Render audio in the application callback and then write the data to the stream. 204void *AudioStreamInternal::callbackLoop() { 205 aaudio_result_t result = AAUDIO_OK; 206 aaudio_data_callback_result_t callbackResult = AAUDIO_CALLBACK_RESULT_CONTINUE; 207 AAudioStream_dataCallback appCallback = getDataCallbackProc(); 208 if (appCallback == nullptr) return NULL; 209 210 // result might be a frame count 211 while (mCallbackEnabled.load() && isPlaying() && (result >= 0)) { 212 // Call application using the AAudio callback interface. 213 callbackResult = (*appCallback)( 214 (AAudioStream *) this, 215 getDataCallbackUserData(), 216 mCallbackBuffer, 217 mCallbackFrames); 218 219 if (callbackResult == AAUDIO_CALLBACK_RESULT_CONTINUE) { 220 // Write audio data to stream. 221 int64_t timeoutNanos = calculateReasonableTimeout(mCallbackFrames); 222 223 // This is a BLOCKING WRITE! 224 result = write(mCallbackBuffer, mCallbackFrames, timeoutNanos); 225 if ((result != mCallbackFrames)) { 226 ALOGE("AudioStreamInternal(): callbackLoop: write() returned %d", result); 227 if (result >= 0) { 228 // Only wrote some of the frames requested. Must have timed out. 
229 result = AAUDIO_ERROR_TIMEOUT; 230 } 231 if (getErrorCallbackProc() != nullptr) { 232 (*getErrorCallbackProc())( 233 (AAudioStream *) this, 234 getErrorCallbackUserData(), 235 result); 236 } 237 break; 238 } 239 } else if (callbackResult == AAUDIO_CALLBACK_RESULT_STOP) { 240 ALOGD("AudioStreamInternal(): callback returned AAUDIO_CALLBACK_RESULT_STOP"); 241 break; 242 } 243 } 244 245 ALOGD("AudioStreamInternal(): callbackLoop() exiting, result = %d, isPlaying() = %d", 246 result, (int) isPlaying()); 247 return NULL; // TODO review 248} 249 250static void *aaudio_callback_thread_proc(void *context) 251{ 252 AudioStreamInternal *stream = (AudioStreamInternal *)context; 253 //LOGD("AudioStreamInternal(): oboe_callback_thread, stream = %p", stream); 254 if (stream != NULL) { 255 return stream->callbackLoop(); 256 } else { 257 return NULL; 258 } 259} 260 261aaudio_result_t AudioStreamInternal::requestStart() 262{ 263 int64_t startTime; 264 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): start()"); 265 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 266 return AAUDIO_ERROR_INVALID_STATE; 267 } 268 269 startTime = AudioClock::getNanoseconds(); 270 mClockModel.start(startTime); 271 processTimestamp(0, startTime); 272 setState(AAUDIO_STREAM_STATE_STARTING); 273 aaudio_result_t result = mServiceInterface.startStream(mServiceStreamHandle);; 274 275 if (result == AAUDIO_OK && getDataCallbackProc() != nullptr) { 276 // Launch the callback loop thread. 277 int64_t periodNanos = mCallbackFrames 278 * AAUDIO_NANOS_PER_SECOND 279 / getSampleRate(); 280 mCallbackEnabled.store(true); 281 result = createThread(periodNanos, aaudio_callback_thread_proc, this); 282 } 283 return result; 284} 285 286int64_t AudioStreamInternal::calculateReasonableTimeout(int32_t framesPerOperation) { 287 288 // Wait for at least a second or some number of callbacks to join the thread. 
289 int64_t timeoutNanoseconds = (MIN_TIMEOUT_OPERATIONS 290 * framesPerOperation 291 * AAUDIO_NANOS_PER_SECOND) 292 / getSampleRate(); 293 if (timeoutNanoseconds < MIN_TIMEOUT_NANOS) { // arbitrary number of seconds 294 timeoutNanoseconds = MIN_TIMEOUT_NANOS; 295 } 296 return timeoutNanoseconds; 297} 298 299aaudio_result_t AudioStreamInternal::stopCallback() 300{ 301 if (isDataCallbackActive()) { 302 mCallbackEnabled.store(false); 303 return joinThread(NULL, calculateReasonableTimeout(mCallbackFrames)); 304 } else { 305 return AAUDIO_OK; 306 } 307} 308 309aaudio_result_t AudioStreamInternal::requestPauseInternal() 310{ 311 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 312 ALOGE("AudioStreamInternal(): requestPauseInternal() mServiceStreamHandle invalid = 0x%08X", 313 mServiceStreamHandle); 314 return AAUDIO_ERROR_INVALID_STATE; 315 } 316 317 mClockModel.stop(AudioClock::getNanoseconds()); 318 setState(AAUDIO_STREAM_STATE_PAUSING); 319 return mServiceInterface.pauseStream(mServiceStreamHandle); 320} 321 322aaudio_result_t AudioStreamInternal::requestPause() 323{ 324 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestPause()", getLocationName()); 325 aaudio_result_t result = stopCallback(); 326 if (result != AAUDIO_OK) { 327 return result; 328 } 329 result = requestPauseInternal(); 330 ALOGD("AudioStreamInternal(): requestPause() returns %d", result); 331 return result; 332} 333 334aaudio_result_t AudioStreamInternal::requestFlush() { 335 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): requestFlush()"); 336 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 337 ALOGE("AudioStreamInternal(): requestFlush() mServiceStreamHandle invalid = 0x%08X", 338 mServiceStreamHandle); 339 return AAUDIO_ERROR_INVALID_STATE; 340 } 341 342 setState(AAUDIO_STREAM_STATE_FLUSHING); 343 return mServiceInterface.flushStream(mServiceStreamHandle); 344} 345 346void AudioStreamInternal::onFlushFromServer() { 347 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): 
onFlushFromServer()"); 348 int64_t readCounter = mAudioEndpoint.getDownDataReadCounter(); 349 int64_t writeCounter = mAudioEndpoint.getDownDataWriteCounter(); 350 351 // Bump offset so caller does not see the retrograde motion in getFramesRead(). 352 int64_t framesFlushed = writeCounter - readCounter; 353 mFramesOffsetFromService += framesFlushed; 354 355 // Flush written frames by forcing writeCounter to readCounter. 356 // This is because we cannot move the read counter in the hardware. 357 mAudioEndpoint.setDownDataWriteCounter(readCounter); 358} 359 360aaudio_result_t AudioStreamInternal::requestStopInternal() 361{ 362 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 363 ALOGE("AudioStreamInternal(): requestStopInternal() mServiceStreamHandle invalid = 0x%08X", 364 mServiceStreamHandle); 365 return AAUDIO_ERROR_INVALID_STATE; 366 } 367 368 mClockModel.stop(AudioClock::getNanoseconds()); 369 setState(AAUDIO_STREAM_STATE_STOPPING); 370 return mServiceInterface.stopStream(mServiceStreamHandle); 371} 372 373aaudio_result_t AudioStreamInternal::requestStop() 374{ 375 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal(): %s requestStop()", getLocationName()); 376 aaudio_result_t result = stopCallback(); 377 if (result != AAUDIO_OK) { 378 return result; 379 } 380 result = requestStopInternal(); 381 ALOGD("AudioStreamInternal(): requestStop() returns %d", result); 382 return result; 383} 384 385aaudio_result_t AudioStreamInternal::registerThread() { 386 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 387 return AAUDIO_ERROR_INVALID_STATE; 388 } 389 return mServiceInterface.registerAudioThread(mServiceStreamHandle, 390 getpid(), 391 gettid(), 392 getPeriodNanoseconds()); 393} 394 395aaudio_result_t AudioStreamInternal::unregisterThread() { 396 if (mServiceStreamHandle == AAUDIO_HANDLE_INVALID) { 397 return AAUDIO_ERROR_INVALID_STATE; 398 } 399 return mServiceInterface.unregisterAudioThread(mServiceStreamHandle, getpid(), gettid()); 400} 401 402aaudio_result_t 
AudioStreamInternal::getTimestamp(clockid_t clockId, 403 int64_t *framePosition, 404 int64_t *timeNanoseconds) { 405 // TODO implement using real HAL 406 int64_t time = AudioClock::getNanoseconds(); 407 *framePosition = mClockModel.convertTimeToPosition(time); 408 *timeNanoseconds = time + (10 * AAUDIO_NANOS_PER_MILLISECOND); // Fake hardware delay 409 return AAUDIO_OK; 410} 411 412aaudio_result_t AudioStreamInternal::updateStateWhileWaiting() { 413 if (isDataCallbackActive()) { 414 return AAUDIO_OK; // state is getting updated by the callback thread read/write call 415 } 416 return processCommands(); 417} 418 419#if LOG_TIMESTAMPS 420static void AudioStreamInternal_LogTimestamp(AAudioServiceMessage &command) { 421 static int64_t oldPosition = 0; 422 static int64_t oldTime = 0; 423 int64_t framePosition = command.timestamp.position; 424 int64_t nanoTime = command.timestamp.timestamp; 425 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() timestamp says framePosition = %08lld at nanoTime %llu", 426 (long long) framePosition, 427 (long long) nanoTime); 428 int64_t nanosDelta = nanoTime - oldTime; 429 if (nanosDelta > 0 && oldTime > 0) { 430 int64_t framesDelta = framePosition - oldPosition; 431 int64_t rate = (framesDelta * AAUDIO_NANOS_PER_SECOND) / nanosDelta; 432 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - framesDelta = %08lld", (long long) framesDelta); 433 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - nanosDelta = %08lld", (long long) nanosDelta); 434 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal() - measured rate = %llu", (unsigned long long) rate); 435 } 436 oldPosition = framePosition; 437 oldTime = nanoTime; 438} 439#endif 440 441aaudio_result_t AudioStreamInternal::onTimestampFromServer(AAudioServiceMessage *message) { 442 int64_t framePosition = 0; 443#if LOG_TIMESTAMPS 444 AudioStreamInternal_LogTimestamp(command); 445#endif 446 framePosition = message->timestamp.position; 447 processTimestamp(framePosition, message->timestamp.timestamp); 448 
return AAUDIO_OK; 449} 450 451aaudio_result_t AudioStreamInternal::onEventFromServer(AAudioServiceMessage *message) { 452 aaudio_result_t result = AAUDIO_OK; 453 ALOGD_IF(MYLOG_CONDITION, "processCommands() got event %d", message->event.event); 454 switch (message->event.event) { 455 case AAUDIO_SERVICE_EVENT_STARTED: 456 ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STARTED"); 457 setState(AAUDIO_STREAM_STATE_STARTED); 458 break; 459 case AAUDIO_SERVICE_EVENT_PAUSED: 460 ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_PAUSED"); 461 setState(AAUDIO_STREAM_STATE_PAUSED); 462 break; 463 case AAUDIO_SERVICE_EVENT_STOPPED: 464 ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_STOPPED"); 465 setState(AAUDIO_STREAM_STATE_STOPPED); 466 break; 467 case AAUDIO_SERVICE_EVENT_FLUSHED: 468 ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_FLUSHED"); 469 setState(AAUDIO_STREAM_STATE_FLUSHED); 470 onFlushFromServer(); 471 break; 472 case AAUDIO_SERVICE_EVENT_CLOSED: 473 ALOGD_IF(MYLOG_CONDITION, "processCommands() got AAUDIO_SERVICE_EVENT_CLOSED"); 474 setState(AAUDIO_STREAM_STATE_CLOSED); 475 break; 476 case AAUDIO_SERVICE_EVENT_DISCONNECTED: 477 result = AAUDIO_ERROR_DISCONNECTED; 478 setState(AAUDIO_STREAM_STATE_DISCONNECTED); 479 ALOGW("WARNING - processCommands() AAUDIO_SERVICE_EVENT_DISCONNECTED"); 480 break; 481 case AAUDIO_SERVICE_EVENT_VOLUME: 482 mVolumeRamp.setTarget((float) message->event.dataDouble); 483 ALOGD_IF(MYLOG_CONDITION, "processCommands() AAUDIO_SERVICE_EVENT_VOLUME %f", 484 message->event.dataDouble); 485 break; 486 default: 487 ALOGW("WARNING - processCommands() Unrecognized event = %d", 488 (int) message->event.event); 489 break; 490 } 491 return result; 492} 493 494// Process all the commands coming from the server. 
// Drain and dispatch every message currently queued by the service.
// Returns AAUDIO_OK, or the first error produced by a message handler.
aaudio_result_t AudioStreamInternal::processCommands() {
    aaudio_result_t result = AAUDIO_OK;

    while (result == AAUDIO_OK) {
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::processCommands() - looping, %d", result);
        AAudioServiceMessage message;
        // readUpCommand() returns the number of messages read; 1 means we got one.
        if (mAudioEndpoint.readUpCommand(&message) != 1) {
            break; // no command this time, no problem
        }
        switch (message.what) {
        case AAudioServiceMessage::code::TIMESTAMP:
            // Timing report from the server; feeds the clock model.
            result = onTimestampFromServer(&message);
            break;

        case AAudioServiceMessage::code::EVENT:
            // State change, volume change, disconnect, etc.
            result = onEventFromServer(&message);
            break;

        default:
            ALOGE("WARNING - AudioStreamInternal::processCommands() Unrecognized what = %d",
                  (int) message.what);
            result = AAUDIO_ERROR_UNEXPECTED_VALUE;
            break;
        }
    }
    return result;
}

// Write the data, block if needed and timeoutMillis > 0
//
// Loops around the non-blocking writeNow(), sleeping between attempts until
// either all frames are written, the deadline passes, or an error occurs.
// Returns the number of frames written, or a negative error code.
aaudio_result_t AudioStreamInternal::write(const void *buffer, int32_t numFrames,
                                           int64_t timeoutNanoseconds)
{
    aaudio_result_t result = AAUDIO_OK;
    int32_t loopCount = 0;
    uint8_t* source = (uint8_t*)buffer;
    int64_t currentTimeNanos = AudioClock::getNanoseconds();
    int64_t deadlineNanos = currentTimeNanos + timeoutNanoseconds;
    int32_t framesLeft = numFrames;

    // Write until all the data has been written or until a timeout occurs.
    while (framesLeft > 0) {
        // The call to writeNow() will not block. It will just write as much as it can.
        int64_t wakeTimeNanos = 0;
        aaudio_result_t framesWritten = writeNow(source, framesLeft,
                                                 currentTimeNanos, &wakeTimeNanos);
        if (framesWritten < 0) {
            ALOGE("AudioStreamInternal::write() loop: writeNow returned %d", framesWritten);
            result = framesWritten;
            break;
        }
        framesLeft -= (int32_t) framesWritten;
        source += framesWritten * getBytesPerFrame();

        // Should we block?
        if (timeoutNanoseconds == 0) {
            break; // don't block
        } else if (framesLeft > 0) {
            // clip the wake time to something reasonable
            if (wakeTimeNanos < currentTimeNanos) {
                wakeTimeNanos = currentTimeNanos;
            }
            if (wakeTimeNanos > deadlineNanos) {
                // If we time out, just return the framesWritten so far.
                ALOGE("AudioStreamInternal::write(): timed out after %lld nanos",
                      (long long) timeoutNanoseconds);
                break;
            }

            // Sleep until writeNow()'s suggested wake time, then try again.
            int64_t sleepForNanos = wakeTimeNanos - currentTimeNanos;
            AudioClock::sleepForNanos(sleepForNanos);
            currentTimeNanos = AudioClock::getNanoseconds();
        }
    }

    // return error or framesWritten
    (void) loopCount;
    return (result < 0) ? result : numFrames - framesLeft;
}

// Write as much data as we can without blocking.
//
// Also pumps service commands, advances the estimated read counter from the
// clock model when the output is free running, counts underruns, and suggests
// a wake-up time for the caller via *wakeTimePtr.
// Returns frames written (>= 0) or a negative error code.
aaudio_result_t AudioStreamInternal::writeNow(const void *buffer, int32_t numFrames,
                                              int64_t currentNanoTime, int64_t *wakeTimePtr) {

    {
        // Handle any pending service messages first; they may change state.
        aaudio_result_t result = processCommands();
        if (result != AAUDIO_OK) {
            return result;
        }
    }

    if (mAudioEndpoint.isOutputFreeRunning()) {
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - update read counter");
        // Update data queue based on the timing model.
        int64_t estimatedReadCounter = mClockModel.convertTimeToPosition(currentNanoTime);
        mAudioEndpoint.setDownDataReadCounter(estimatedReadCounter);
    }
    // TODO else query from endpoint cuz set by actual reader, maybe

    // If the read index passed the write index then consider it an underrun.
    if (mAudioEndpoint.getFullFramesAvailable() < 0) {
        mXRunCount++;
    }

    // Write some data to the buffer.
    //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - writeNowWithConversion(%d)", numFrames);
    int32_t framesWritten = writeNowWithConversion(buffer, numFrames);
    //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - tried to write %d frames, wrote %d",
    //    numFrames, framesWritten);

    // Calculate an ideal time to wake up.
    if (wakeTimePtr != nullptr && framesWritten >= 0) {
        // By default wake up a few milliseconds from now. // TODO review
        int64_t wakeTime = currentNanoTime + (1 * AAUDIO_NANOS_PER_MILLISECOND);
        aaudio_stream_state_t state = getState();
        //ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow() - wakeTime based on %s",
        //      AAudio_convertStreamStateToText(state));
        switch (state) {
            case AAUDIO_STREAM_STATE_OPEN:
            case AAUDIO_STREAM_STATE_STARTING:
                if (framesWritten != 0) {
                    // Don't wait to write more data. Just prime the buffer.
                    wakeTime = currentNanoTime;
                }
                break;
            case AAUDIO_STREAM_STATE_STARTED:   // When do we expect the next read burst to occur?
                {
                    // Predict when the consumer will have read one more burst
                    // and wake up then. Floor of 32 frames keeps the wake rate sane.
                    uint32_t burstSize = mFramesPerBurst;
                    if (burstSize < 32) {
                        burstSize = 32; // TODO review
                    }

                    uint64_t nextReadPosition = mAudioEndpoint.getDownDataReadCounter() + burstSize;
                    wakeTime = mClockModel.convertPositionToTime(nextReadPosition);
                }
                break;
            default:
                break;
        }
        *wakeTimePtr = wakeTime;

    }
//    ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNow finished: now = %llu, read# = %llu, wrote# = %llu",
//         (unsigned long long)currentNanoTime,
//         (unsigned long long)mAudioEndpoint.getDownDataReadCounter(),
//         (unsigned long long)mAudioEndpoint.getDownDataWriteCounter());
    return framesWritten;
}


// Copy numFrames of application audio into the endpoint FIFO, applying
// format conversion (app format <-> device format) and volume ramping.
// The FIFO may wrap, so the copy happens in up to WrappingBuffer::SIZE parts.
// Returns the number of frames actually written (may be < numFrames if the
// FIFO is nearly full).
aaudio_result_t AudioStreamInternal::writeNowWithConversion(const void *buffer,
                                                            int32_t numFrames) {
    // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion(%p, %d)",
    //    buffer, numFrames);
    WrappingBuffer wrappingBuffer;
    uint8_t *source = (uint8_t *) buffer;
    int32_t framesLeft = numFrames;

    mAudioEndpoint.getEmptyRoomAvailable(&wrappingBuffer);

    // Read data in one or two parts.
    int partIndex = 0;
    while (framesLeft > 0 && partIndex < WrappingBuffer::SIZE) {
        int32_t framesToWrite = framesLeft;
        int32_t framesAvailable = wrappingBuffer.numFrames[partIndex];
        if (framesAvailable > 0) {
            if (framesToWrite > framesAvailable) {
                framesToWrite = framesAvailable;
            }
            int32_t numBytes = getBytesPerFrame() * framesToWrite;
            int32_t numSamples = framesToWrite * getSamplesPerFrame();
            // Data conversion.
            float levelFrom;
            float levelTo;
            // nextSegment() reports whether the volume is still ramping over
            // this span and yields the start/end levels.
            bool ramping = mVolumeRamp.nextSegment(framesToWrite * getSamplesPerFrame(),
                                                   &levelFrom, &levelTo);
            // The formats are validated when the stream is opened so we do not have to
            // check for illegal combinations here.
            if (getFormat() == AAUDIO_FORMAT_PCM_FLOAT) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    // Same format: apply the (possibly flat) volume ramp while copying.
                    AAudio_linearRamp(
                            (const float *) source,
                            (float *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    if (ramping) {
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        // Constant volume: simpler per-sample conversion.
                        AAudioConvert_floatToPcm16(
                                (const float *) source,
                                (int16_t *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                }
            } else if (getFormat() == AAUDIO_FORMAT_PCM_I16) {
                if (mDeviceFormat == AAUDIO_FORMAT_PCM_FLOAT) {
                    if (ramping) {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                framesToWrite,
                                getSamplesPerFrame(),
                                levelFrom,
                                levelTo);
                    } else {
                        AAudioConvert_pcm16ToFloat(
                                (const int16_t *) source,
                                (float *) wrappingBuffer.data[partIndex],
                                numSamples,
                                levelTo);
                    }
                } else if (mDeviceFormat == AAUDIO_FORMAT_PCM_I16) {
                    AAudio_linearRamp(
                            (const int16_t *) source,
                            (int16_t *) wrappingBuffer.data[partIndex],
                            framesToWrite,
                            getSamplesPerFrame(),
                            levelFrom,
                            levelTo);
                }
            }
            source += numBytes;
            framesLeft -= framesToWrite;
        } else {
            break;
        }
        partIndex++;
    }
    int32_t framesWritten = numFrames - framesLeft;
    mAudioEndpoint.advanceWriteIndex(framesWritten);

    if (framesWritten > 0) {
        incrementFramesWritten(framesWritten);
    }
    // ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::writeNowWithConversion() returns %d", framesWritten);
    return framesWritten;
}

// Forward a (position, time) pair into the isochronous clock model.
void AudioStreamInternal::processTimestamp(uint64_t position, int64_t time) {
    mClockModel.processTimestamp( position, time);
}
746aaudio_result_t AudioStreamInternal::setBufferSize(int32_t requestedFrames) { 747 int32_t actualFrames = 0; 748 // Round to the next highest burst size. 749 if (getFramesPerBurst() > 0) { 750 int32_t numBursts = (requestedFrames + getFramesPerBurst() - 1) / getFramesPerBurst(); 751 requestedFrames = numBursts * getFramesPerBurst(); 752 } 753 754 aaudio_result_t result = mAudioEndpoint.setBufferSizeInFrames(requestedFrames, &actualFrames); 755 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::setBufferSize() %s req = %d => %d", 756 getLocationName(), requestedFrames, actualFrames); 757 if (result < 0) { 758 return result; 759 } else { 760 return (aaudio_result_t) actualFrames; 761 } 762} 763 764int32_t AudioStreamInternal::getBufferSize() const 765{ 766 return mAudioEndpoint.getBufferSizeInFrames(); 767} 768 769int32_t AudioStreamInternal::getBufferCapacity() const 770{ 771 return mAudioEndpoint.getBufferCapacityInFrames(); 772} 773 774int32_t AudioStreamInternal::getFramesPerBurst() const 775{ 776 return mEndpointDescriptor.downDataQueueDescriptor.framesPerBurst; 777} 778 779int64_t AudioStreamInternal::getFramesRead() 780{ 781 int64_t framesRead = 782 mClockModel.convertTimeToPosition(AudioClock::getNanoseconds()) 783 + mFramesOffsetFromService; 784 // Prevent retrograde motion. 785 if (framesRead < mLastFramesRead) { 786 framesRead = mLastFramesRead; 787 } else { 788 mLastFramesRead = framesRead; 789 } 790 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesRead() returns %lld", (long long)framesRead); 791 return framesRead; 792} 793 794int64_t AudioStreamInternal::getFramesWritten() 795{ 796 int64_t getFramesWritten = mAudioEndpoint.getDownDataWriteCounter() 797 + mFramesOffsetFromService; 798 ALOGD_IF(MYLOG_CONDITION, "AudioStreamInternal::getFramesWritten() returns %lld", (long long)getFramesWritten); 799 return getFramesWritten; 800} 801