AudioTrack.cpp revision 4ef88d7106c01f81109ee163cb6789073d80c6ae
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>

// Polling interval (ms) used while waiting for buffer space / stream state changes.
#define WAIT_PERIOD_MS                  10
// Upper bound (s) on waiting for the stream-end callback from the server.
#define WAIT_STREAM_END_TIMEOUT_SEC     120
// Maximum number of loop-cycle notifications delivered per processAudioBuffer pass.
static const int kMaxLoopCountNotifications = 32;

namespace android {
// ---------------------------------------------------------------------------

// TODO: Move to a separate .h

// Return a reference to the smaller of x and y.
// NOTE: returns a reference to one of the arguments; callers must not bind the
// result past the arguments' lifetimes.
template <typename T>
static inline const T &min(const T &x, const T &y) {
    return x < y ? x : y;
}

// Return a reference to the larger of x and y (same lifetime caveat as min()).
template <typename T>
static inline const T &max(const T &x, const T &y) {
    return x > y ? x : y;
}

static const int32_t NANOS_PER_SECOND = 1000000000;

// Convert a source frame count to a duration in nanoseconds, accounting for
// the timestretch speed factor (computed in double precision to avoid overflow).
static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
{
    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
}

// Convert a timespec to microseconds (truncates sub-microsecond remainder).
static int64_t convertTimespecToUs(const struct timespec &tv)
{
    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
}

// Convert a timespec to nanoseconds.
static inline nsecs_t convertTimespecToNs(const struct timespec &tv)
{
    return tv.tv_sec * (long long)NANOS_PER_SECOND + tv.tv_nsec;
}

// current monotonic time in microseconds.
static int64_t getNowUs()
{
    struct timespec tv;
    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
    return convertTimespecToUs(tv);
}

// FIXME: we don't use the pitch setting in the time stretcher (not working);
// instead we emulate it using our sample rate converter.
static const bool kFixPitch = true; // enable pitch fix

// Effective sample rate used to emulate pitch: scale the nominal rate by the
// requested pitch (rounded to nearest) when the pitch fix is enabled.
static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
{
    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
}

// Compensate the speed for the pitch emulation; the divisor is clamped away
// from zero with AUDIO_TIMESTRETCH_PITCH_MIN_DELTA.
static inline float adjustSpeed(float speed, float pitch)
{
    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
}

// With the pitch fix enabled the time stretcher always sees normal pitch;
// the real pitch is emulated via adjustSampleRate()/adjustSpeed() above.
static inline float adjustPitch(float pitch)
{
    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
}

// Must match similar computation in createTrack_l in Threads.cpp.
// TODO: Move to a common library
//
// Returns the minimum client frame count needed so that the client-side buffer
// covers at least the hardware output latency, scaled for timestretch speed.
static size_t calculateMinFrameCount(
        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
{
    // Ensure that buffer depth covers at least audio hardware latency
    // NOTE(review): integer division — if (1000 * afFrameCount) / afSampleRate
    // evaluates to 0 (mix buffer shorter than 1 ms) this divides by zero, and
    // 1000 * afFrameCount may overflow uint32_t for very large HAL buffers;
    // presumably prevented by server-side configuration — TODO confirm.
    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }
#if 0
    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
    // but keeping the code here to make it easier to add later.
    if (minBufCount < notificationsPerBufferReq) {
        minBufCount = notificationsPerBufferReq;
    }
#endif
    ALOGV("calculateMinFrameCount afLatency %u afFrameCount %u afSampleRate %u "
            "sampleRate %u speed %f minBufCount: %u" /*" notificationsPerBufferReq %u"*/,
            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
            /*, notificationsPerBufferReq*/);
    return minBufCount * sourceFramesNeededWithTimestretch(
            sampleRate, afFrameCount, afSampleRate, speed);
}

// static
//
// Public API: query the minimum frame count for a streaming AudioTrack of the
// given stream type and sample rate. Fails (returning the underlying status)
// if any of the output parameters cannot be fetched from AudioFlinger.
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
            /*, 0 notificationsPerBufferReq*/);

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}

// ---------------------------------------------------------------------------

// Default constructor: leaves the track uninitialized (mStatus == NO_INIT);
// a subsequent set() call is required before use. Attributes default to
// "unknown" so that set() can decide based on stream type instead.
AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}

// Streaming-mode constructor: delegates to set() with no shared buffer.
// On failure, the error is recorded in mStatus (checked by initCheck()).
AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}

// Static-mode constructor: the caller supplies the shared buffer holding the
// entire audio content; frameCount is derived from the buffer by set().
AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}

// Destructor: tears down the callback thread, server-side track and shared
// memory, but only if set() previously succeeded (mStatus == NO_ERROR).
AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
        }
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        // Push the binder release commands out now rather than waiting for the
        // IPC thread's next transaction.
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

// One-time initialization of the track. Validates all parameters, resolves
// defaults (transfer type, stream type vs. attributes, format, flags), spawns
// the callback thread when a callback is supplied, and creates the server-side
// IAudioTrack via createTrack_l(). Returns NO_ERROR on success; on failure the
// track remains unusable (mStatus stays NO_INIT).
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
            "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
            streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
            sessionId, transferType, uid, pid);

    mThreadCanCallJava = threadCanCallJava;

    // Resolve TRANSFER_DEFAULT and validate the requested transfer type
    // against the presence of a callback and/or a shared buffer.
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            // NOTE(review): this message also fires for TRANSFER_SYNC, not just
            // TRANSFER_OBTAIN, so the log wording is imprecise for that case.
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        // Legacy stream-type path: only public stream types are accepted.
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        // Translate attribute flags into output flags.
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
        }
        // check deep buffer after flags have been modified above
        if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        // NOTE(review): this ORs an audio_output_flags_t value into
        // mAttributes.flags, which is an audio_flags_mask_t — the two enums
        // have different bit meanings. Presumably createTrack_l() (not visible
        // here) consumes this bit as intended; later AOSP revisions set the
        // bit on the output flags instead — TODO confirm before changing.
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    // Frame size: bytes per frame for proportional formats, otherwise a
    // direct (bitstream) track is addressed byte-by-byte.
    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    // notificationFrames >= 0 requests a fixed notification period in frames;
    // a negative value requests (-notificationFrames) notifications per buffer,
    // which is only supported for FAST tracks with a server-chosen frameCount.
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("notificationFrames=%d not permitted for non-fast track",
                    notificationFrames);
            return BAD_VALUE;
        }
        if (frameCount > 0) {
            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    notificationFrames, frameCount);
            return BAD_VALUE;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "notificationFrames=%d clamped to the range -%u to -%u",
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    // Only trust caller-supplied uid/pid when the call originates in-process;
    // otherwise use the binder calling identity.
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status_t status = createTrack_l();

    if (status != NO_ERROR) {
        // Tear down the callback thread created above before reporting failure.
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    // Success: initialize all remaining client-side bookkeeping.
    mStatus = NO_ERROR;
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
    mVolumeHandler = new VolumeHandler();
    return NO_ERROR;
}

// -------------------------------------------------------------------------

// Start or resume playback. Transitions the state machine
// (PAUSED_STOPPING -> STOPPING, otherwise -> ACTIVE), captures a start
// timestamp, resets client position bookkeeping after a stop/flush, starts the
// server-side track (restoring it first if the control block was invalidated),
// and resumes the callback thread. On failure the previous state is restored.
status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();

    // save start timestamp
    if (isOffloadedOrDirect_l()) {
        if (getTimestamp_l(mStartTs) != OK) {
            mStartTs.mPosition = 0;
        }
    } else {
        if (getTimestamp_l(&mStartEts) != OK) {
            mStartEts.clear();
        }
    }
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mRetrogradeMotionReported = false;
        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;

        if (!isOffloadedOrDirect_l()
                && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("start: server read:%lld cumulative flushed:%lld client written:%lld",
                    (long long)(mFramesWrittenServerOffset
                            + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)mStartEts.mFlushed,
                    (long long)mFramesWritten);
            // Rebase the server frame counter so positions appear to restart at 0.
            mFramesWrittenServerOffset = -mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartUs = getNowUs();

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    // Atomically clear STREAM_END_DONE/DISABLED; 'flags' receives the control
    // block flags as they were before the clear.
    int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        // The server-side track is gone (e.g. mediaserver died); recreate it.
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            // No callback thread: boost this thread's priority for the
            // client-driven write path, remembering the old settings.
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }
    } else {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

// Stop playback. Offloaded tracks enter STOPPING (drain in progress);
// others go straight to STOPPED. No-op unless currently ACTIVE or PAUSED.
void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        ALOGD_IF(mSharedBuffer == nullptr,
                "stop() called with %u frames delivered", mReleased.value());
        mReleased = 0;
    }

    mProxy->interrupt();
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        // Offloaded tracks keep the callback thread running so it can deliver
        // the stream-end notification once the drain completes.
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        // Undo the priority boost applied in start() for client-driven writes.
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

// Returns true whenever the track is not actively playing (includes paused,
// stopped, stopping, and flushed states).
bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

// Discard all pending data for a streaming track. No-op for static tracks,
// while playing, or when already flushed.
void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

// Internal flush; caller must hold mLock and the track must not be ACTIVE.
// Clears marker/period notification state and flushes both the client proxy
// and the server-side track.
void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

// Pause playback. ACTIVE -> PAUSED, STOPPING -> PAUSED_STOPPING; no-op in any
// other state. For offloaded tracks, caches the render position so position
// queries while paused remain stable.
void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

// Set per-channel volume. Values must be finite and within [0.0, 1.0];
// applied via the shared-memory proxy as packed minifloat gains.
status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        // Wake the offload thread so the new volume takes effect promptly.
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

// Convenience overload: same volume on both channels.
status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

// Set the auxiliary effect send level (0.0 to 1.0, NaN rejected).
status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

// Read back the last send level set; silently ignores a NULL out-pointer.
void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

// Change the nominal sample rate of a PCM streaming track. Rejected for
// offloaded/direct/fast tracks and for rates that would exceed the resampler's
// maximum down-ratio relative to the output's rate.
status_t AudioTrack::setSampleRate(uint32_t rate)
{
    AutoMutex lock(mLock);
    if (rate == mSampleRate) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
        return INVALID_OPERATION;
    }
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    // NOTE: it is theoretically possible, but highly unlikely, that a device change
    // could mean a previously allowed sampling rate is no longer allowed.
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }
    // TODO: Should we also check if the buffer size is compatible?

    mSampleRate = rate;
    mProxy->setSampleRate(effectiveSampleRate);

    return NO_ERROR;
}

// Return the current sample rate. For offloaded/direct tracks the decoder may
// change the rate mid-stream, so the HAL is polled and the cached value
// refreshed (mSampleRate is updated inside a const method — presumably a
// mutable member; declared in AudioTrack.h).
uint32_t AudioTrack::getSampleRate() const
{
    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
    // FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

// Return the sample rate originally requested in set(), before any
// setSampleRate() changes.
uint32_t AudioTrack::getOriginalSampleRate() const
{
    return mOriginalSampleRate;
}

// Apply a new speed/pitch pair. Pitch is emulated by scaling the effective
// sample rate and compensating the speed (see adjustSampleRate/adjustSpeed).
// Rejected for offloaded/direct and fast tracks, and when the effective values
// are out of range or incompatible with the current buffer size.
status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
    AutoMutex lock(mLock);
    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION;
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    ALOGV("setPlaybackRate (input): mSampleRate:%u mSpeed:%f mPitch:%f",
            mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    const float effectivePitch = adjustPitch(playbackRate.mPitch);
    // Build the rate actually sent to the server: emulated-pitch values, while
    // mPlaybackRate below records the caller's requested values.
    AudioPlaybackRate playbackRateTemp = playbackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;

    ALOGV("setPlaybackRate (effective): mSampleRate:%u mSpeed:%f mPitch:%f",
            effectiveRate, effectiveSpeed, effectivePitch);

    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
        ALOGV("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // Check if the buffer size is compatible.
    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
        ALOGV("setPlaybackRate(%f, %f) failed (buffer size)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    // Check resampler ratios are within bounds
    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    mPlaybackRate = playbackRate;
    //set effective rates
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    return NO_ERROR;
}

// Return the caller-requested (not effective) playback rate.
const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
{
    AutoMutex lock(mLock);
    return mPlaybackRate;
}

// Current client buffer size in frames, or NO_INIT before createTrack_l.
ssize_t AudioTrack::getBufferSizeInFrames()
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    return (ssize_t) mProxy->getBufferSizeInFrames();
}

// Buffer duration in microseconds at the current sample rate and speed.
status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
{
    if (duration == nullptr) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    // NOTE(review): getBufferSizeInFrames() presumably returns an unsigned
    // 32-bit count, in which case this cast can never be negative on LP64 and
    // the error branch below is defensive only — TODO confirm against
    // AudioTrackShared.h.
    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
    if (bufferSizeInFrames < 0) {
        return (status_t)bufferSizeInFrames;
    }
    *duration = (int64_t)((double)bufferSizeInFrames * 1000000
            / ((double)mSampleRate * mPlaybackRate.mSpeed));
    return NO_ERROR;
}

// Request a new client buffer size in frames; the proxy clamps the request and
// returns the size actually in effect.
ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    // Reject if timed track or compressed audio.
978 if (!audio_is_linear_pcm(mFormat)) { 979 return INVALID_OPERATION; 980 } 981 return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames); 982} 983 984status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount) 985{ 986 if (mSharedBuffer == 0 || isOffloadedOrDirect()) { 987 return INVALID_OPERATION; 988 } 989 990 if (loopCount == 0) { 991 ; 992 } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount && 993 loopEnd - loopStart >= MIN_LOOP) { 994 ; 995 } else { 996 return BAD_VALUE; 997 } 998 999 AutoMutex lock(mLock); 1000 // See setPosition() regarding setting parameters such as loop points or position while active 1001 if (mState == STATE_ACTIVE) { 1002 return INVALID_OPERATION; 1003 } 1004 setLoop_l(loopStart, loopEnd, loopCount); 1005 return NO_ERROR; 1006} 1007 1008void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount) 1009{ 1010 // We do not update the periodic notification point. 1011 // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod; 1012 mLoopCount = loopCount; 1013 mLoopEnd = loopEnd; 1014 mLoopStart = loopStart; 1015 mLoopCountNotified = loopCount; 1016 mStaticProxy->setLoop(loopStart, loopEnd, loopCount); 1017 1018 // Waking the AudioTrackThread is not needed as this cannot be called when active. 
1019} 1020 1021status_t AudioTrack::setMarkerPosition(uint32_t marker) 1022{ 1023 // The only purpose of setting marker position is to get a callback 1024 if (mCbf == NULL || isOffloadedOrDirect()) { 1025 return INVALID_OPERATION; 1026 } 1027 1028 AutoMutex lock(mLock); 1029 mMarkerPosition = marker; 1030 mMarkerReached = false; 1031 1032 sp<AudioTrackThread> t = mAudioTrackThread; 1033 if (t != 0) { 1034 t->wake(); 1035 } 1036 return NO_ERROR; 1037} 1038 1039status_t AudioTrack::getMarkerPosition(uint32_t *marker) const 1040{ 1041 if (isOffloadedOrDirect()) { 1042 return INVALID_OPERATION; 1043 } 1044 if (marker == NULL) { 1045 return BAD_VALUE; 1046 } 1047 1048 AutoMutex lock(mLock); 1049 mMarkerPosition.getValue(marker); 1050 1051 return NO_ERROR; 1052} 1053 1054status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) 1055{ 1056 // The only purpose of setting position update period is to get a callback 1057 if (mCbf == NULL || isOffloadedOrDirect()) { 1058 return INVALID_OPERATION; 1059 } 1060 1061 AutoMutex lock(mLock); 1062 mNewPosition = updateAndGetPosition_l() + updatePeriod; 1063 mUpdatePeriod = updatePeriod; 1064 1065 sp<AudioTrackThread> t = mAudioTrackThread; 1066 if (t != 0) { 1067 t->wake(); 1068 } 1069 return NO_ERROR; 1070} 1071 1072status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const 1073{ 1074 if (isOffloadedOrDirect()) { 1075 return INVALID_OPERATION; 1076 } 1077 if (updatePeriod == NULL) { 1078 return BAD_VALUE; 1079 } 1080 1081 AutoMutex lock(mLock); 1082 *updatePeriod = mUpdatePeriod; 1083 1084 return NO_ERROR; 1085} 1086 1087status_t AudioTrack::setPosition(uint32_t position) 1088{ 1089 if (mSharedBuffer == 0 || isOffloadedOrDirect()) { 1090 return INVALID_OPERATION; 1091 } 1092 if (position > mFrameCount) { 1093 return BAD_VALUE; 1094 } 1095 1096 AutoMutex lock(mLock); 1097 // Currently we require that the player is inactive before setting parameters such as position 1098 // or loop points. 
Otherwise, there could be a race condition: the application could read the 1099 // current position, compute a new position or loop parameters, and then set that position or 1100 // loop parameters but it would do the "wrong" thing since the position has continued to advance 1101 // in the mean time. If we ever provide a sequencer in server, we could allow a way for the app 1102 // to specify how it wants to handle such scenarios. 1103 if (mState == STATE_ACTIVE) { 1104 return INVALID_OPERATION; 1105 } 1106 // After setting the position, use full update period before notification. 1107 mNewPosition = updateAndGetPosition_l() + mUpdatePeriod; 1108 mStaticProxy->setBufferPosition(position); 1109 1110 // Waking the AudioTrackThread is not needed as this cannot be called when active. 1111 return NO_ERROR; 1112} 1113 1114status_t AudioTrack::getPosition(uint32_t *position) 1115{ 1116 if (position == NULL) { 1117 return BAD_VALUE; 1118 } 1119 1120 AutoMutex lock(mLock); 1121 // FIXME: offloaded and direct tracks call into the HAL for render positions 1122 // for compressed/synced data; however, we use proxy position for pure linear pcm data 1123 // as we do not know the capability of the HAL for pcm position support and standby. 1124 // There may be some latency differences between the HAL position and the proxy position. 1125 if (isOffloadedOrDirect_l() && !isPurePcmData_l()) { 1126 uint32_t dspFrames = 0; 1127 1128 if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) { 1129 ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition); 1130 *position = mPausedPosition; 1131 return NO_ERROR; 1132 } 1133 1134 if (mOutput != AUDIO_IO_HANDLE_NONE) { 1135 uint32_t halFrames; // actually unused 1136 (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames); 1137 // FIXME: on getRenderPosition() error, we return OK with frame position 0. 
1138 } 1139 // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED) 1140 // due to hardware latency. We leave this behavior for now. 1141 *position = dspFrames; 1142 } else { 1143 if (mCblk->mFlags & CBLK_INVALID) { 1144 (void) restoreTrack_l("getPosition"); 1145 // FIXME: for compatibility with the Java API we ignore the restoreTrack_l() 1146 // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position. 1147 } 1148 1149 // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes 1150 *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 1151 0 : updateAndGetPosition_l().value(); 1152 } 1153 return NO_ERROR; 1154} 1155 1156status_t AudioTrack::getBufferPosition(uint32_t *position) 1157{ 1158 if (mSharedBuffer == 0) { 1159 return INVALID_OPERATION; 1160 } 1161 if (position == NULL) { 1162 return BAD_VALUE; 1163 } 1164 1165 AutoMutex lock(mLock); 1166 *position = mStaticProxy->getBufferPosition(); 1167 return NO_ERROR; 1168} 1169 1170status_t AudioTrack::reload() 1171{ 1172 if (mSharedBuffer == 0 || isOffloadedOrDirect()) { 1173 return INVALID_OPERATION; 1174 } 1175 1176 AutoMutex lock(mLock); 1177 // See setPosition() regarding setting parameters such as loop points or position while active 1178 if (mState == STATE_ACTIVE) { 1179 return INVALID_OPERATION; 1180 } 1181 mNewPosition = mUpdatePeriod; 1182 (void) updateAndGetPosition_l(); 1183 mPosition = 0; 1184 mPreviousTimestampValid = false; 1185#if 0 1186 // The documentation is not clear on the behavior of reload() and the restoration 1187 // of loop count. Historically we have not restored loop count, start, end, 1188 // but it makes sense if one desires to repeat playing a particular sound. 
    if (mLoopCount != 0) {
        mLoopCountNotified = mLoopCount;
        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
    }
#endif
    mStaticProxy->setBufferPosition(0);
    return NO_ERROR;
}

// Returns the I/O handle of the output this track is attached to.
audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

// Requests routing to an explicit output device. On change, the control block is
// invalidated so the track is re-created (and re-routed) on next use.
status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
    AutoMutex lock(mLock);
    if (mSelectedDeviceId != deviceId) {
        mSelectedDeviceId = deviceId;
        android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
    }
    return NO_ERROR;
}

// Returns the device explicitly selected via setOutputDevice() (not necessarily
// the device audio is currently routed to; see getRoutedDeviceId()).
audio_port_handle_t AudioTrack::getOutputDevice() {
    AutoMutex lock(mLock);
    return mSelectedDeviceId;
}

// Returns the device id the output is currently routed to, as reported by AudioSystem,
// or AUDIO_PORT_HANDLE_NONE when the track has no output.
audio_port_handle_t AudioTrack::getRoutedDeviceId() {
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return AUDIO_PORT_HANDLE_NONE;
    }
    return AudioSystem::getDeviceIdForIo(mOutput);
}

// Attaches this track to an auxiliary effect; the id is cached only on success
// so it can be re-attached after track re-creation (see createTrack_l()).
status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

// Returns the stream type, deriving it from the audio attributes when the
// track was created with AUDIO_STREAM_DEFAULT.
audio_stream_type_t AudioTrack::streamType() const
{
    if (mStreamType == AUDIO_STREAM_DEFAULT) {
        return audio_attributes_to_stream_type(&mAttributes);
    }
    return mStreamType;
}

// -------------------------------------------------------------------------

// Creates (or re-creates) the server-side IAudioTrack and the shared-memory
// control block / proxy. Must be called with mLock held.
// On failure after an output has been acquired, falls through to the
// "release" label to give the output handle back to AudioPolicy.
status_t AudioTrack::createTrack_l()
{
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    // Unregister the device callback from the old output before acquiring a new one.
    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
    }
    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;

    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.

    status_t status;
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = mSampleRate;
    config.channel_mask = mChannelMask;
    config.format = mFormat;
    config.offload_info = mOffloadInfoCopy;
    status = AudioSystem::getOutputForAttr(attr, &output,
                                           mSessionId, &streamType, mClientUid,
                                           &config,
                                           mFlags, mSelectedDeviceId, &mPortId);

    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
              " channel mask %#x, flags %#x",
              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all
    status = AudioSystem::getLatency(output, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }
    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);

    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    // TODO consider making this a member variable if there are other uses for it later
    size_t afFrameCountHAL;
    status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
    if (status != NO_ERROR) {
        ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
        goto release;
    }
    ALOG_ASSERT(afFrameCountHAL > 0);

    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    // A sample rate of 0 means "use the output's rate".
    if (mSampleRate == 0) {
        mSampleRate = mAfSampleRate;
        mOriginalSampleRate = mAfSampleRate;
    }

    // Client can only express a preference for FAST. Server will perform additional tests.
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        bool useCaseAllowed =
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
        // sample rates must also match
        bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
        if (!fastAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
                  "track %u Hz, output %u Hz",
                  mTransfer, mSampleRate, mAfSampleRate);
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_has_proportional_frames(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = mAfFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(mFormat);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                  mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter. We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSize;
    } else {
        size_t minFrameCount = 0;
        // For fast tracks the frame count calculations and checks are mostly done by server,
        // but we try to respect the application's request for notifications per buffer.
        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
            if (mNotificationsPerBufferReq > 0) {
                // Avoid possible arithmetic overflow during multiplication.
                // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
                          mNotificationsPerBufferReq, afFrameCountHAL);
                } else {
                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
                }
            }
        } else {
            // for normal tracks precompute the frame count based on speed.
            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
            minFrameCount = calculateMinFrameCount(
                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
                    speed /*, 0 mNotificationsPerBufferReq*/);
        }
        if (frameCount < minFrameCount) {
            frameCount = minFrameCount;
        }
    }

    audio_output_flags_t flags = mFlags;

    // Pass the callback thread's tid for a fast track so the server can boost its priority;
    // not applicable when the thread may call into Java.
    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
            tid = mAudioTrackThread->getTid();
        }
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    audio_session_t originalSessionId = mSessionId;
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &flags,
                                                      mSharedBuffer,
                                                      output,
                                                      mClientPid,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status,
                                                      mPortId);
    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
            "session ID changed from %d to %d", originalSessionId, mSessionId);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    // FIXME compare to AudioRecord
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        // Re-creation: detach the death notifier from the old binder before replacing it.
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (flags & AUDIO_OUTPUT_FLAG_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
            if (!mThreadCanCallJava) {
                // Callback thread will spin until it observes a realtime scheduling policy;
                // see processAudioBuffer().
                mAwaitBoost = true;
            }
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
        }
    }
    mFlags = flags;

    // Make sure that application is notified with sufficient margin before underrun.
    // The client can divide the AudioTrack buffer into sub-buffers,
    // and expresses its desire to server as the notification frame count.
    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
        size_t maxNotificationFrames;
        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
            // notify every HAL buffer, regardless of the size of the track buffer
            maxNotificationFrames = afFrameCountHAL;
        } else {
            // For normal tracks, use at least double-buffering if no sample rate conversion,
            // or at least triple-buffering if there is sample rate conversion
            const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
            maxNotificationFrames = frameCount / nBuffering;
        }
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
            if (mNotificationFramesAct == 0) {
                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
                      maxNotificationFrames, frameCount);
            } else {
                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
                      mNotificationFramesAct, maxNotificationFrames, frameCount);
            }
            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory. If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block. This address is for the mapping within client
    // address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = cblk + 1;
    } else {
        buffers = mSharedBuffer->pointer();
        if (buffers == NULL) {
            ALOGE("Could not get buffer pointer");
            return NO_INIT;
        }
    }

    // Re-attach the cached aux effect id on the new server-side track.
    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
    // FIXME don't believe this lie
    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease. This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // reset server position to 0 as we have new cblk.
    mServer = 0;

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    // Re-apply the pitch-adjusted effective rate/speed/pitch (see setPlaybackRate()).
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    if (mDeviceCallback != 0) {
        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
    }

    return NO_ERROR;
    }

release:
    // Give the output handle back to AudioPolicy; never return NO_ERROR from this path.
    AudioSystem::releaseOutput(output, streamType, mSessionId);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}

// Public obtainBuffer(): converts waitCount (-1 = forever, 0 = non-blocking,
// > 0 = waitCount * WAIT_PERIOD_MS) into a timespec and delegates to the
// internal overload. Only valid in TRANSFER_OBTAIN mode.
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
{
    if (audioBuffer == NULL) {
        if (nonContig != NULL) {
            *nonContig = 0;
        }
        return BAD_VALUE;
    }
    if (mTransfer != TRANSFER_OBTAIN) {
        audioBuffer->frameCount = 0;
        audioBuffer->size = 0;
        audioBuffer->raw = NULL;
        if (nonContig != NULL) {
            *nonContig = 0;
        }
        return INVALID_OPERATION;
    }

    const struct timespec *requested;
    struct timespec timeout;
    if (waitCount == -1) {
        requested = &ClientProxy::kForever;
    } else if (waitCount == 0) {
        requested = &ClientProxy::kNonBlocking;
    } else if (waitCount > 0) {
        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
        timeout.tv_sec = ms / 1000;
        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
        requested = &timeout;
    } else {
        ALOGE("%s invalid waitCount %d", __func__, waitCount);
        requested = NULL;
    }
    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
}

// Internal obtainBuffer(): retries on server death (DEAD_OBJECT -> restoreTrack_l)
// or underrun-disable (NOT_ENOUGH_DATA -> restartIfDisabled) up to kMaxTries times.
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            if (status == NOT_ENOUGH_DATA) {
                restartIfDisabled();
            }

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);
    } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));

    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSize;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}

// Releases a buffer previously returned by obtainBuffer(), advancing the
// released-frame count and restarting the track if it was underrun-disabled.
void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
{
    // FIXME add error checking on mode, by adding an internal version
    if (mTransfer == TRANSFER_SHARED) {
        return;
    }

    size_t stepCount = audioBuffer->size / mFrameSize;
    if (stepCount == 0) {
        return;
    }

    Proxy::Buffer buffer;
    buffer.mFrameCount = stepCount;
    buffer.mRaw = audioBuffer->raw;

    AutoMutex lock(mLock);
    mReleased += stepCount;
    mInUnderrun = false;
    mProxy->releaseBuffer(&buffer);

    // restart track if it was disabled by audioflinger due to previous underrun
    restartIfDisabled();
}

// Clears CBLK_DISABLED and, if the track is active and was disabled,
// asks the server to start it again. Caller must hold mLock.
void AudioTrack::restartIfDisabled()
{
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
    if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
        ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
        // FIXME ignoring status
        mAudioTrack->start();
    }
}

// -------------------------------------------------------------------------

// Synchronous write: copies userSize bytes into the track buffer, whole frames only.
// Returns the number of bytes written, or a negative error
// (WOULD_BLOCK for a non-blocking call that timed out or was interrupted).
ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
    if (mTransfer != TRANSFER_SYNC) {
        return INVALID_OPERATION;
    }

    if (isDirect()) {
        AutoMutex lock(mLock);
        int32_t flags = android_atomic_and(
            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
            &mCblk->mFlags);
        if (flags & CBLK_INVALID) {
            return DEAD_OBJECT;
        }
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer,
                blocking ?
&ClientProxy::kForever : &ClientProxy::kNonBlocking); 1772 if (err < 0) { 1773 if (written > 0) { 1774 break; 1775 } 1776 if (err == TIMED_OUT || err == -EINTR) { 1777 err = WOULD_BLOCK; 1778 } 1779 return ssize_t(err); 1780 } 1781 1782 size_t toWrite = audioBuffer.size; 1783 memcpy(audioBuffer.i8, buffer, toWrite); 1784 buffer = ((const char *) buffer) + toWrite; 1785 userSize -= toWrite; 1786 written += toWrite; 1787 1788 releaseBuffer(&audioBuffer); 1789 } 1790 1791 if (written > 0) { 1792 mFramesWritten += written / mFrameSize; 1793 } 1794 return written; 1795} 1796 1797// ------------------------------------------------------------------------- 1798 1799nsecs_t AudioTrack::processAudioBuffer() 1800{ 1801 // Currently the AudioTrack thread is not created if there are no callbacks. 1802 // Would it ever make sense to run the thread, even without callbacks? 1803 // If so, then replace this by checks at each use for mCbf != NULL. 1804 LOG_ALWAYS_FATAL_IF(mCblk == NULL); 1805 1806 mLock.lock(); 1807 if (mAwaitBoost) { 1808 mAwaitBoost = false; 1809 mLock.unlock(); 1810 static const int32_t kMaxTries = 5; 1811 int32_t tryCounter = kMaxTries; 1812 uint32_t pollUs = 10000; 1813 do { 1814 int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK; 1815 if (policy == SCHED_FIFO || policy == SCHED_RR) { 1816 break; 1817 } 1818 usleep(pollUs); 1819 pollUs <<= 1; 1820 } while (tryCounter-- > 0); 1821 if (tryCounter < 0) { 1822 ALOGE("did not receive expected priority boost on time"); 1823 } 1824 // Run again immediately 1825 return 0; 1826 } 1827 1828 // Can only reference mCblk while locked 1829 int32_t flags = android_atomic_and( 1830 ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags); 1831 1832 // Check for track invalidation 1833 if (flags & CBLK_INVALID) { 1834 // for offloaded tracks restoreTrack_l() will just update the sequence and clear 1835 // AudioSystem cache. 
We should not exit here but after calling the callback so 1836 // that the upper layers can recreate the track 1837 if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) { 1838 status_t status __unused = restoreTrack_l("processAudioBuffer"); 1839 // FIXME unused status 1840 // after restoration, continue below to make sure that the loop and buffer events 1841 // are notified because they have been cleared from mCblk->mFlags above. 1842 } 1843 } 1844 1845 bool waitStreamEnd = mState == STATE_STOPPING; 1846 bool active = mState == STATE_ACTIVE; 1847 1848 // Manage underrun callback, must be done under lock to avoid race with releaseBuffer() 1849 bool newUnderrun = false; 1850 if (flags & CBLK_UNDERRUN) { 1851#if 0 1852 // Currently in shared buffer mode, when the server reaches the end of buffer, 1853 // the track stays active in continuous underrun state. It's up to the application 1854 // to pause or stop the track, or set the position to a new offset within buffer. 1855 // This was some experimental code to auto-pause on underrun. Keeping it here 1856 // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content. 1857 if (mTransfer == TRANSFER_SHARED) { 1858 mState = STATE_PAUSED; 1859 active = false; 1860 } 1861#endif 1862 if (!mInUnderrun) { 1863 mInUnderrun = true; 1864 newUnderrun = true; 1865 } 1866 } 1867 1868 // Get current position of server 1869 Modulo<uint32_t> position(updateAndGetPosition_l()); 1870 1871 // Manage marker callback 1872 bool markerReached = false; 1873 Modulo<uint32_t> markerPosition(mMarkerPosition); 1874 // uses 32 bit wraparound for comparison with position. 
1875 if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) { 1876 mMarkerReached = markerReached = true; 1877 } 1878 1879 // Determine number of new position callback(s) that will be needed, while locked 1880 size_t newPosCount = 0; 1881 Modulo<uint32_t> newPosition(mNewPosition); 1882 uint32_t updatePeriod = mUpdatePeriod; 1883 // FIXME fails for wraparound, need 64 bits 1884 if (updatePeriod > 0 && position >= newPosition) { 1885 newPosCount = ((position - newPosition).value() / updatePeriod) + 1; 1886 mNewPosition += updatePeriod * newPosCount; 1887 } 1888 1889 // Cache other fields that will be needed soon 1890 uint32_t sampleRate = mSampleRate; 1891 float speed = mPlaybackRate.mSpeed; 1892 const uint32_t notificationFrames = mNotificationFramesAct; 1893 if (mRefreshRemaining) { 1894 mRefreshRemaining = false; 1895 mRemainingFrames = notificationFrames; 1896 mRetryOnPartialBuffer = false; 1897 } 1898 size_t misalignment = mProxy->getMisalignment(); 1899 uint32_t sequence = mSequence; 1900 sp<AudioTrackClientProxy> proxy = mProxy; 1901 1902 // Determine the number of new loop callback(s) that will be needed, while locked. 1903 int loopCountNotifications = 0; 1904 uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END 1905 1906 if (mLoopCount > 0) { 1907 int loopCount; 1908 size_t bufferPosition; 1909 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount); 1910 loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition; 1911 loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications); 1912 mLoopCountNotified = loopCount; // discard any excess notifications 1913 } else if (mLoopCount < 0) { 1914 // FIXME: We're not accurate with notification count and position with infinite looping 1915 // since loopCount from server side will always return -1 (we could decrement it). 
1916 size_t bufferPosition = mStaticProxy->getBufferPosition(); 1917 loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0); 1918 loopPeriod = mLoopEnd - bufferPosition; 1919 } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) { 1920 size_t bufferPosition = mStaticProxy->getBufferPosition(); 1921 loopPeriod = mFrameCount - bufferPosition; 1922 } 1923 1924 // These fields don't need to be cached, because they are assigned only by set(): 1925 // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags 1926 // mFlags is also assigned by createTrack_l(), but not the bit we care about. 1927 1928 mLock.unlock(); 1929 1930 // get anchor time to account for callbacks. 1931 const nsecs_t timeBeforeCallbacks = systemTime(); 1932 1933 if (waitStreamEnd) { 1934 // FIXME: Instead of blocking in proxy->waitStreamEndDone(), Callback thread 1935 // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function 1936 // (and make sure we don't callback for more data while we're stopping). 1937 // This helps with position, marker notifications, and track invalidation. 1938 struct timespec timeout; 1939 timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC; 1940 timeout.tv_nsec = 0; 1941 1942 status_t status = proxy->waitStreamEndDone(&timeout); 1943 switch (status) { 1944 case NO_ERROR: 1945 case DEAD_OBJECT: 1946 case TIMED_OUT: 1947 if (status != DEAD_OBJECT) { 1948 // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop(); 1949 // instead, the application should handle the EVENT_NEW_IAUDIOTRACK. 1950 mCbf(EVENT_STREAM_END, mUserData, NULL); 1951 } 1952 { 1953 AutoMutex lock(mLock); 1954 // The previously assigned value of waitStreamEnd is no longer valid, 1955 // since the mutex has been unlocked and either the callback handler 1956 // or another thread could have re-started the AudioTrack during that time. 
1957 waitStreamEnd = mState == STATE_STOPPING; 1958 if (waitStreamEnd) { 1959 mState = STATE_STOPPED; 1960 mReleased = 0; 1961 } 1962 } 1963 if (waitStreamEnd && status != DEAD_OBJECT) { 1964 return NS_INACTIVE; 1965 } 1966 break; 1967 } 1968 return 0; 1969 } 1970 1971 // perform callbacks while unlocked 1972 if (newUnderrun) { 1973 mCbf(EVENT_UNDERRUN, mUserData, NULL); 1974 } 1975 while (loopCountNotifications > 0) { 1976 mCbf(EVENT_LOOP_END, mUserData, NULL); 1977 --loopCountNotifications; 1978 } 1979 if (flags & CBLK_BUFFER_END) { 1980 mCbf(EVENT_BUFFER_END, mUserData, NULL); 1981 } 1982 if (markerReached) { 1983 mCbf(EVENT_MARKER, mUserData, &markerPosition); 1984 } 1985 while (newPosCount > 0) { 1986 size_t temp = newPosition.value(); // FIXME size_t != uint32_t 1987 mCbf(EVENT_NEW_POS, mUserData, &temp); 1988 newPosition += updatePeriod; 1989 newPosCount--; 1990 } 1991 1992 if (mObservedSequence != sequence) { 1993 mObservedSequence = sequence; 1994 mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL); 1995 // for offloaded tracks, just wait for the upper layers to recreate the track 1996 if (isOffloadedOrDirect()) { 1997 return NS_INACTIVE; 1998 } 1999 } 2000 2001 // if inactive, then don't run me again until re-started 2002 if (!active) { 2003 return NS_INACTIVE; 2004 } 2005 2006 // Compute the estimated time until the next timed event (position, markers, loops) 2007 // FIXME only for non-compressed audio 2008 uint32_t minFrames = ~0; 2009 if (!markerReached && position < markerPosition) { 2010 minFrames = (markerPosition - position).value(); 2011 } 2012 if (loopPeriod > 0 && loopPeriod < minFrames) { 2013 // loopPeriod is already adjusted for actual position. 2014 minFrames = loopPeriod; 2015 } 2016 if (updatePeriod > 0) { 2017 minFrames = min(minFrames, (newPosition - position).value()); 2018 } 2019 2020 // If > 0, poll periodically to recover from a stuck server. A good value is 2. 
2021 static const uint32_t kPoll = 0; 2022 if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) { 2023 minFrames = kPoll * notificationFrames; 2024 } 2025 2026 // This "fudge factor" avoids soaking CPU, and compensates for late progress by server 2027 static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL; 2028 const nsecs_t timeAfterCallbacks = systemTime(); 2029 2030 // Convert frame units to time units 2031 nsecs_t ns = NS_WHENEVER; 2032 if (minFrames != (uint32_t) ~0) { 2033 ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs; 2034 ns -= (timeAfterCallbacks - timeBeforeCallbacks); // account for callback time 2035 // TODO: Should we warn if the callback time is too long? 2036 if (ns < 0) ns = 0; 2037 } 2038 2039 // If not supplying data by EVENT_MORE_DATA, then we're done 2040 if (mTransfer != TRANSFER_CALLBACK) { 2041 return ns; 2042 } 2043 2044 // EVENT_MORE_DATA callback handling. 2045 // Timing for linear pcm audio data formats can be derived directly from the 2046 // buffer fill level. 2047 // Timing for compressed data is not directly available from the buffer fill level, 2048 // rather indirectly from waiting for blocking mode callbacks or waiting for obtain() 2049 // to return a certain fill level. 
2050 2051 struct timespec timeout; 2052 const struct timespec *requested = &ClientProxy::kForever; 2053 if (ns != NS_WHENEVER) { 2054 timeout.tv_sec = ns / 1000000000LL; 2055 timeout.tv_nsec = ns % 1000000000LL; 2056 ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000); 2057 requested = &timeout; 2058 } 2059 2060 size_t writtenFrames = 0; 2061 while (mRemainingFrames > 0) { 2062 2063 Buffer audioBuffer; 2064 audioBuffer.frameCount = mRemainingFrames; 2065 size_t nonContig; 2066 status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig); 2067 LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0), 2068 "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount); 2069 requested = &ClientProxy::kNonBlocking; 2070 size_t avail = audioBuffer.frameCount + nonContig; 2071 ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d", 2072 mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err); 2073 if (err != NO_ERROR) { 2074 if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR || 2075 (isOffloaded() && (err == DEAD_OBJECT))) { 2076 // FIXME bug 25195759 2077 return 1000000; 2078 } 2079 ALOGE("Error %d obtaining an audio buffer, giving up.", err); 2080 return NS_NEVER; 2081 } 2082 2083 if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) { 2084 mRetryOnPartialBuffer = false; 2085 if (avail < mRemainingFrames) { 2086 if (ns > 0) { // account for obtain time 2087 const nsecs_t timeNow = systemTime(); 2088 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks)); 2089 } 2090 nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed); 2091 if (ns < 0 /* NS_WHENEVER */ || myns < ns) { 2092 ns = myns; 2093 } 2094 return ns; 2095 } 2096 } 2097 2098 size_t reqSize = audioBuffer.size; 2099 mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer); 2100 size_t writtenSize = audioBuffer.size; 2101 2102 // Sanity check on returned size 2103 if (ssize_t(writtenSize) < 0 || writtenSize > 
reqSize) { 2104 ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes", 2105 reqSize, ssize_t(writtenSize)); 2106 return NS_NEVER; 2107 } 2108 2109 if (writtenSize == 0) { 2110 // The callback is done filling buffers 2111 // Keep this thread going to handle timed events and 2112 // still try to get more data in intervals of WAIT_PERIOD_MS 2113 // but don't just loop and block the CPU, so wait 2114 2115 // mCbf(EVENT_MORE_DATA, ...) might either 2116 // (1) Block until it can fill the buffer, returning 0 size on EOS. 2117 // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS. 2118 // (3) Return 0 size when no data is available, does not wait for more data. 2119 // 2120 // (1) and (2) occurs with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer. 2121 // We try to compute the wait time to avoid a tight sleep-wait cycle, 2122 // especially for case (3). 2123 // 2124 // The decision to support (1) and (2) affect the sizing of mRemainingFrames 2125 // and this loop; whereas for case (3) we could simply check once with the full 2126 // buffer size and skip the loop entirely. 2127 2128 nsecs_t myns; 2129 if (audio_has_proportional_frames(mFormat)) { 2130 // time to wait based on buffer occupancy 2131 const nsecs_t datans = mRemainingFrames <= avail ? 0 : 2132 framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed); 2133 // audio flinger thread buffer size (TODO: adjust for fast tracks) 2134 // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks. 2135 const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed); 2136 // add a half the AudioFlinger buffer time to avoid soaking CPU if datans is 0. 2137 myns = datans + (afns / 2); 2138 } else { 2139 // FIXME: This could ping quite a bit if the buffer isn't full. 2140 // Note that when mState is stopping we waitStreamEnd, so it never gets here. 
2141 myns = kWaitPeriodNs; 2142 } 2143 if (ns > 0) { // account for obtain and callback time 2144 const nsecs_t timeNow = systemTime(); 2145 ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks)); 2146 } 2147 if (ns < 0 /* NS_WHENEVER */ || myns < ns) { 2148 ns = myns; 2149 } 2150 return ns; 2151 } 2152 2153 size_t releasedFrames = writtenSize / mFrameSize; 2154 audioBuffer.frameCount = releasedFrames; 2155 mRemainingFrames -= releasedFrames; 2156 if (misalignment >= releasedFrames) { 2157 misalignment -= releasedFrames; 2158 } else { 2159 misalignment = 0; 2160 } 2161 2162 releaseBuffer(&audioBuffer); 2163 writtenFrames += releasedFrames; 2164 2165 // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer 2166 // if callback doesn't like to accept the full chunk 2167 if (writtenSize < reqSize) { 2168 continue; 2169 } 2170 2171 // There could be enough non-contiguous frames available to satisfy the remaining request 2172 if (mRemainingFrames <= nonContig) { 2173 continue; 2174 } 2175 2176#if 0 2177 // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a 2178 // sum <= notificationFrames. It replaces that series by at most two EVENT_MORE_DATA 2179 // that total to a sum == notificationFrames. 2180 if (0 < misalignment && misalignment <= mRemainingFrames) { 2181 mRemainingFrames = misalignment; 2182 return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed); 2183 } 2184#endif 2185 2186 } 2187 if (writtenFrames > 0) { 2188 AutoMutex lock(mLock); 2189 mFramesWritten += writtenFrames; 2190 } 2191 mRemainingFrames = notificationFrames; 2192 mRetryOnPartialBuffer = true; 2193 2194 // A lot has transpired since ns was calculated, so run again immediately and re-calculate 2195 return 0; 2196} 2197 2198status_t AudioTrack::restoreTrack_l(const char *from) 2199{ 2200 ALOGW("dead IAudioTrack, %s, creating a new one from %s()", 2201 isOffloadedOrDirect_l() ? 
"Offloaded or Direct" : "PCM", from); 2202 ++mSequence; 2203 2204 // refresh the audio configuration cache in this process to make sure we get new 2205 // output parameters and new IAudioFlinger in createTrack_l() 2206 AudioSystem::clearAudioConfigCache(); 2207 2208 if (isOffloadedOrDirect_l() || mDoNotReconnect) { 2209 // FIXME re-creation of offloaded and direct tracks is not yet implemented; 2210 // reconsider enabling for linear PCM encodings when position can be preserved. 2211 return DEAD_OBJECT; 2212 } 2213 2214 // Save so we can return count since creation. 2215 mUnderrunCountOffset = getUnderrunCount_l(); 2216 2217 // save the old static buffer position 2218 uint32_t staticPosition = 0; 2219 size_t bufferPosition = 0; 2220 int loopCount = 0; 2221 if (mStaticProxy != 0) { 2222 mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount); 2223 staticPosition = mStaticProxy->getPosition().unsignedValue(); 2224 } 2225 2226 mFlags = mOrigFlags; 2227 2228 // If a new IAudioTrack is successfully created, createTrack_l() will modify the 2229 // following member variables: mAudioTrack, mCblkMemory and mCblk. 2230 // It will also delete the strong references on previous IAudioTrack and IMemory. 2231 // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact. 2232 status_t result = createTrack_l(); 2233 2234 if (result == NO_ERROR) { 2235 // take the frames that will be lost by track recreation into account in saved position 2236 // For streaming tracks, this is the amount we obtained from the user/client 2237 // (not the number actually consumed at the server - those are already lost). 2238 if (mStaticProxy == 0) { 2239 mPosition = mReleased; 2240 } 2241 // Continue playback from last known position and restore loop. 
2242 if (mStaticProxy != 0) { 2243 if (loopCount != 0) { 2244 mStaticProxy->setBufferPositionAndLoop(bufferPosition, 2245 mLoopStart, mLoopEnd, loopCount); 2246 } else { 2247 mStaticProxy->setBufferPosition(bufferPosition); 2248 if (bufferPosition == mFrameCount) { 2249 ALOGD("restoring track at end of static buffer"); 2250 } 2251 } 2252 } 2253 // restore volume handler 2254 mVolumeHandler->forall([this](const sp<VolumeShaper::Configuration> &configuration, 2255 const sp<VolumeShaper::Operation> &operation) -> VolumeShaper::Status { 2256 sp<VolumeShaper::Operation> operationToEnd = new VolumeShaper::Operation(*operation); 2257 // TODO: Ideally we would restore to the exact xOffset position 2258 // as returned by getVolumeShaperState(), but we don't have that 2259 // information when restoring at the client unless we periodically poll 2260 // the server or create shared memory state. 2261 // 2262 // For now, we simply advance to the end of the VolumeShaper effect. 2263 operationToEnd->setXOffset(1.f); 2264 return mAudioTrack->applyVolumeShaper(configuration, operationToEnd); 2265 }); 2266 2267 if (mState == STATE_ACTIVE) { 2268 result = mAudioTrack->start(); 2269 } 2270 // server resets to zero so we offset 2271 mFramesWrittenServerOffset = 2272 mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten; 2273 mFramesWrittenAtRestore = mFramesWrittenServerOffset; 2274 } 2275 if (result != NO_ERROR) { 2276 ALOGW("restoreTrack_l() failed status %d", result); 2277 mState = STATE_STOPPED; 2278 mReleased = 0; 2279 } 2280 2281 return result; 2282} 2283 2284Modulo<uint32_t> AudioTrack::updateAndGetPosition_l() 2285{ 2286 // This is the sole place to read server consumed frames 2287 Modulo<uint32_t> newServer(mProxy->getPosition()); 2288 const int32_t delta = (newServer - mServer).signedValue(); 2289 // TODO There is controversy about whether there can be "negative jitter" in server position. 
2290 // This should be investigated further, and if possible, it should be addressed. 2291 // A more definite failure mode is infrequent polling by client. 2292 // One could call (void)getPosition_l() in releaseBuffer(), 2293 // so mReleased and mPosition are always lock-step as best possible. 2294 // That should ensure delta never goes negative for infrequent polling 2295 // unless the server has more than 2^31 frames in its buffer, 2296 // in which case the use of uint32_t for these counters has bigger issues. 2297 ALOGE_IF(delta < 0, 2298 "detected illegal retrograde motion by the server: mServer advanced by %d", 2299 delta); 2300 mServer = newServer; 2301 if (delta > 0) { // avoid retrograde 2302 mPosition += delta; 2303 } 2304 return mPosition; 2305} 2306 2307bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const 2308{ 2309 // applicable for mixing tracks only (not offloaded or direct) 2310 if (mStaticProxy != 0) { 2311 return true; // static tracks do not have issues with buffer sizing. 
2312 } 2313 const size_t minFrameCount = 2314 calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed 2315 /*, 0 mNotificationsPerBufferReq*/); 2316 ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu minFrameCount %zu", 2317 mFrameCount, minFrameCount); 2318 return mFrameCount >= minFrameCount; 2319} 2320 2321status_t AudioTrack::setParameters(const String8& keyValuePairs) 2322{ 2323 AutoMutex lock(mLock); 2324 return mAudioTrack->setParameters(keyValuePairs); 2325} 2326 2327VolumeShaper::Status AudioTrack::applyVolumeShaper( 2328 const sp<VolumeShaper::Configuration>& configuration, 2329 const sp<VolumeShaper::Operation>& operation) 2330{ 2331 AutoMutex lock(mLock); 2332 mVolumeHandler->setIdIfNecessary(configuration); 2333 VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation); 2334 if (status >= 0) { 2335 // save VolumeShaper for restore 2336 mVolumeHandler->applyVolumeShaper(configuration, operation); 2337 } 2338 return status; 2339} 2340 2341sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id) 2342{ 2343 // TODO: To properly restore the AudioTrack 2344 // we will need to save the last state in AudioTrackShared. 2345 AutoMutex lock(mLock); 2346 return mAudioTrack->getVolumeShaperState(id); 2347} 2348 2349status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp) 2350{ 2351 if (timestamp == nullptr) { 2352 return BAD_VALUE; 2353 } 2354 AutoMutex lock(mLock); 2355 return getTimestamp_l(timestamp); 2356} 2357 2358status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp) 2359{ 2360 if (mCblk->mFlags & CBLK_INVALID) { 2361 const status_t status = restoreTrack_l("getTimestampExtended"); 2362 if (status != OK) { 2363 // per getTimestamp() API doc in header, we return DEAD_OBJECT here, 2364 // recommending that the track be recreated. 2365 return DEAD_OBJECT; 2366 } 2367 } 2368 // check for offloaded/direct here in case restoring somehow changed those flags. 
2369 if (isOffloadedOrDirect_l()) { 2370 return INVALID_OPERATION; // not supported 2371 } 2372 status_t status = mProxy->getTimestamp(timestamp); 2373 LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status); 2374 bool found = false; 2375 timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten; 2376 timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0; 2377 // server side frame offset in case AudioTrack has been restored. 2378 for (int i = ExtendedTimestamp::LOCATION_SERVER; 2379 i < ExtendedTimestamp::LOCATION_MAX; ++i) { 2380 if (timestamp->mTimeNs[i] >= 0) { 2381 // apply server offset (frames flushed is ignored 2382 // so we don't report the jump when the flush occurs). 2383 timestamp->mPosition[i] += mFramesWrittenServerOffset; 2384 found = true; 2385 } 2386 } 2387 return found ? OK : WOULD_BLOCK; 2388} 2389 2390status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp) 2391{ 2392 AutoMutex lock(mLock); 2393 return getTimestamp_l(timestamp); 2394} 2395 2396status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp) 2397{ 2398 bool previousTimestampValid = mPreviousTimestampValid; 2399 // Set false here to cover all the error return cases. 2400 mPreviousTimestampValid = false; 2401 2402 switch (mState) { 2403 case STATE_ACTIVE: 2404 case STATE_PAUSED: 2405 break; // handle below 2406 case STATE_FLUSHED: 2407 case STATE_STOPPED: 2408 return WOULD_BLOCK; 2409 case STATE_STOPPING: 2410 case STATE_PAUSED_STOPPING: 2411 if (!isOffloaded_l()) { 2412 return INVALID_OPERATION; 2413 } 2414 break; // offloaded tracks handled below 2415 default: 2416 LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState); 2417 break; 2418 } 2419 2420 if (mCblk->mFlags & CBLK_INVALID) { 2421 const status_t status = restoreTrack_l("getTimestamp"); 2422 if (status != OK) { 2423 // per getTimestamp() API doc in header, we return DEAD_OBJECT here, 2424 // recommending that the track be recreated. 
2425 return DEAD_OBJECT; 2426 } 2427 } 2428 2429 // The presented frame count must always lag behind the consumed frame count. 2430 // To avoid a race, read the presented frames first. This ensures that presented <= consumed. 2431 2432 status_t status; 2433 if (isOffloadedOrDirect_l()) { 2434 // use Binder to get timestamp 2435 status = mAudioTrack->getTimestamp(timestamp); 2436 } else { 2437 // read timestamp from shared memory 2438 ExtendedTimestamp ets; 2439 status = mProxy->getTimestamp(&ets); 2440 if (status == OK) { 2441 ExtendedTimestamp::Location location; 2442 status = ets.getBestTimestamp(×tamp, &location); 2443 2444 if (status == OK) { 2445 // It is possible that the best location has moved from the kernel to the server. 2446 // In this case we adjust the position from the previous computed latency. 2447 if (location == ExtendedTimestamp::LOCATION_SERVER) { 2448 ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL, 2449 "getTimestamp() location moved from kernel to server"); 2450 // check that the last kernel OK time info exists and the positions 2451 // are valid (if they predate the current track, the positions may 2452 // be zero or negative). 2453 const int64_t frames = 2454 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 || 2455 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 || 2456 ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 || 2457 ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0) 2458 ? 
2459 int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed 2460 / 1000) 2461 : 2462 (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] 2463 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]); 2464 ALOGV("frame adjustment:%lld timestamp:%s", 2465 (long long)frames, ets.toString().c_str()); 2466 if (frames >= ets.mPosition[location]) { 2467 timestamp.mPosition = 0; 2468 } else { 2469 timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames); 2470 } 2471 } else if (location == ExtendedTimestamp::LOCATION_KERNEL) { 2472 ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER, 2473 "getTimestamp() location moved from server to kernel"); 2474 } 2475 2476 // We update the timestamp time even when paused. 2477 if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) { 2478 const int64_t now = systemTime(); 2479 const int64_t at = convertTimespecToNs(timestamp.mTime); 2480 const int64_t lag = 2481 (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 || 2482 ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0) 2483 ? int64_t(mAfLatency * 1000000LL) 2484 : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] 2485 - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]) 2486 * NANOS_PER_SECOND / mSampleRate; 2487 const int64_t limit = now - lag; // no earlier than this limit 2488 if (at < limit) { 2489 ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld", 2490 (long long)lag, (long long)at, (long long)limit); 2491 timestamp.mTime.tv_sec = limit / NANOS_PER_SECOND; 2492 timestamp.mTime.tv_nsec = limit % NANOS_PER_SECOND; // compiler opt. 
2493 } 2494 } 2495 mPreviousLocation = location; 2496 } else { 2497 // right after AudioTrack is started, one may not find a timestamp 2498 ALOGV("getBestTimestamp did not find timestamp"); 2499 } 2500 } 2501 if (status == INVALID_OPERATION) { 2502 // INVALID_OPERATION occurs when no timestamp has been issued by the server; 2503 // other failures are signaled by a negative time. 2504 // If we come out of FLUSHED or STOPPED where the position is known 2505 // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of 2506 // "zero" for NuPlayer). We don't convert for track restoration as position 2507 // does not reset. 2508 ALOGV("timestamp server offset:%lld restore frames:%lld", 2509 (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore); 2510 if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) { 2511 status = WOULD_BLOCK; 2512 } 2513 } 2514 } 2515 if (status != NO_ERROR) { 2516 ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status); 2517 return status; 2518 } 2519 if (isOffloadedOrDirect_l()) { 2520 if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) { 2521 // use cached paused position in case another offloaded track is running. 2522 timestamp.mPosition = mPausedPosition; 2523 clock_gettime(CLOCK_MONOTONIC, ×tamp.mTime); 2524 // TODO: adjust for delay 2525 return NO_ERROR; 2526 } 2527 2528 // Check whether a pending flush or stop has completed, as those commands may 2529 // be asynchronous or return near finish or exhibit glitchy behavior. 2530 // 2531 // Originally this showed up as the first timestamp being a continuation of 2532 // the previous song under gapless playback. 2533 // However, we sometimes see zero timestamps, then a glitch of 2534 // the previous song's position, and then correct timestamps afterwards. 
2535 if (mStartUs != 0 && mSampleRate != 0) { 2536 static const int kTimeJitterUs = 100000; // 100 ms 2537 static const int k1SecUs = 1000000; 2538 2539 const int64_t timeNow = getNowUs(); 2540 2541 if (timeNow < mStartUs + k1SecUs) { // within first second of starting 2542 const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime); 2543 if (timestampTimeUs < mStartUs) { 2544 return WOULD_BLOCK; // stale timestamp time, occurs before start. 2545 } 2546 const int64_t deltaTimeUs = timestampTimeUs - mStartUs; 2547 const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000 2548 / ((double)mSampleRate * mPlaybackRate.mSpeed); 2549 2550 if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) { 2551 // Verify that the counter can't count faster than the sample rate 2552 // since the start time. If greater, then that means we may have failed 2553 // to completely flush or stop the previous playing track. 2554 ALOGW_IF(!mTimestampStartupGlitchReported, 2555 "getTimestamp startup glitch detected" 2556 " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)", 2557 (long long)deltaTimeUs, (long long)deltaPositionByUs, 2558 timestamp.mPosition); 2559 mTimestampStartupGlitchReported = true; 2560 if (previousTimestampValid 2561 && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) { 2562 timestamp = mPreviousTimestamp; 2563 mPreviousTimestampValid = true; 2564 return NO_ERROR; 2565 } 2566 return WOULD_BLOCK; 2567 } 2568 if (deltaPositionByUs != 0) { 2569 mStartUs = 0; // don't check again, we got valid nonzero position. 2570 } 2571 } else { 2572 mStartUs = 0; // don't check again, start time expired. 2573 } 2574 mTimestampStartupGlitchReported = false; 2575 } 2576 } else { 2577 // Update the mapping between local consumed (mPosition) and server consumed (mServer) 2578 (void) updateAndGetPosition_l(); 2579 // Server consumed (mServer) and presented both use the same server time base, 2580 // and server consumed is always >= presented. 
2581 // The delta between these represents the number of frames in the buffer pipeline. 2582 // If this delta between these is greater than the client position, it means that 2583 // actually presented is still stuck at the starting line (figuratively speaking), 2584 // waiting for the first frame to go by. So we can't report a valid timestamp yet. 2585 // Note: We explicitly use non-Modulo comparison here - potential wrap issue when 2586 // mPosition exceeds 32 bits. 2587 // TODO Remove when timestamp is updated to contain pipeline status info. 2588 const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue(); 2589 if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */ 2590 && (uint32_t)pipelineDepthInFrames > mPosition.value()) { 2591 return INVALID_OPERATION; 2592 } 2593 // Convert timestamp position from server time base to client time base. 2594 // TODO The following code should work OK now because timestamp.mPosition is 32-bit. 2595 // But if we change it to 64-bit then this could fail. 2596 // Use Modulo computation here. 2597 timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value(); 2598 // Immediately after a call to getPosition_l(), mPosition and 2599 // mServer both represent the same frame position. mPosition is 2600 // in client's point of view, and mServer is in server's point of 2601 // view. So the difference between them is the "fudge factor" 2602 // between client and server views due to stop() and/or new 2603 // IAudioTrack. And timestamp.mPosition is initially in server's 2604 // point of view, so we need to apply the same fudge factor to it. 2605 } 2606 2607 // Prevent retrograde motion in timestamp. 2608 // This is sometimes caused by erratic reports of the available space in the ALSA drivers. 
2609 if (status == NO_ERROR) { 2610 if (previousTimestampValid) { 2611 const int64_t previousTimeNanos = convertTimespecToNs(mPreviousTimestamp.mTime); 2612 const int64_t currentTimeNanos = convertTimespecToNs(timestamp.mTime); 2613 if (currentTimeNanos < previousTimeNanos) { 2614 ALOGW("retrograde timestamp time corrected, %lld < %lld", 2615 (long long)currentTimeNanos, (long long)previousTimeNanos); 2616 timestamp.mTime = mPreviousTimestamp.mTime; 2617 } 2618 2619 // Looking at signed delta will work even when the timestamps 2620 // are wrapping around. 2621 int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition) 2622 - mPreviousTimestamp.mPosition).signedValue(); 2623 if (deltaPosition < 0) { 2624 // Only report once per position instead of spamming the log. 2625 if (!mRetrogradeMotionReported) { 2626 ALOGW("retrograde timestamp position corrected, %d = %u - %u", 2627 deltaPosition, 2628 timestamp.mPosition, 2629 mPreviousTimestamp.mPosition); 2630 mRetrogradeMotionReported = true; 2631 } 2632 } else { 2633 mRetrogradeMotionReported = false; 2634 } 2635 if (deltaPosition < 0) { 2636 timestamp.mPosition = mPreviousTimestamp.mPosition; 2637 deltaPosition = 0; 2638 } 2639#if 0 2640 // Uncomment this to verify audio timestamp rate. 
2641 const int64_t deltaTime = 2642 convertTimespecToNs(timestamp.mTime) - previousTimeNanos; 2643 if (deltaTime != 0) { 2644 const int64_t computedSampleRate = 2645 deltaPosition * (long long)NANOS_PER_SECOND / deltaTime; 2646 ALOGD("computedSampleRate:%u sampleRate:%u", 2647 (unsigned)computedSampleRate, mSampleRate); 2648 } 2649#endif 2650 } 2651 mPreviousTimestamp = timestamp; 2652 mPreviousTimestampValid = true; 2653 } 2654 2655 return status; 2656} 2657 2658String8 AudioTrack::getParameters(const String8& keys) 2659{ 2660 audio_io_handle_t output = getOutput(); 2661 if (output != AUDIO_IO_HANDLE_NONE) { 2662 return AudioSystem::getParameters(output, keys); 2663 } else { 2664 return String8::empty(); 2665 } 2666} 2667 2668bool AudioTrack::isOffloaded() const 2669{ 2670 AutoMutex lock(mLock); 2671 return isOffloaded_l(); 2672} 2673 2674bool AudioTrack::isDirect() const 2675{ 2676 AutoMutex lock(mLock); 2677 return isDirect_l(); 2678} 2679 2680bool AudioTrack::isOffloadedOrDirect() const 2681{ 2682 AutoMutex lock(mLock); 2683 return isOffloadedOrDirect_l(); 2684} 2685 2686 2687status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const 2688{ 2689 2690 const size_t SIZE = 256; 2691 char buffer[SIZE]; 2692 String8 result; 2693 2694 result.append(" AudioTrack::dump\n"); 2695 snprintf(buffer, 255, " stream type(%d), left - right volume(%f, %f)\n", mStreamType, 2696 mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]); 2697 result.append(buffer); 2698 snprintf(buffer, 255, " format(%d), channel count(%d), frame count(%zu)\n", mFormat, 2699 mChannelCount, mFrameCount); 2700 result.append(buffer); 2701 snprintf(buffer, 255, " sample rate(%u), speed(%f), status(%d)\n", 2702 mSampleRate, mPlaybackRate.mSpeed, mStatus); 2703 result.append(buffer); 2704 snprintf(buffer, 255, " state(%d), latency (%d)\n", mState, mLatency); 2705 result.append(buffer); 2706 ::write(fd, result.string(), result.size()); 2707 return NO_ERROR; 2708} 2709 
2710uint32_t AudioTrack::getUnderrunCount() const 2711{ 2712 AutoMutex lock(mLock); 2713 return getUnderrunCount_l(); 2714} 2715 2716uint32_t AudioTrack::getUnderrunCount_l() const 2717{ 2718 return mProxy->getUnderrunCount() + mUnderrunCountOffset; 2719} 2720 2721uint32_t AudioTrack::getUnderrunFrames() const 2722{ 2723 AutoMutex lock(mLock); 2724 return mProxy->getUnderrunFrames(); 2725} 2726 2727status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback) 2728{ 2729 if (callback == 0) { 2730 ALOGW("%s adding NULL callback!", __FUNCTION__); 2731 return BAD_VALUE; 2732 } 2733 AutoMutex lock(mLock); 2734 if (mDeviceCallback == callback) { 2735 ALOGW("%s adding same callback!", __FUNCTION__); 2736 return INVALID_OPERATION; 2737 } 2738 status_t status = NO_ERROR; 2739 if (mOutput != AUDIO_IO_HANDLE_NONE) { 2740 if (mDeviceCallback != 0) { 2741 ALOGW("%s callback already present!", __FUNCTION__); 2742 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput); 2743 } 2744 status = AudioSystem::addAudioDeviceCallback(callback, mOutput); 2745 } 2746 mDeviceCallback = callback; 2747 return status; 2748} 2749 2750status_t AudioTrack::removeAudioDeviceCallback( 2751 const sp<AudioSystem::AudioDeviceCallback>& callback) 2752{ 2753 if (callback == 0) { 2754 ALOGW("%s removing NULL callback!", __FUNCTION__); 2755 return BAD_VALUE; 2756 } 2757 AutoMutex lock(mLock); 2758 if (mDeviceCallback != callback) { 2759 ALOGW("%s removing different callback!", __FUNCTION__); 2760 return INVALID_OPERATION; 2761 } 2762 if (mOutput != AUDIO_IO_HANDLE_NONE) { 2763 AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput); 2764 } 2765 mDeviceCallback = 0; 2766 return NO_ERROR; 2767} 2768 2769status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location) 2770{ 2771 if (msec == nullptr || 2772 (location != ExtendedTimestamp::LOCATION_SERVER 2773 && location != ExtendedTimestamp::LOCATION_KERNEL)) { 2774 return 
BAD_VALUE; 2775 } 2776 AutoMutex lock(mLock); 2777 // inclusive of offloaded and direct tracks. 2778 // 2779 // It is possible, but not enabled, to allow duration computation for non-pcm 2780 // audio_has_proportional_frames() formats because currently they have 2781 // the drain rate equivalent to the pcm sample rate * framesize. 2782 if (!isPurePcmData_l()) { 2783 return INVALID_OPERATION; 2784 } 2785 ExtendedTimestamp ets; 2786 if (getTimestamp_l(&ets) == OK 2787 && ets.mTimeNs[location] > 0) { 2788 int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT] 2789 - ets.mPosition[location]; 2790 if (diff < 0) { 2791 *msec = 0; 2792 } else { 2793 // ms is the playback time by frames 2794 int64_t ms = (int64_t)((double)diff * 1000 / 2795 ((double)mSampleRate * mPlaybackRate.mSpeed)); 2796 // clockdiff is the timestamp age (negative) 2797 int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 : 2798 ets.mTimeNs[location] 2799 + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC] 2800 - systemTime(SYSTEM_TIME_MONOTONIC); 2801 2802 //ALOGV("ms: %lld clockdiff: %lld", (long long)ms, (long long)clockdiff); 2803 static const int NANOS_PER_MILLIS = 1000000; 2804 *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS); 2805 } 2806 return NO_ERROR; 2807 } 2808 if (location != ExtendedTimestamp::LOCATION_SERVER) { 2809 return INVALID_OPERATION; // LOCATION_KERNEL is not available 2810 } 2811 // use server position directly (offloaded and direct arrive here) 2812 updateAndGetPosition_l(); 2813 int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue(); 2814 *msec = (diff <= 0) ? 0 2815 : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed)); 2816 return NO_ERROR; 2817} 2818 2819bool AudioTrack::hasStarted() 2820{ 2821 AutoMutex lock(mLock); 2822 switch (mState) { 2823 case STATE_STOPPED: 2824 if (isOffloadedOrDirect_l()) { 2825 // check if we have started in the past to return true. 
2826 return mStartUs > 0; 2827 } 2828 // A normal audio track may still be draining, so 2829 // check if stream has ended. This covers fasttrack position 2830 // instability and start/stop without any data written. 2831 if (mProxy->getStreamEndDone()) { 2832 return true; 2833 } 2834 // fall through 2835 case STATE_ACTIVE: 2836 case STATE_STOPPING: 2837 break; 2838 case STATE_PAUSED: 2839 case STATE_PAUSED_STOPPING: 2840 case STATE_FLUSHED: 2841 return false; // we're not active 2842 default: 2843 LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState); 2844 break; 2845 } 2846 2847 // wait indicates whether we need to wait for a timestamp. 2848 // This is conservatively figured - if we encounter an unexpected error 2849 // then we will not wait. 2850 bool wait = false; 2851 if (isOffloadedOrDirect_l()) { 2852 AudioTimestamp ts; 2853 status_t status = getTimestamp_l(ts); 2854 if (status == WOULD_BLOCK) { 2855 wait = true; 2856 } else if (status == OK) { 2857 wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition); 2858 } 2859 ALOGV("hasStarted wait:%d ts:%u start position:%lld", 2860 (int)wait, 2861 ts.mPosition, 2862 (long long)mStartTs.mPosition); 2863 } else { 2864 int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG 2865 ExtendedTimestamp ets; 2866 status_t status = getTimestamp_l(&ets); 2867 if (status == WOULD_BLOCK) { // no SERVER or KERNEL frame info in ets 2868 wait = true; 2869 } else if (status == OK) { 2870 for (location = ExtendedTimestamp::LOCATION_KERNEL; 2871 location >= ExtendedTimestamp::LOCATION_SERVER; --location) { 2872 if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) { 2873 continue; 2874 } 2875 wait = ets.mPosition[location] == 0 2876 || ets.mPosition[location] == mStartEts.mPosition[location]; 2877 break; 2878 } 2879 } 2880 ALOGV("hasStarted wait:%d ets:%lld start position:%lld", 2881 (int)wait, 2882 (long long)ets.mPosition[location], 2883 (long long)mStartEts.mPosition[location]); 2884 } 2885 return 
!wait; 2886} 2887 2888// ========================================================================= 2889 2890void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused) 2891{ 2892 sp<AudioTrack> audioTrack = mAudioTrack.promote(); 2893 if (audioTrack != 0) { 2894 AutoMutex lock(audioTrack->mLock); 2895 audioTrack->mProxy->binderDied(); 2896 } 2897} 2898 2899// ========================================================================= 2900 2901AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava) 2902 : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL), 2903 mIgnoreNextPausedInt(false) 2904{ 2905} 2906 2907AudioTrack::AudioTrackThread::~AudioTrackThread() 2908{ 2909} 2910 2911bool AudioTrack::AudioTrackThread::threadLoop() 2912{ 2913 { 2914 AutoMutex _l(mMyLock); 2915 if (mPaused) { 2916 mMyCond.wait(mMyLock); 2917 // caller will check for exitPending() 2918 return true; 2919 } 2920 if (mIgnoreNextPausedInt) { 2921 mIgnoreNextPausedInt = false; 2922 mPausedInt = false; 2923 } 2924 if (mPausedInt) { 2925 if (mPausedNs > 0) { 2926 (void) mMyCond.waitRelative(mMyLock, mPausedNs); 2927 } else { 2928 mMyCond.wait(mMyLock); 2929 } 2930 mPausedInt = false; 2931 return true; 2932 } 2933 } 2934 if (exitPending()) { 2935 return false; 2936 } 2937 nsecs_t ns = mReceiver.processAudioBuffer(); 2938 switch (ns) { 2939 case 0: 2940 return true; 2941 case NS_INACTIVE: 2942 pauseInternal(); 2943 return true; 2944 case NS_NEVER: 2945 return false; 2946 case NS_WHENEVER: 2947 // Event driven: call wake() when callback notifications conditions change. 
2948 ns = INT64_MAX; 2949 // fall through 2950 default: 2951 LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns); 2952 pauseInternal(ns); 2953 return true; 2954 } 2955} 2956 2957void AudioTrack::AudioTrackThread::requestExit() 2958{ 2959 // must be in this order to avoid a race condition 2960 Thread::requestExit(); 2961 resume(); 2962} 2963 2964void AudioTrack::AudioTrackThread::pause() 2965{ 2966 AutoMutex _l(mMyLock); 2967 mPaused = true; 2968} 2969 2970void AudioTrack::AudioTrackThread::resume() 2971{ 2972 AutoMutex _l(mMyLock); 2973 mIgnoreNextPausedInt = true; 2974 if (mPaused || mPausedInt) { 2975 mPaused = false; 2976 mPausedInt = false; 2977 mMyCond.signal(); 2978 } 2979} 2980 2981void AudioTrack::AudioTrackThread::wake() 2982{ 2983 AutoMutex _l(mMyLock); 2984 if (!mPaused) { 2985 // wake() might be called while servicing a callback - ignore the next 2986 // pause time and call processAudioBuffer. 2987 mIgnoreNextPausedInt = true; 2988 if (mPausedInt && mPausedNs > 0) { 2989 // audio track is active and internally paused with timeout. 2990 mPausedInt = false; 2991 mMyCond.signal(); 2992 } 2993 } 2994} 2995 2996void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns) 2997{ 2998 AutoMutex _l(mMyLock); 2999 mPausedInt = true; 3000 mPausedNs = ns; 3001} 3002 3003} // namespace android 3004