AudioTrack.cpp revision 620208dc0bbd7a0792702df3ab08800fdad60cec
1/* 2** 3** Copyright 2007, The Android Open Source Project 4** 5** Licensed under the Apache License, Version 2.0 (the "License"); 6** you may not use this file except in compliance with the License. 7** You may obtain a copy of the License at 8** 9** http://www.apache.org/licenses/LICENSE-2.0 10** 11** Unless required by applicable law or agreed to in writing, software 12** distributed under the License is distributed on an "AS IS" BASIS, 13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14** See the License for the specific language governing permissions and 15** limitations under the License. 16*/ 17 18 19//#define LOG_NDEBUG 0 20#define LOG_TAG "AudioTrack" 21#define ATRACE_TAG ATRACE_TAG_AUDIO 22 23#include <sys/resource.h> 24#include <audio_utils/primitives.h> 25#include <binder/IPCThreadState.h> 26#include <media/AudioTrack.h> 27#include <utils/Log.h> 28#include <utils/Trace.h> 29#include <private/media/AudioTrackShared.h> 30#include <media/IAudioFlinger.h> 31 32extern "C" { 33#include "../private/bionic_futex.h" 34} 35 36#define WAIT_PERIOD_MS 10 37#define WAIT_STREAM_END_TIMEOUT_SEC 120 38 39 40namespace android { 41// --------------------------------------------------------------------------- 42 43// static 44status_t AudioTrack::getMinFrameCount( 45 size_t* frameCount, 46 audio_stream_type_t streamType, 47 uint32_t sampleRate) 48{ 49 if (frameCount == NULL) { 50 return BAD_VALUE; 51 } 52 53 // default to 0 in case of error 54 *frameCount = 0; 55 56 // FIXME merge with similar code in createTrack_l(), except we're missing 57 // some information here that is available in createTrack_l(): 58 // audio_io_handle_t output 59 // audio_format_t format 60 // audio_channel_mask_t channelMask 61 // audio_output_flags_t flags 62 uint32_t afSampleRate; 63 if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) { 64 return NO_INIT; 65 } 66 size_t afFrameCount; 67 if (AudioSystem::getOutputFrameCount(&afFrameCount, 
streamType) != NO_ERROR) { 68 return NO_INIT; 69 } 70 uint32_t afLatency; 71 if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) { 72 return NO_INIT; 73 } 74 75 // Ensure that buffer depth covers at least audio hardware latency 76 uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate); 77 if (minBufCount < 2) { 78 minBufCount = 2; 79 } 80 81 *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount : 82 afFrameCount * minBufCount * sampleRate / afSampleRate; 83 ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d", 84 *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency); 85 return NO_ERROR; 86} 87 88// --------------------------------------------------------------------------- 89 90AudioTrack::AudioTrack() 91 : mStatus(NO_INIT), 92 mIsTimed(false), 93 mPreviousPriority(ANDROID_PRIORITY_NORMAL), 94 mPreviousSchedulingGroup(SP_DEFAULT), 95 mPausedPosition(0) 96{ 97} 98 99AudioTrack::AudioTrack( 100 audio_stream_type_t streamType, 101 uint32_t sampleRate, 102 audio_format_t format, 103 audio_channel_mask_t channelMask, 104 int frameCount, 105 audio_output_flags_t flags, 106 callback_t cbf, 107 void* user, 108 int notificationFrames, 109 int sessionId, 110 transfer_type transferType, 111 const audio_offload_info_t *offloadInfo, 112 int uid) 113 : mStatus(NO_INIT), 114 mIsTimed(false), 115 mPreviousPriority(ANDROID_PRIORITY_NORMAL), 116 mPreviousSchedulingGroup(SP_DEFAULT), 117 mPausedPosition(0) 118{ 119 mStatus = set(streamType, sampleRate, format, channelMask, 120 frameCount, flags, cbf, user, notificationFrames, 121 0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, 122 offloadInfo, uid); 123} 124 125AudioTrack::AudioTrack( 126 audio_stream_type_t streamType, 127 uint32_t sampleRate, 128 audio_format_t format, 129 audio_channel_mask_t channelMask, 130 const sp<IMemory>& sharedBuffer, 131 audio_output_flags_t flags, 132 callback_t cbf, 133 void* user, 134 
int notificationFrames, 135 int sessionId, 136 transfer_type transferType, 137 const audio_offload_info_t *offloadInfo, 138 int uid) 139 : mStatus(NO_INIT), 140 mIsTimed(false), 141 mPreviousPriority(ANDROID_PRIORITY_NORMAL), 142 mPreviousSchedulingGroup(SP_DEFAULT), 143 mPausedPosition(0) 144{ 145 mStatus = set(streamType, sampleRate, format, channelMask, 146 0 /*frameCount*/, flags, cbf, user, notificationFrames, 147 sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid); 148} 149 150AudioTrack::~AudioTrack() 151{ 152 if (mStatus == NO_ERROR) { 153 // Make sure that callback function exits in the case where 154 // it is looping on buffer full condition in obtainBuffer(). 155 // Otherwise the callback thread will never exit. 156 stop(); 157 if (mAudioTrackThread != 0) { 158 mProxy->interrupt(); 159 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h 160 mAudioTrackThread->requestExitAndWait(); 161 mAudioTrackThread.clear(); 162 } 163 mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this); 164 mAudioTrack.clear(); 165 IPCThreadState::self()->flushCommands(); 166 AudioSystem::releaseAudioSessionId(mSessionId); 167 } 168} 169 170status_t AudioTrack::set( 171 audio_stream_type_t streamType, 172 uint32_t sampleRate, 173 audio_format_t format, 174 audio_channel_mask_t channelMask, 175 int frameCountInt, 176 audio_output_flags_t flags, 177 callback_t cbf, 178 void* user, 179 int notificationFrames, 180 const sp<IMemory>& sharedBuffer, 181 bool threadCanCallJava, 182 int sessionId, 183 transfer_type transferType, 184 const audio_offload_info_t *offloadInfo, 185 int uid) 186{ 187 switch (transferType) { 188 case TRANSFER_DEFAULT: 189 if (sharedBuffer != 0) { 190 transferType = TRANSFER_SHARED; 191 } else if (cbf == NULL || threadCanCallJava) { 192 transferType = TRANSFER_SYNC; 193 } else { 194 transferType = TRANSFER_CALLBACK; 195 } 196 break; 197 case TRANSFER_CALLBACK: 198 if (cbf == NULL || sharedBuffer != 0) { 199 
ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0"); 200 return BAD_VALUE; 201 } 202 break; 203 case TRANSFER_OBTAIN: 204 case TRANSFER_SYNC: 205 if (sharedBuffer != 0) { 206 ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0"); 207 return BAD_VALUE; 208 } 209 break; 210 case TRANSFER_SHARED: 211 if (sharedBuffer == 0) { 212 ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0"); 213 return BAD_VALUE; 214 } 215 break; 216 default: 217 ALOGE("Invalid transfer type %d", transferType); 218 return BAD_VALUE; 219 } 220 mTransfer = transferType; 221 222 // FIXME "int" here is legacy and will be replaced by size_t later 223 if (frameCountInt < 0) { 224 ALOGE("Invalid frame count %d", frameCountInt); 225 return BAD_VALUE; 226 } 227 size_t frameCount = frameCountInt; 228 229 ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), 230 sharedBuffer->size()); 231 232 ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags); 233 234 AutoMutex lock(mLock); 235 236 // invariant that mAudioTrack != 0 is true only after set() returns successfully 237 if (mAudioTrack != 0) { 238 ALOGE("Track already in use"); 239 return INVALID_OPERATION; 240 } 241 242 mOutput = 0; 243 244 // handle default values first. 245 if (streamType == AUDIO_STREAM_DEFAULT) { 246 streamType = AUDIO_STREAM_MUSIC; 247 } 248 249 if (sampleRate == 0) { 250 uint32_t afSampleRate; 251 if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) { 252 return NO_INIT; 253 } 254 sampleRate = afSampleRate; 255 } 256 mSampleRate = sampleRate; 257 258 // these below should probably come from the audioFlinger too... 
259 if (format == AUDIO_FORMAT_DEFAULT) { 260 format = AUDIO_FORMAT_PCM_16_BIT; 261 } 262 if (channelMask == 0) { 263 channelMask = AUDIO_CHANNEL_OUT_STEREO; 264 } 265 266 // validate parameters 267 if (!audio_is_valid_format(format)) { 268 ALOGE("Invalid format %d", format); 269 return BAD_VALUE; 270 } 271 272 // AudioFlinger does not currently support 8-bit data in shared memory 273 if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) { 274 ALOGE("8-bit data in shared memory is not supported"); 275 return BAD_VALUE; 276 } 277 278 // force direct flag if format is not linear PCM 279 // or offload was requested 280 if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) 281 || !audio_is_linear_pcm(format)) { 282 ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) 283 ? "Offload request, forcing to Direct Output" 284 : "Not linear PCM, forcing to Direct Output"); 285 flags = (audio_output_flags_t) 286 // FIXME why can't we allow direct AND fast? 287 ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST); 288 } 289 // only allow deep buffering for music stream type 290 if (streamType != AUDIO_STREAM_MUSIC) { 291 flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER); 292 } 293 294 if (!audio_is_output_channel(channelMask)) { 295 ALOGE("Invalid channel mask %#x", channelMask); 296 return BAD_VALUE; 297 } 298 mChannelMask = channelMask; 299 uint32_t channelCount = popcount(channelMask); 300 mChannelCount = channelCount; 301 302 if (audio_is_linear_pcm(format)) { 303 mFrameSize = channelCount * audio_bytes_per_sample(format); 304 mFrameSizeAF = channelCount * sizeof(int16_t); 305 } else { 306 mFrameSize = sizeof(uint8_t); 307 mFrameSizeAF = sizeof(uint8_t); 308 } 309 310 audio_io_handle_t output = AudioSystem::getOutput( 311 streamType, 312 sampleRate, format, channelMask, 313 flags, 314 offloadInfo); 315 316 if (output == 0) { 317 ALOGE("Could not get audio output for stream type %d", streamType); 318 return BAD_VALUE; 319 } 320 321 mVolume[LEFT] 
= 1.0f; 322 mVolume[RIGHT] = 1.0f; 323 mSendLevel = 0.0f; 324 mFrameCount = frameCount; 325 mReqFrameCount = frameCount; 326 mNotificationFramesReq = notificationFrames; 327 mNotificationFramesAct = 0; 328 mSessionId = sessionId; 329 if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) { 330 mClientUid = IPCThreadState::self()->getCallingUid(); 331 } else { 332 mClientUid = uid; 333 } 334 mAuxEffectId = 0; 335 mFlags = flags; 336 mCbf = cbf; 337 338 if (cbf != NULL) { 339 mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava); 340 mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/); 341 } 342 343 // create the IAudioTrack 344 status_t status = createTrack_l(streamType, 345 sampleRate, 346 format, 347 frameCount, 348 flags, 349 sharedBuffer, 350 output, 351 0 /*epoch*/); 352 353 if (status != NO_ERROR) { 354 if (mAudioTrackThread != 0) { 355 mAudioTrackThread->requestExit(); // see comment in AudioTrack.h 356 mAudioTrackThread->requestExitAndWait(); 357 mAudioTrackThread.clear(); 358 } 359 //Use of direct and offloaded output streams is ref counted by audio policy manager. 360 // As getOutput was called above and resulted in an output stream to be opened, 361 // we need to release it. 
362 AudioSystem::releaseOutput(output); 363 return status; 364 } 365 366 mStatus = NO_ERROR; 367 mStreamType = streamType; 368 mFormat = format; 369 mSharedBuffer = sharedBuffer; 370 mState = STATE_STOPPED; 371 mUserData = user; 372 mLoopPeriod = 0; 373 mMarkerPosition = 0; 374 mMarkerReached = false; 375 mNewPosition = 0; 376 mUpdatePeriod = 0; 377 AudioSystem::acquireAudioSessionId(mSessionId); 378 mSequence = 1; 379 mObservedSequence = mSequence; 380 mInUnderrun = false; 381 mOutput = output; 382 383 return NO_ERROR; 384} 385 386// ------------------------------------------------------------------------- 387 388status_t AudioTrack::start() 389{ 390 AutoMutex lock(mLock); 391 392 if (mState == STATE_ACTIVE) { 393 return INVALID_OPERATION; 394 } 395 396 mInUnderrun = true; 397 398 State previousState = mState; 399 if (previousState == STATE_PAUSED_STOPPING) { 400 mState = STATE_STOPPING; 401 } else { 402 mState = STATE_ACTIVE; 403 } 404 if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) { 405 // reset current position as seen by client to 0 406 mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition()); 407 // force refresh of remaining frames by processAudioBuffer() as last 408 // write before stop could be partial. 
409 mRefreshRemaining = true; 410 } 411 mNewPosition = mProxy->getPosition() + mUpdatePeriod; 412 int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags); 413 414 sp<AudioTrackThread> t = mAudioTrackThread; 415 if (t != 0) { 416 if (previousState == STATE_STOPPING) { 417 mProxy->interrupt(); 418 } else { 419 t->resume(); 420 } 421 } else { 422 mPreviousPriority = getpriority(PRIO_PROCESS, 0); 423 get_sched_policy(0, &mPreviousSchedulingGroup); 424 androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO); 425 } 426 427 status_t status = NO_ERROR; 428 if (!(flags & CBLK_INVALID)) { 429 status = mAudioTrack->start(); 430 if (status == DEAD_OBJECT) { 431 flags |= CBLK_INVALID; 432 } 433 } 434 if (flags & CBLK_INVALID) { 435 status = restoreTrack_l("start"); 436 } 437 438 if (status != NO_ERROR) { 439 ALOGE("start() status %d", status); 440 mState = previousState; 441 if (t != 0) { 442 if (previousState != STATE_STOPPING) { 443 t->pause(); 444 } 445 } else { 446 setpriority(PRIO_PROCESS, 0, mPreviousPriority); 447 set_sched_policy(0, mPreviousSchedulingGroup); 448 } 449 } 450 451 return status; 452} 453 454void AudioTrack::stop() 455{ 456 AutoMutex lock(mLock); 457 // FIXME pause then stop should not be a nop 458 if (mState != STATE_ACTIVE) { 459 return; 460 } 461 462 if (isOffloaded()) { 463 mState = STATE_STOPPING; 464 } else { 465 mState = STATE_STOPPED; 466 } 467 468 mProxy->interrupt(); 469 mAudioTrack->stop(); 470 // the playback head position will reset to 0, so if a marker is set, we need 471 // to activate it again 472 mMarkerReached = false; 473#if 0 474 // Force flush if a shared buffer is used otherwise audioflinger 475 // will not stop before end of buffer is reached. 476 // It may be needed to make sure that we stop playback, likely in case looping is on. 
477 if (mSharedBuffer != 0) { 478 flush_l(); 479 } 480#endif 481 482 sp<AudioTrackThread> t = mAudioTrackThread; 483 if (t != 0) { 484 if (!isOffloaded()) { 485 t->pause(); 486 } 487 } else { 488 setpriority(PRIO_PROCESS, 0, mPreviousPriority); 489 set_sched_policy(0, mPreviousSchedulingGroup); 490 } 491} 492 493bool AudioTrack::stopped() const 494{ 495 AutoMutex lock(mLock); 496 return mState != STATE_ACTIVE; 497} 498 499void AudioTrack::flush() 500{ 501 if (mSharedBuffer != 0) { 502 return; 503 } 504 AutoMutex lock(mLock); 505 if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) { 506 return; 507 } 508 flush_l(); 509} 510 511void AudioTrack::flush_l() 512{ 513 ALOG_ASSERT(mState != STATE_ACTIVE); 514 515 // clear playback marker and periodic update counter 516 mMarkerPosition = 0; 517 mMarkerReached = false; 518 mUpdatePeriod = 0; 519 mRefreshRemaining = true; 520 521 mState = STATE_FLUSHED; 522 if (isOffloaded()) { 523 mProxy->interrupt(); 524 } 525 mProxy->flush(); 526 mAudioTrack->flush(); 527} 528 529void AudioTrack::pause() 530{ 531 AutoMutex lock(mLock); 532 if (mState == STATE_ACTIVE) { 533 mState = STATE_PAUSED; 534 } else if (mState == STATE_STOPPING) { 535 mState = STATE_PAUSED_STOPPING; 536 } else { 537 return; 538 } 539 mProxy->interrupt(); 540 mAudioTrack->pause(); 541 542 if (isOffloaded()) { 543 if (mOutput != 0) { 544 uint32_t halFrames; 545 // OffloadThread sends HAL pause in its threadLoop.. 
time saved 546 // here can be slightly off 547 AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition); 548 ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition); 549 } 550 } 551} 552 553status_t AudioTrack::setVolume(float left, float right) 554{ 555 if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) { 556 return BAD_VALUE; 557 } 558 559 AutoMutex lock(mLock); 560 mVolume[LEFT] = left; 561 mVolume[RIGHT] = right; 562 563 mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000)); 564 565 if (isOffloaded()) { 566 mAudioTrack->signal(); 567 } 568 return NO_ERROR; 569} 570 571status_t AudioTrack::setVolume(float volume) 572{ 573 return setVolume(volume, volume); 574} 575 576status_t AudioTrack::setAuxEffectSendLevel(float level) 577{ 578 if (level < 0.0f || level > 1.0f) { 579 return BAD_VALUE; 580 } 581 582 AutoMutex lock(mLock); 583 mSendLevel = level; 584 mProxy->setSendLevel(level); 585 586 return NO_ERROR; 587} 588 589void AudioTrack::getAuxEffectSendLevel(float* level) const 590{ 591 if (level != NULL) { 592 *level = mSendLevel; 593 } 594} 595 596status_t AudioTrack::setSampleRate(uint32_t rate) 597{ 598 if (mIsTimed || isOffloaded()) { 599 return INVALID_OPERATION; 600 } 601 602 uint32_t afSamplingRate; 603 if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) { 604 return NO_INIT; 605 } 606 // Resampler implementation limits input sampling rate to 2 x output sampling rate. 607 if (rate == 0 || rate > afSamplingRate*2 ) { 608 return BAD_VALUE; 609 } 610 611 AutoMutex lock(mLock); 612 mSampleRate = rate; 613 mProxy->setSampleRate(rate); 614 615 return NO_ERROR; 616} 617 618uint32_t AudioTrack::getSampleRate() const 619{ 620 if (mIsTimed) { 621 return 0; 622 } 623 624 AutoMutex lock(mLock); 625 626 // sample rate can be updated during playback by the offloaded decoder so we need to 627 // query the HAL and update if needed. 
628// FIXME use Proxy return channel to update the rate from server and avoid polling here 629 if (isOffloaded()) { 630 if (mOutput != 0) { 631 uint32_t sampleRate = 0; 632 status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate); 633 if (status == NO_ERROR) { 634 mSampleRate = sampleRate; 635 } 636 } 637 } 638 return mSampleRate; 639} 640 641status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount) 642{ 643 if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) { 644 return INVALID_OPERATION; 645 } 646 647 if (loopCount == 0) { 648 ; 649 } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount && 650 loopEnd - loopStart >= MIN_LOOP) { 651 ; 652 } else { 653 return BAD_VALUE; 654 } 655 656 AutoMutex lock(mLock); 657 // See setPosition() regarding setting parameters such as loop points or position while active 658 if (mState == STATE_ACTIVE) { 659 return INVALID_OPERATION; 660 } 661 setLoop_l(loopStart, loopEnd, loopCount); 662 return NO_ERROR; 663} 664 665void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount) 666{ 667 // FIXME If setting a loop also sets position to start of loop, then 668 // this is correct. Otherwise it should be removed. 669 mNewPosition = mProxy->getPosition() + mUpdatePeriod; 670 mLoopPeriod = loopCount != 0 ? 
loopEnd - loopStart : 0; 671 mStaticProxy->setLoop(loopStart, loopEnd, loopCount); 672} 673 674status_t AudioTrack::setMarkerPosition(uint32_t marker) 675{ 676 // The only purpose of setting marker position is to get a callback 677 if (mCbf == NULL || isOffloaded()) { 678 return INVALID_OPERATION; 679 } 680 681 AutoMutex lock(mLock); 682 mMarkerPosition = marker; 683 mMarkerReached = false; 684 685 return NO_ERROR; 686} 687 688status_t AudioTrack::getMarkerPosition(uint32_t *marker) const 689{ 690 if (isOffloaded()) { 691 return INVALID_OPERATION; 692 } 693 if (marker == NULL) { 694 return BAD_VALUE; 695 } 696 697 AutoMutex lock(mLock); 698 *marker = mMarkerPosition; 699 700 return NO_ERROR; 701} 702 703status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod) 704{ 705 // The only purpose of setting position update period is to get a callback 706 if (mCbf == NULL || isOffloaded()) { 707 return INVALID_OPERATION; 708 } 709 710 AutoMutex lock(mLock); 711 mNewPosition = mProxy->getPosition() + updatePeriod; 712 mUpdatePeriod = updatePeriod; 713 return NO_ERROR; 714} 715 716status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const 717{ 718 if (isOffloaded()) { 719 return INVALID_OPERATION; 720 } 721 if (updatePeriod == NULL) { 722 return BAD_VALUE; 723 } 724 725 AutoMutex lock(mLock); 726 *updatePeriod = mUpdatePeriod; 727 728 return NO_ERROR; 729} 730 731status_t AudioTrack::setPosition(uint32_t position) 732{ 733 if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) { 734 return INVALID_OPERATION; 735 } 736 if (position > mFrameCount) { 737 return BAD_VALUE; 738 } 739 740 AutoMutex lock(mLock); 741 // Currently we require that the player is inactive before setting parameters such as position 742 // or loop points. 
Otherwise, there could be a race condition: the application could read the 743 // current position, compute a new position or loop parameters, and then set that position or 744 // loop parameters but it would do the "wrong" thing since the position has continued to advance 745 // in the mean time. If we ever provide a sequencer in server, we could allow a way for the app 746 // to specify how it wants to handle such scenarios. 747 if (mState == STATE_ACTIVE) { 748 return INVALID_OPERATION; 749 } 750 mNewPosition = mProxy->getPosition() + mUpdatePeriod; 751 mLoopPeriod = 0; 752 // FIXME Check whether loops and setting position are incompatible in old code. 753 // If we use setLoop for both purposes we lose the capability to set the position while looping. 754 mStaticProxy->setLoop(position, mFrameCount, 0); 755 756 return NO_ERROR; 757} 758 759status_t AudioTrack::getPosition(uint32_t *position) const 760{ 761 if (position == NULL) { 762 return BAD_VALUE; 763 } 764 765 AutoMutex lock(mLock); 766 if (isOffloaded()) { 767 uint32_t dspFrames = 0; 768 769 if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) { 770 ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition); 771 *position = mPausedPosition; 772 return NO_ERROR; 773 } 774 775 if (mOutput != 0) { 776 uint32_t halFrames; 777 AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames); 778 } 779 *position = dspFrames; 780 } else { 781 // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes 782 *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 
0 : 783 mProxy->getPosition(); 784 } 785 return NO_ERROR; 786} 787 788status_t AudioTrack::getBufferPosition(size_t *position) 789{ 790 if (mSharedBuffer == 0 || mIsTimed) { 791 return INVALID_OPERATION; 792 } 793 if (position == NULL) { 794 return BAD_VALUE; 795 } 796 797 AutoMutex lock(mLock); 798 *position = mStaticProxy->getBufferPosition(); 799 return NO_ERROR; 800} 801 802status_t AudioTrack::reload() 803{ 804 if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) { 805 return INVALID_OPERATION; 806 } 807 808 AutoMutex lock(mLock); 809 // See setPosition() regarding setting parameters such as loop points or position while active 810 if (mState == STATE_ACTIVE) { 811 return INVALID_OPERATION; 812 } 813 mNewPosition = mUpdatePeriod; 814 mLoopPeriod = 0; 815 // FIXME The new code cannot reload while keeping a loop specified. 816 // Need to check how the old code handled this, and whether it's a significant change. 817 mStaticProxy->setLoop(0, mFrameCount, 0); 818 return NO_ERROR; 819} 820 821audio_io_handle_t AudioTrack::getOutput() 822{ 823 AutoMutex lock(mLock); 824 return mOutput; 825} 826 827// must be called with mLock held 828audio_io_handle_t AudioTrack::getOutput_l() 829{ 830 if (mOutput) { 831 return mOutput; 832 } else { 833 return AudioSystem::getOutput(mStreamType, 834 mSampleRate, mFormat, mChannelMask, mFlags); 835 } 836} 837 838status_t AudioTrack::attachAuxEffect(int effectId) 839{ 840 AutoMutex lock(mLock); 841 status_t status = mAudioTrack->attachAuxEffect(effectId); 842 if (status == NO_ERROR) { 843 mAuxEffectId = effectId; 844 } 845 return status; 846} 847 848// ------------------------------------------------------------------------- 849 850// must be called with mLock held 851status_t AudioTrack::createTrack_l( 852 audio_stream_type_t streamType, 853 uint32_t sampleRate, 854 audio_format_t format, 855 size_t frameCount, 856 audio_output_flags_t flags, 857 const sp<IMemory>& sharedBuffer, 858 audio_io_handle_t output, 859 size_t epoch) 860{ 
861 status_t status; 862 const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger(); 863 if (audioFlinger == 0) { 864 ALOGE("Could not get audioflinger"); 865 return NO_INIT; 866 } 867 868 // Not all of these values are needed under all conditions, but it is easier to get them all 869 870 uint32_t afLatency; 871 status = AudioSystem::getLatency(output, streamType, &afLatency); 872 if (status != NO_ERROR) { 873 ALOGE("getLatency(%d) failed status %d", output, status); 874 return NO_INIT; 875 } 876 877 size_t afFrameCount; 878 status = AudioSystem::getFrameCount(output, streamType, &afFrameCount); 879 if (status != NO_ERROR) { 880 ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status); 881 return NO_INIT; 882 } 883 884 uint32_t afSampleRate; 885 status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate); 886 if (status != NO_ERROR) { 887 ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status); 888 return NO_INIT; 889 } 890 891 // Client decides whether the track is TIMED (see below), but can only express a preference 892 // for FAST. Server will perform additional tests. 
893 if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !( 894 // either of these use cases: 895 // use case 1: shared buffer 896 (sharedBuffer != 0) || 897 // use case 2: callback handler 898 (mCbf != NULL))) { 899 ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client"); 900 // once denied, do not request again if IAudioTrack is re-created 901 flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST); 902 mFlags = flags; 903 } 904 ALOGV("createTrack_l() output %d afLatency %d", output, afLatency); 905 906 // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where 907 // n = 1 fast track with single buffering; nBuffering is ignored 908 // n = 2 fast track with double buffering 909 // n = 2 normal track, no sample rate conversion 910 // n = 3 normal track, with sample rate conversion 911 // (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering) 912 // n > 3 very high latency or very small notification interval; nBuffering is ignored 913 const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3; 914 915 mNotificationFramesAct = mNotificationFramesReq; 916 917 if (!audio_is_linear_pcm(format)) { 918 919 if (sharedBuffer != 0) { 920 // Same comment as below about ignoring frameCount parameter for set() 921 frameCount = sharedBuffer->size(); 922 } else if (frameCount == 0) { 923 frameCount = afFrameCount; 924 } 925 if (mNotificationFramesAct != frameCount) { 926 mNotificationFramesAct = frameCount; 927 } 928 } else if (sharedBuffer != 0) { 929 930 // Ensure that buffer alignment matches channel count 931 // 8-bit data in shared memory is not currently supported by AudioFlinger 932 size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 
1 : */ 2; 933 if (mChannelCount > 1) { 934 // More than 2 channels does not require stronger alignment than stereo 935 alignment <<= 1; 936 } 937 if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) { 938 ALOGE("Invalid buffer alignment: address %p, channel count %u", 939 sharedBuffer->pointer(), mChannelCount); 940 return BAD_VALUE; 941 } 942 943 // When initializing a shared buffer AudioTrack via constructors, 944 // there's no frameCount parameter. 945 // But when initializing a shared buffer AudioTrack via set(), 946 // there _is_ a frameCount parameter. We silently ignore it. 947 frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t); 948 949 } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) { 950 951 // FIXME move these calculations and associated checks to server 952 953 // Ensure that buffer depth covers at least audio hardware latency 954 uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate); 955 ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d", 956 afFrameCount, minBufCount, afSampleRate, afLatency); 957 if (minBufCount <= nBuffering) { 958 minBufCount = nBuffering; 959 } 960 961 size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate; 962 ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u" 963 ", afLatency=%d", 964 minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency); 965 966 if (frameCount == 0) { 967 frameCount = minFrameCount; 968 } else if (frameCount < minFrameCount) { 969 // not ALOGW because it happens all the time when playing key clicks over A2DP 970 ALOGV("Minimum buffer size corrected from %d to %d", 971 frameCount, minFrameCount); 972 frameCount = minFrameCount; 973 } 974 // Make sure that application is notified with sufficient margin before underrun 975 if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) { 976 mNotificationFramesAct = frameCount/nBuffering; 977 } 978 979 } 
else {
        // For fast tracks, the frame count calculations and checks are done by server
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    // For a fast track, pass the callback thread's tid so the server can boost its priority.
    pid_t tid = -1;
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    // Binder call to the media server; trackFlags is in-out — the server clears any flag
    // (FAST, OFFLOAD) it refused to grant.
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      sampleRate,
                                                      // AudioFlinger only sees 16-bit PCM
                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
                                                              AUDIO_FORMAT_PCM_16_BIT : format,
                                                      mChannelMask,
                                                      frameCount,
                                                      &trackFlags,
                                                      sharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mName,
                                                      mClientUid,
                                                      &status);

    if (track == 0) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        return status;
    }
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
    mCblk = cblk;
    size_t temp = cblk->frameCount_;
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        // NOTE(review): frameCount/temp are size_t formatted with %u — fine on 32-bit
        // builds; would need %zu if ever built for 64-bit. Confirm build targets.
        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
    }
    frameCount = temp;
    mAwaitBoost = false;
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
            mAwaitBoost = true;
            if (sharedBuffer == 0) {
                // Theoretically double-buffering is not required for fast tracks,
                // due to tighter scheduling. But in practice, to accommodate kernels with
                // scheduling jitter, and apps with computation jitter, we use double-buffering.
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        } else {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
            // once denied, do not request again if IAudioTrack is re-created
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
            mFlags = flags;
            if (sharedBuffer == 0) {
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        }
    }
    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
            mFlags = flags;
            // Unlike the FAST case, offload denial is a hard failure for this attempt.
            return NO_INIT;
        }
    }

    mRefreshRemaining = true;

    // Starting address of buffers in shared memory. If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block. This address is for the mapping within client
    // address space. AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (sharedBuffer == 0) {
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
    } else {
        buffers = sharedBuffer->pointer();
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME don't believe this lie
    mLatency = afLatency + (1000*frameCount) / sampleRate;
    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease. This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy: streaming tracks get a plain client proxy, static (shared-buffer)
    // tracks get the static variant which also supports setLoop()/getBufferPosition().
    if (sharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
        mProxy = mStaticProxy;
    }
    // Volume is packed as 4.12 fixed-point, right channel in the high 16 bits.
    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
            uint16_t(mVolume[LEFT] * 0x1000));
    mProxy->setSendLevel(mSendLevel);
    mProxy->setSampleRate(mSampleRate);
    mProxy->setEpoch(epoch);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
}

// Legacy TRANSFER_OBTAIN entry point: converts waitCount (-1 = forever, 0 = non-blocking,
// >0 = that many WAIT_PERIOD_MS periods) into a timespec and delegates to the
// timespec-based obtainBuffer() below.
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    if (audioBuffer == NULL) {
        return BAD_VALUE;
    }
    if (mTransfer != TRANSFER_OBTAIN) {
        audioBuffer->frameCount = 0;
        audioBuffer->size = 0;
        audioBuffer->raw = NULL;
        return INVALID_OPERATION;
    }

    const struct timespec *requested;
    struct timespec timeout;
    if (waitCount == -1) {
        requested = &ClientProxy::kForever;
    } else if (waitCount == 0) {
        requested = &ClientProxy::kNonBlocking;
    } else if (waitCount > 0) {
        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
        timeout.tv_sec = ms / 1000;
        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
        requested = &timeout;
    } else {
        ALOGE("%s invalid waitCount %d", __func__, waitCount);
        requested = NULL;
    }
    return obtainBuffer(audioBuffer, requested);
}

// Core buffer-acquisition loop. Retries up to kMaxTries times when the proxy reports
// DEAD_OBJECT (media server death or voluntary invalidation), re-creating the
// IAudioTrack via restoreTrack_l() between attempts.
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);

    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));

    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}

// Returns a previously obtained buffer to the server. No-op for shared-buffer tracks,
// and restarts a track that the server disabled after an underrun.
void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    if (mTransfer == TRANSFER_SHARED) {
        return;
    }

    size_t stepCount = audioBuffer->size / mFrameSizeAF;
    if (stepCount == 0) {
        return;
    }

    Proxy::Buffer buffer;
    buffer.mFrameCount = stepCount;
    buffer.mRaw = audioBuffer->raw;

    AutoMutex lock(mLock);
    mInUnderrun = false;
    mProxy->releaseBuffer(&buffer);

    // restart track if it was disabled by audioflinger due to previous underrun
    if (mState == STATE_ACTIVE) {
        audio_track_cblk_t* cblk = mCblk;
        // atomically clear CBLK_DISABLED and test whether it had been set
        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
                    this, mName.string());
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
}

// -------------------------------------------------------------------------

// Blocking write for TRANSFER_SYNC streaming tracks. Copies userSize bytes into the
// shared buffer, expanding 8-bit PCM to 16-bit in flight for non-direct outputs.
// Returns bytes consumed from the caller's buffer, or a negative status.
ssize_t AudioTrack::write(const void* buffer, size_t userSize)
{
    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
        return INVALID_OPERATION;
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        // NOTE(review): log message is missing a closing ')' — cosmetic, left as-is.
        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t toWrite;
        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            // Divide capacity by 2 to take expansion into account
            toWrite = audioBuffer.size >> 1;
            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
        } else {
            toWrite = audioBuffer.size;
            memcpy(audioBuffer.i8, buffer, toWrite);
        }
        buffer = ((const char *) buffer) + toWrite;
        userSize -= toWrite;
        written += toWrite;

        releaseBuffer(&audioBuffer);
    }

    return written;
}

// -------------------------------------------------------------------------

TimedAudioTrack::TimedAudioTrack() {
    mIsTimed = true;
}

// Allocates a shared-memory buffer for a timed write. If the server is dead the track
// is flagged invalid and one restore/retry is attempted inline.
status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
{
    AutoMutex lock(mLock);
    status_t result = UNKNOWN_ERROR;

#if 1
    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
    // while we are accessing the cblk
    sp<IAudioTrack> audioTrack = mAudioTrack;
    sp<IMemory> iMem = mCblkMemory;
#endif

    // If the track is not invalid already, try to allocate a buffer. alloc
    // fails indicating that the server is dead, flag the track as invalid so
    // we can attempt to restore in just a bit.
    audio_track_cblk_t* cblk = mCblk;
    if (!(cblk->mFlags & CBLK_INVALID)) {
        result = mAudioTrack->allocateTimedBuffer(size, buffer);
        if (result == DEAD_OBJECT) {
            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
        }
    }

    // If the track is invalid at this point, attempt to restore it. and try the
    // allocation one more time.
    if (cblk->mFlags & CBLK_INVALID) {
        result = restoreTrack_l("allocateTimedBuffer");

        if (result == NO_ERROR) {
            result = mAudioTrack->allocateTimedBuffer(size, buffer);
        }
    }

    return result;
}

// Queues a previously allocated timed buffer for playback at presentation time pts,
// restarting the track if the server disabled it after an underrun.
status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
                                           int64_t pts)
{
    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
    {
        AutoMutex lock(mLock);
        audio_track_cblk_t* cblk = mCblk;
        // restart track if it was disabled by audioflinger due to previous underrun
        if (buffer->size() != 0 && status == NO_ERROR &&
                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
    return status;
}

// Forwards the media-time to common-time transform to the server-side track.
status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
                                                TargetTimeline target)
{
    return mAudioTrack->setMediaTimeTransform(xform, target);
}

//
// -------------------------------------------------------------------------

// Body of the AudioTrackThread loop: services callback events (underrun, loop end,
// marker, new position, stream end, track re-creation) and, for TRANSFER_CALLBACK
// tracks, pulls data from the client via EVENT_MORE_DATA. Returns the number of
// nanoseconds until it should run again, or one of the NS_* sentinels
// (0 = immediately, NS_INACTIVE = pause until resumed, NS_NEVER = exit).
nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
{
    // Currently the AudioTrack thread is not created if there are no callbacks.
    // Would it ever make sense to run the thread, even without callbacks?
    // If so, then replace this by checks at each use for mCbf != NULL.
    LOG_ALWAYS_FATAL_IF(mCblk == NULL);

    mLock.lock();
    if (mAwaitBoost) {
        mAwaitBoost = false;
        mLock.unlock();
        static const int32_t kMaxTries = 5;
        int32_t tryCounter = kMaxTries;
        uint32_t pollUs = 10000;
        // Poll with exponential backoff until the server has boosted this thread to
        // a real-time scheduling policy.
        do {
            int policy = sched_getscheduler(0);
            if (policy == SCHED_FIFO || policy == SCHED_RR) {
                break;
            }
            usleep(pollUs);
            pollUs <<= 1;
        } while (tryCounter-- > 0);
        if (tryCounter < 0) {
            ALOGE("did not receive expected priority boost on time");
        }
        // Run again immediately
        return 0;
    }

    // Can only reference mCblk while locked
    int32_t flags = android_atomic_and(
        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);

    // Check for track invalidation
    if (flags & CBLK_INVALID) {
        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
        // AudioSystem cache. We should not exit here but after calling the callback so
        // that the upper layers can recreate the track
        if (!isOffloaded() || (mSequence == mObservedSequence)) {
            // NOTE(review): 'status' is unused; a restore failure is presumably retried
            // on the next pass while CBLK_INVALID is still set — confirm.
            status_t status = restoreTrack_l("processAudioBuffer");
            mLock.unlock();
            // Run again immediately, but with a new IAudioTrack
            return 0;
        }
    }

    bool waitStreamEnd = mState == STATE_STOPPING;
    bool active = mState == STATE_ACTIVE;

    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
    bool newUnderrun = false;
    if (flags & CBLK_UNDERRUN) {
#if 0
        // Currently in shared buffer mode, when the server reaches the end of buffer,
        // the track stays active in continuous underrun state. It's up to the application
        // to pause or stop the track, or set the position to a new offset within buffer.
        // This was some experimental code to auto-pause on underrun. Keeping it here
        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
        if (mTransfer == TRANSFER_SHARED) {
            mState = STATE_PAUSED;
            active = false;
        }
#endif
        if (!mInUnderrun) {
            mInUnderrun = true;
            newUnderrun = true;
        }
    }

    // Get current position of server
    size_t position = mProxy->getPosition();

    // Manage marker callback
    bool markerReached = false;
    size_t markerPosition = mMarkerPosition;
    // FIXME fails for wraparound, need 64 bits
    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
        mMarkerReached = markerReached = true;
    }

    // Determine number of new position callback(s) that will be needed, while locked
    size_t newPosCount = 0;
    size_t newPosition = mNewPosition;
    size_t updatePeriod = mUpdatePeriod;
    // FIXME fails for wraparound, need 64 bits
    if (updatePeriod > 0 && position >= newPosition) {
        newPosCount = ((position - newPosition) / updatePeriod) + 1;
        mNewPosition += updatePeriod * newPosCount;
    }

    // Cache other fields that will be needed soon
    uint32_t loopPeriod = mLoopPeriod;
    uint32_t sampleRate = mSampleRate;
    size_t notificationFrames = mNotificationFramesAct;
    if (mRefreshRemaining) {
        mRefreshRemaining = false;
        mRemainingFrames = notificationFrames;
        mRetryOnPartialBuffer = false;
    }
    size_t misalignment = mProxy->getMisalignment();
    uint32_t sequence = mSequence;
    sp<AudioTrackClientProxy> proxy = mProxy;

    // These fields don't need to be cached, because they are assigned only by set():
    // mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
    // mFlags is also assigned by createTrack_l(), but not the bit we care about.

    mLock.unlock();

    if (waitStreamEnd) {
        struct timespec timeout;
        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
        timeout.tv_nsec = 0;

        status_t status = proxy->waitStreamEndDone(&timeout);
        switch (status) {
        case NO_ERROR:
        case DEAD_OBJECT:
        case TIMED_OUT:
            mCbf(EVENT_STREAM_END, mUserData, NULL);
            {
                AutoMutex lock(mLock);
                // The previously assigned value of waitStreamEnd is no longer valid,
                // since the mutex has been unlocked and either the callback handler
                // or another thread could have re-started the AudioTrack during that time.
                waitStreamEnd = mState == STATE_STOPPING;
                if (waitStreamEnd) {
                    mState = STATE_STOPPED;
                }
            }
            if (waitStreamEnd && status != DEAD_OBJECT) {
                return NS_INACTIVE;
            }
            break;
        }
        return 0;
    }

    // perform callbacks while unlocked
    if (newUnderrun) {
        mCbf(EVENT_UNDERRUN, mUserData, NULL);
    }
    // FIXME we will miss loops if loop cycle was signaled several times since last call
    //       to processAudioBuffer()
    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
        mCbf(EVENT_LOOP_END, mUserData, NULL);
    }
    if (flags & CBLK_BUFFER_END) {
        mCbf(EVENT_BUFFER_END, mUserData, NULL);
    }
    if (markerReached) {
        mCbf(EVENT_MARKER, mUserData, &markerPosition);
    }
    while (newPosCount > 0) {
        size_t temp = newPosition;
        mCbf(EVENT_NEW_POS, mUserData, &temp);
        newPosition += updatePeriod;
        newPosCount--;
    }

    if (mObservedSequence != sequence) {
        mObservedSequence = sequence;
        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
        // for offloaded tracks, just wait for the upper layers to recreate the track
        if (isOffloaded()) {
            return NS_INACTIVE;
        }
    }

    // if inactive, then don't run me again until re-started
    if (!active) {
        return NS_INACTIVE;
    }

    // Compute the estimated time until the next timed event (position, markers, loops)
    // FIXME only for non-compressed audio
    uint32_t minFrames = ~0;
    if (!markerReached && position < markerPosition) {
        minFrames = markerPosition - position;
    }
    if (loopPeriod > 0 && loopPeriod < minFrames) {
        minFrames = loopPeriod;
    }
    if (updatePeriod > 0 && updatePeriod < minFrames) {
        minFrames = updatePeriod;
    }

    // If > 0, poll periodically to recover from a stuck server. A good value is 2.
    static const uint32_t kPoll = 0;
    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
        minFrames = kPoll * notificationFrames;
    }

    // Convert frame units to time units
    nsecs_t ns = NS_WHENEVER;
    if (minFrames != (uint32_t) ~0) {
        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
    }

    // If not supplying data by EVENT_MORE_DATA, then we're done
    if (mTransfer != TRANSFER_CALLBACK) {
        return ns;
    }

    struct timespec timeout;
    const struct timespec *requested = &ClientProxy::kForever;
    if (ns != NS_WHENEVER) {
        timeout.tv_sec = ns / 1000000000LL;
        timeout.tv_nsec = ns % 1000000000LL;
        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
        requested = &timeout;
    }

    while (mRemainingFrames > 0) {

        Buffer audioBuffer;
        audioBuffer.frameCount = mRemainingFrames;
        size_t nonContig;
        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
        requested = &ClientProxy::kNonBlocking;
        size_t avail = audioBuffer.frameCount + nonContig;
        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
        if (err != NO_ERROR) {
            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
                    (isOffloaded() && (err == DEAD_OBJECT))) {
                return 0;
            }
            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
            return NS_NEVER;
        }

        if (mRetryOnPartialBuffer && !isOffloaded()) {
            mRetryOnPartialBuffer = false;
            if (avail < mRemainingFrames) {
                // 1.1e9 scales frames-to-ns with a 10% margin — presumably to allow
                // for server-side progress jitter; confirm intent.
                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
                if (ns < 0 || myns < ns) {
                    ns = myns;
                }
                int32_t old = android_atomic_and(~CBLK_FUTEX_WAKE, &mCblk->mFutex);
                char str[64] = {0};
                struct timespec ts;

                // NOTE(review): trace label says "Us" but ns/1000 is microseconds ("us");
                // cosmetic only, left as-is.
                snprintf(str, sizeof(str), "futex_wait timeout %lld Us", ns/1000LL);

                ATRACE_BEGIN(str);
                // NOTE(review): ns is not split into seconds + nanoseconds; if ns >= 1s,
                // tv_nsec >= 1e9 makes this timespec invalid for FUTEX_WAIT (EINVAL) and
                // the wait returns immediately — confirm ns is always < 1s here.
                ts.tv_sec = 0;
                ts.tv_nsec = ns;
                // wait for max ns allowing server to wake us up if possible
                // NOTE(review): 'ret' is unused; futex errors are deliberately ignored?
                int ret = __futex_syscall4(&mCblk->mFutex,
                        FUTEX_WAIT,
                        old & ~CBLK_FUTEX_WAKE, &ts);
                ATRACE_END();
                return 0; //retry immediately as space (possibly) became available
            }
        }

        // Divide buffer size by 2 to take into account the expansion
        // due to 8 to 16 bit conversion: the callback must fill only half
        // of the destination buffer
        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            audioBuffer.size >>= 1;
        }

        size_t reqSize = audioBuffer.size;
        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
        size_t writtenSize = audioBuffer.size;
        size_t writtenFrames = writtenSize / mFrameSize;

        // Sanity check on returned size
        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
                    reqSize, (int) writtenSize);
            return NS_NEVER;
        }

        if (writtenSize == 0) {
            // The callback is done filling buffers
            // Keep this thread going to handle timed events and
            // still try to get more data in intervals of WAIT_PERIOD_MS
            // but don't just loop and block the CPU, so wait
            return WAIT_PERIOD_MS * 1000000LL;
        }

        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            // 8 to 16 bit conversion, note that source and destination are the same address
            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
            audioBuffer.size <<= 1;
        }

        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
        audioBuffer.frameCount = releasedFrames;
        mRemainingFrames -= releasedFrames;
        if (misalignment >= releasedFrames) {
            misalignment -= releasedFrames;
        } else {
            misalignment = 0;
        }

        releaseBuffer(&audioBuffer);

        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
        // if callback doesn't like to accept the full chunk
        if (writtenSize < reqSize) {
            continue;
        }

        // There could be enough non-contiguous frames available to satisfy the remaining request
        if (mRemainingFrames <= nonContig) {
            continue;
        }

#if 0
        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
        // sum <= notificationFrames. It replaces that series by at most two EVENT_MORE_DATA
        // that total to a sum == notificationFrames.
        if (0 < misalignment && misalignment <= mRemainingFrames) {
            mRemainingFrames = misalignment;
            return (mRemainingFrames * 1100000000LL) / sampleRate;
        }
#endif

    }
    mRemainingFrames = notificationFrames;
    mRetryOnPartialBuffer = true;

    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
    return 0;
}

// Re-creates the IAudioTrack after media server death or voluntary invalidation.
// Must be called with mLock held. For offloaded tracks it only bumps the sequence
// and clears the config cache; the upper layers re-create the track themselves.
status_t AudioTrack::restoreTrack_l(const char *from)
{
    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
          isOffloaded() ? "Offloaded" : "PCM", from);
    ++mSequence;
    status_t result;

    // refresh the audio configuration cache in this process to make sure we get new
    // output parameters in getOutput_l() and createTrack_l()
    AudioSystem::clearAudioConfigCache();

    if (isOffloaded()) {
        return DEAD_OBJECT;
    }

    // force new output query from audio policy manager;
    mOutput = 0;
    audio_io_handle_t output = getOutput_l();

    // if the new IAudioTrack is created, createTrack_l() will modify the
    // following member variables: mAudioTrack, mCblkMemory and mCblk.
    // It will also delete the strong references on previous IAudioTrack and IMemory

    // take the frames that will be lost by track recreation into account in saved position
    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
    result = createTrack_l(mStreamType,
                           mSampleRate,
                           mFormat,
                           mReqFrameCount,  // so that frame count never goes down
                           mFlags,
                           mSharedBuffer,
                           output,
                           position /*epoch*/);

    if (result == NO_ERROR) {
        // continue playback from last known position, but
        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
        if (mStaticProxy != NULL) {
            mLoopPeriod = 0;
            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
        }
        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
        //       track destruction have been played? This is critical for SoundPool implementation
        //       This must be broken, and needs to be tested/debugged.
#if 0
        // restore write index and set other indexes to reflect empty buffer status
        if (!strcmp(from, "start")) {
            // Make sure that a client relying on callback events indicating underrun or
            // the actual amount of audio frames played (e.g SoundPool) receives them.
            if (mSharedBuffer == 0) {
                // restart playback even if buffer is not completely filled.
                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
            }
        }
#endif
        if (mState == STATE_ACTIVE) {
            result = mAudioTrack->start();
        }
    }
    if (result != NO_ERROR) {
        //Use of direct and offloaded output streams is ref counted by audio policy manager.
        // As getOutput was called above and resulted in an output stream to be opened,
        // we need to release it.
        AudioSystem::releaseOutput(output);
        ALOGW("restoreTrack_l() failed status %d", result);
        mState = STATE_STOPPED;
    }

    return result;
}

// Forwards key;value configuration pairs to the server-side track.
status_t AudioTrack::setParameters(const String8& keyValuePairs)
{
    AutoMutex lock(mLock);
    return mAudioTrack->setParameters(keyValuePairs);
}

// Returns the playback timestamp, adjusted by the client-side epoch.
// Only valid while active or paused, and not for fast tracks.
status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
{
    AutoMutex lock(mLock);
    // FIXME not implemented for fast tracks; should use proxy and SSQ
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return INVALID_OPERATION;
    }
    status_t status = mAudioTrack->getTimestamp(timestamp);
    if (status == NO_ERROR) {
        timestamp.mPosition += mProxy->getEpoch();
    }
    return status;
}

// Queries the output stream for the given keys; empty if no output is attached.
String8 AudioTrack::getParameters(const String8& keys)
{
    if (mOutput) {
        return AudioSystem::getParameters(mOutput, keys);
    } else {
        return String8::empty();
    }
}

// Writes a human-readable summary of the track's state to fd (for dumpsys).
status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
{

    const size_t SIZE = 256;
    char buffer[SIZE];
    String8 result;

    result.append(" AudioTrack::dump\n");
    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
            mVolume[0], mVolume[1]);
    result.append(buffer);
    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
            mChannelCount, mFrameCount);
    result.append(buffer);
    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
    result.append(buffer);
    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
    result.append(buffer);
    ::write(fd, result.string(), result.size());
    return NO_ERROR;
}

uint32_t AudioTrack::getUnderrunFrames() const
{
    AutoMutex lock(mLock);
    return mProxy->getUnderrunFrames();
}

// =========================================================================

// Binder-death callback: marks the proxy dead so blocked obtainBuffer() calls
// wake up and trigger track restoration.
void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
{
    sp<AudioTrack> audioTrack = mAudioTrack.promote();
    if (audioTrack != 0) {
        AutoMutex lock(audioTrack->mLock);
        audioTrack->mProxy->binderDied();
    }
}

// =========================================================================

AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
      mIgnoreNextPausedInt(false)
{
}

AudioTrack::AudioTrackThread::~AudioTrackThread()
{
}

// Thread loop: sleeps while paused, otherwise runs processAudioBuffer() and
// interprets its return value as the next wakeup time.
bool AudioTrack::AudioTrackThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        if (mPaused) {
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
        if (mIgnoreNextPausedInt) {
            mIgnoreNextPausedInt = false;
            mPausedInt = false;
        }
        if (mPausedInt) {
            if (mPausedNs > 0) {
                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
            } else {
                mMyCond.wait(mMyLock);
            }
            mPausedInt = false;
            return true;
        }
    }
    nsecs_t ns = mReceiver.processAudioBuffer(this);
    switch (ns) {
    case 0:
        return true;
    case NS_INACTIVE:
        pauseInternal();
        return true;
    case NS_NEVER:
        return false;
    case NS_WHENEVER:
        // FIXME increase poll interval, or make event-driven
        ns = 1000000000LL;
        // fall through
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
        pauseInternal(ns);
        return true;
    }
}

void AudioTrack::AudioTrackThread::requestExit()
{
    // must be in this order to avoid a race condition
    Thread::requestExit();
    resume();
}

// External pause: thread blocks in threadLoop() until resume() is called.
void AudioTrack::AudioTrackThread::pause()
{
    AutoMutex _l(mMyLock);
    mPaused = true;
}

void AudioTrack::AudioTrackThread::resume()
{
    AutoMutex _l(mMyLock);
    mIgnoreNextPausedInt = true;
    if (mPaused || mPausedInt) {
        mPaused = false;
        mPausedInt = false;
        mMyCond.signal();
    }
}

// Internal (timed) pause requested by processAudioBuffer()'s return value;
// ns == 0 means pause indefinitely until resume().
void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
{
    AutoMutex _l(mMyLock);
    mPausedInt = true;
    mPausedNs = ns;
}

}; // namespace android