Tracks.cpp revision 6e6704c06d61bc356e30c164081e5bcffb37920c
1/* 2** 3** Copyright 2012, The Android Open Source Project 4** 5** Licensed under the Apache License, Version 2.0 (the "License"); 6** you may not use this file except in compliance with the License. 7** You may obtain a copy of the License at 8** 9** http://www.apache.org/licenses/LICENSE-2.0 10** 11** Unless required by applicable law or agreed to in writing, software 12** distributed under the License is distributed on an "AS IS" BASIS, 13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 14** See the License for the specific language governing permissions and 15** limitations under the License. 16*/ 17 18 19#define LOG_TAG "AudioFlinger" 20//#define LOG_NDEBUG 0 21 22#include "Configuration.h" 23#include <math.h> 24#include <sys/syscall.h> 25#include <utils/Log.h> 26 27#include <private/media/AudioTrackShared.h> 28 29#include <common_time/cc_helper.h> 30#include <common_time/local_clock.h> 31 32#include "AudioMixer.h" 33#include "AudioFlinger.h" 34#include "ServiceUtilities.h" 35 36#include <media/nbaio/Pipe.h> 37#include <media/nbaio/PipeReader.h> 38#include <audio_utils/minifloat.h> 39 40// ---------------------------------------------------------------------------- 41 42// Note: the following macro is used for extremely verbose logging message. In 43// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to 44// 0; but one side effect of this is to turn all LOGV's as well. Some messages 45// are so verbose that we want to suppress them even when we have ALOG_ASSERT 46// turned on. Do not uncomment the #def below unless you really know what you 47// are doing and want to see all of the extremely verbose messages. 48//#define VERY_VERY_VERBOSE_LOGGING 49#ifdef VERY_VERY_VERBOSE_LOGGING 50#define ALOGVV ALOGV 51#else 52#define ALOGVV(a...) 
do { } while(0) 53#endif 54 55namespace android { 56 57// ---------------------------------------------------------------------------- 58// TrackBase 59// ---------------------------------------------------------------------------- 60 61static volatile int32_t nextTrackId = 55; 62 63// TrackBase constructor must be called with AudioFlinger::mLock held 64AudioFlinger::ThreadBase::TrackBase::TrackBase( 65 ThreadBase *thread, 66 const sp<Client>& client, 67 uint32_t sampleRate, 68 audio_format_t format, 69 audio_channel_mask_t channelMask, 70 size_t frameCount, 71 const sp<IMemory>& sharedBuffer, 72 int sessionId, 73 int clientUid, 74 IAudioFlinger::track_flags_t flags, 75 bool isOut, 76 alloc_type alloc) 77 : RefBase(), 78 mThread(thread), 79 mClient(client), 80 mCblk(NULL), 81 // mBuffer 82 mState(IDLE), 83 mSampleRate(sampleRate), 84 mFormat(format), 85 mChannelMask(channelMask), 86 mChannelCount(isOut ? 87 audio_channel_count_from_out_mask(channelMask) : 88 audio_channel_count_from_in_mask(channelMask)), 89 mFrameSize(audio_is_linear_pcm(format) ? 90 mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)), 91 mFrameCount(frameCount), 92 mSessionId(sessionId), 93 mFlags(flags), 94 mIsOut(isOut), 95 mServerProxy(NULL), 96 mId(android_atomic_inc(&nextTrackId)), 97 mTerminated(false) 98{ 99 // if the caller is us, trust the specified uid 100 if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) { 101 int newclientUid = IPCThreadState::self()->getCallingUid(); 102 if (clientUid != -1 && clientUid != newclientUid) { 103 ALOGW("uid %d tried to pass itself off as %d", newclientUid, clientUid); 104 } 105 clientUid = newclientUid; 106 } 107 // clientUid contains the uid of the app that is responsible for this track, so we can blame 108 // battery usage on it. 
109 mUid = clientUid; 110 111 // client == 0 implies sharedBuffer == 0 112 ALOG_ASSERT(!(client == 0 && sharedBuffer != 0)); 113 114 ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(), 115 sharedBuffer->size()); 116 117 // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize); 118 size_t size = sizeof(audio_track_cblk_t); 119 size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize; 120 if (sharedBuffer == 0 && alloc == ALLOC_CBLK) { 121 size += bufferSize; 122 } 123 124 if (client != 0) { 125 mCblkMemory = client->heap()->allocate(size); 126 if (mCblkMemory == 0 || 127 (mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer())) == NULL) { 128 ALOGE("not enough memory for AudioTrack size=%u", size); 129 client->heap()->dump("AudioTrack"); 130 mCblkMemory.clear(); 131 return; 132 } 133 } else { 134 // this syntax avoids calling the audio_track_cblk_t constructor twice 135 mCblk = (audio_track_cblk_t *) new uint8_t[size]; 136 // assume mCblk != NULL 137 } 138 139 // construct the shared structure in-place. 140 if (mCblk != NULL) { 141 new(mCblk) audio_track_cblk_t(); 142 switch (alloc) { 143 case ALLOC_READONLY: { 144 const sp<MemoryDealer> roHeap(thread->readOnlyHeap()); 145 if (roHeap == 0 || 146 (mBufferMemory = roHeap->allocate(bufferSize)) == 0 || 147 (mBuffer = mBufferMemory->pointer()) == NULL) { 148 ALOGE("not enough memory for read-only buffer size=%zu", bufferSize); 149 if (roHeap != 0) { 150 roHeap->dump("buffer"); 151 } 152 mCblkMemory.clear(); 153 mBufferMemory.clear(); 154 return; 155 } 156 memset(mBuffer, 0, bufferSize); 157 } break; 158 case ALLOC_PIPE: 159 mBufferMemory = thread->pipeMemory(); 160 // mBuffer is the virtual address as seen from current process (mediaserver), 161 // and should normally be coming from mBufferMemory->pointer(). 162 // However in this case the TrackBase does not reference the buffer directly. 
163 // It should references the buffer via the pipe. 164 // Therefore, to detect incorrect usage of the buffer, we set mBuffer to NULL. 165 mBuffer = NULL; 166 break; 167 case ALLOC_CBLK: 168 // clear all buffers 169 if (sharedBuffer == 0) { 170 mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t); 171 memset(mBuffer, 0, bufferSize); 172 } else { 173 mBuffer = sharedBuffer->pointer(); 174#if 0 175 mCblk->mFlags = CBLK_FORCEREADY; // FIXME hack, need to fix the track ready logic 176#endif 177 } 178 break; 179 } 180 181#ifdef TEE_SINK 182 if (mTeeSinkTrackEnabled) { 183 NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount); 184 if (Format_isValid(pipeFormat)) { 185 Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat); 186 size_t numCounterOffers = 0; 187 const NBAIO_Format offers[1] = {pipeFormat}; 188 ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers); 189 ALOG_ASSERT(index == 0); 190 PipeReader *pipeReader = new PipeReader(*pipe); 191 numCounterOffers = 0; 192 index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers); 193 ALOG_ASSERT(index == 0); 194 mTeeSink = pipe; 195 mTeeSource = pipeReader; 196 } 197 } 198#endif 199 200 } 201} 202 203AudioFlinger::ThreadBase::TrackBase::~TrackBase() 204{ 205#ifdef TEE_SINK 206 dumpTee(-1, mTeeSource, mId); 207#endif 208 // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference 209 delete mServerProxy; 210 if (mCblk != NULL) { 211 if (mClient == 0) { 212 delete mCblk; 213 } else { 214 mCblk->~audio_track_cblk_t(); // destroy our shared-structure. 215 } 216 } 217 mCblkMemory.clear(); // free the shared memory before releasing the heap it belongs to 218 if (mClient != 0) { 219 // Client destructor must run with AudioFlinger client mutex locked 220 Mutex::Autolock _l(mClient->audioFlinger()->mClientLock); 221 // If the client's reference count drops to zero, the associated destructor 222 // must run with AudioFlinger lock held. 
Thus the explicit clear() rather than 223 // relying on the automatic clear() at end of scope. 224 mClient.clear(); 225 } 226 // flush the binder command buffer 227 IPCThreadState::self()->flushCommands(); 228} 229 230// AudioBufferProvider interface 231// getNextBuffer() = 0; 232// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack 233void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer) 234{ 235#ifdef TEE_SINK 236 if (mTeeSink != 0) { 237 (void) mTeeSink->write(buffer->raw, buffer->frameCount); 238 } 239#endif 240 241 ServerProxy::Buffer buf; 242 buf.mFrameCount = buffer->frameCount; 243 buf.mRaw = buffer->raw; 244 buffer->frameCount = 0; 245 buffer->raw = NULL; 246 mServerProxy->releaseBuffer(&buf); 247} 248 249status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event) 250{ 251 mSyncEvents.add(event); 252 return NO_ERROR; 253} 254 255// ---------------------------------------------------------------------------- 256// Playback 257// ---------------------------------------------------------------------------- 258 259AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track) 260 : BnAudioTrack(), 261 mTrack(track) 262{ 263} 264 265AudioFlinger::TrackHandle::~TrackHandle() { 266 // just stop the track on deletion, associated resources 267 // will be freed from the main thread once all pending buffers have 268 // been played. Unless it's not in the active track list, in which 269 // case we free everything now... 
270 mTrack->destroy(); 271} 272 273sp<IMemory> AudioFlinger::TrackHandle::getCblk() const { 274 return mTrack->getCblk(); 275} 276 277status_t AudioFlinger::TrackHandle::start() { 278 return mTrack->start(); 279} 280 281void AudioFlinger::TrackHandle::stop() { 282 mTrack->stop(); 283} 284 285void AudioFlinger::TrackHandle::flush() { 286 mTrack->flush(); 287} 288 289void AudioFlinger::TrackHandle::pause() { 290 mTrack->pause(); 291} 292 293status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId) 294{ 295 return mTrack->attachAuxEffect(EffectId); 296} 297 298status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size, 299 sp<IMemory>* buffer) { 300 if (!mTrack->isTimedTrack()) 301 return INVALID_OPERATION; 302 303 PlaybackThread::TimedTrack* tt = 304 reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); 305 return tt->allocateTimedBuffer(size, buffer); 306} 307 308status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer, 309 int64_t pts) { 310 if (!mTrack->isTimedTrack()) 311 return INVALID_OPERATION; 312 313 if (buffer == 0 || buffer->pointer() == NULL) { 314 ALOGE("queueTimedBuffer() buffer is 0 or has NULL pointer()"); 315 return BAD_VALUE; 316 } 317 318 PlaybackThread::TimedTrack* tt = 319 reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); 320 return tt->queueTimedBuffer(buffer, pts); 321} 322 323status_t AudioFlinger::TrackHandle::setMediaTimeTransform( 324 const LinearTransform& xform, int target) { 325 326 if (!mTrack->isTimedTrack()) 327 return INVALID_OPERATION; 328 329 PlaybackThread::TimedTrack* tt = 330 reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get()); 331 return tt->setMediaTimeTransform( 332 xform, static_cast<TimedAudioTrack::TargetTimeline>(target)); 333} 334 335status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) { 336 return mTrack->setParameters(keyValuePairs); 337} 338 339status_t AudioFlinger::TrackHandle::getTimestamp(AudioTimestamp& timestamp) 340{ 341 
// Forwards a client wake-up request to the track's playback thread.
void AudioFlinger::TrackHandle::signal()
{
    return mTrack->signal();
}

status_t AudioFlinger::TrackHandle::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    return BnAudioTrack::onTransact(code, data, reply, flags);
}

// ----------------------------------------------------------------------------

// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::PlaybackThread::Track::Track(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid,
            IAudioFlinger::track_flags_t flags)
    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
            sessionId, uid, flags, true /*isOut*/),
        mFillingUpStatus(FS_INVALID),
        // mRetryCount initialized later when needed
        mSharedBuffer(sharedBuffer),
        mStreamType(streamType),
        mName(-1),  // see note below
        mMainBuffer(thread->mixBuffer()),
        mAuxBuffer(NULL),
        mAuxEffectId(0), mHasVolumeController(false),
        mPresentationCompleteFrames(0),
        mFastIndex(-1),
        mCachedVolume(1.0),
        mIsInvalid(false),
        mAudioTrackServerProxy(NULL),
        mResumeToStopping(false),
        mFlushHwPending(false),
        mPreviousValid(false),
        mPreviousFramesWritten(0)
        // mPreviousTimestamp
{
    // TrackBase constructor failed to allocate the control block; leave this
    // Track uninitialized — initCheck() will report the failure to the caller.
    if (mCblk == NULL) {
        return;
    }

    // Streaming tracks get a rolling-buffer proxy, static tracks a shared-buffer proxy.
    if (sharedBuffer == 0) {
        mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
                mFrameSize);
    } else {
        mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
                mFrameSize);
    }
    mServerProxy = mAudioTrackServerProxy;

    mName = thread->getTrackName_l(channelMask, format, sessionId);
    if (mName < 0) {
        ALOGE("no more track names available");
        return;
    }
    // only allocate a fast track index if we were able to allocate a normal track name
    if (flags & IAudioFlinger::TRACK_FAST) {
        mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
        ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
        // lowest set bit = first free fast-track slot; slot 0 is asserted unavailable here
        int i = __builtin_ctz(thread->mFastTrackAvailMask);
        ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
        // FIXME This is too eager.  We allocate a fast track index before the
        //       fast track becomes active.  Since fast tracks are a scarce resource,
        //       this means we are potentially denying other more important fast tracks from
        //       being created.  It would be better to allocate the index dynamically.
        mFastIndex = i;
        // Read the initial underruns because this field is never cleared by the fast mixer
        mObservedUnderruns = thread->getFastTrackUnderruns(i);
        thread->mFastTrackAvailMask &= ~(1 << i);
    }
}

AudioFlinger::PlaybackThread::Track::~Track()
{
    ALOGV("PlaybackThread::Track destructor");

    // The destructor would clear mSharedBuffer,
    // but it will not push the decremented reference count,
    // leaving the client's IMemory dangling indefinitely.
    // This prevents that leak.
    if (mSharedBuffer != 0) {
        mSharedBuffer.clear();
    }
}

// Returns NO_ERROR only if both the TrackBase allocation and the mixer track
// name allocation succeeded.
status_t AudioFlinger::PlaybackThread::Track::initCheck() const
{
    status_t status = TrackBase::initCheck();
    if (status == NO_ERROR && mName < 0) {
        status = NO_MEMORY;
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::destroy()
{
    // NOTE: destroyTrack_l() can remove a strong reference to this Track
    // by removing it from mTracks vector, so there is a risk that this Tracks's
    // destructor is called. As the destructor needs to lock mLock,
    // we must acquire a strong reference on this Track before locking mLock
    // here so that the destructor is called only when exiting this function.
    // On the other hand, as long as Track::destroy() is only called by
    // TrackHandle destructor, the TrackHandle still holds a strong ref on
    // this Track with its member mTrack.
    sp<Track> keep(this);
    { // scope for mLock
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            Mutex::Autolock _l(thread->mLock);
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            bool wasActive = playbackThread->destroyTrack_l(this);
            if (!isOutputTrack() && !wasActive) {
                AudioSystem::releaseOutput(thread->id());
            }
        }
    }
}

// Emits the column header matching the row format produced by dump() below.
// NOTE(review): runs of spaces in these literals appear to have been collapsed
// by the formatting of this revision — verify column alignment against dump().
/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
    result.append(" Name Active Client Type Fmt Chn mask Session fCount S F SRate "
                  "L dB R dB Server Main buf Aux Buf Flags UndFrmCnt\n");
}

// Formats one row of track state into |buffer|.  The name field is written with
// sprintf at the start of the buffer, the remainder with snprintf at offset 8;
// NOTE(review): assumes the name field occupies exactly 8 characters — the
// literals below look shorter, likely due to collapsed whitespace; confirm.
void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size, bool active)
{
    gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
    if (isFastTrack()) {
        sprintf(buffer, " F %2d", mFastIndex);
    } else if (mName >= AudioMixer::TRACK0) {
        sprintf(buffer, " %4d", mName - AudioMixer::TRACK0);
    } else {
        sprintf(buffer, " none");
    }
    track_state state = mState;
    char stateChar;
    if (isTerminated()) {
        stateChar = 'T';
    } else {
        switch (state) {
        case IDLE:
            stateChar = 'I';
            break;
        case STOPPING_1:
            stateChar = 's';
            break;
        case STOPPING_2:
            stateChar = '5';
            break;
        case STOPPED:
            stateChar = 'S';
            break;
        case RESUMING:
            stateChar = 'R';
            break;
        case ACTIVE:
            stateChar = 'A';
            break;
        case PAUSING:
            stateChar = 'p';
            break;
        case PAUSED:
            stateChar = 'P';
            break;
        case FLUSHED:
            stateChar = 'F';
            break;
        default:
            stateChar = '?';
            break;
        }
    }
    char nowInUnderrun;
    switch (mObservedUnderruns.mBitFields.mMostRecent) {
    case UNDERRUN_FULL:
        nowInUnderrun = ' ';
        break;
    case UNDERRUN_PARTIAL:
        nowInUnderrun = '<';
        break;
    case UNDERRUN_EMPTY:
        nowInUnderrun = '*';
        break;
    default:
        nowInUnderrun = '?';
        break;
    }
    snprintf(&buffer[8], size-8, " %6s %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g "
                                 "%08X %p %p 0x%03X %9u%c\n",
            active ? "yes" : "no",
            (mClient == 0) ? getpid_cached : mClient->pid(),
            mStreamType,
            mFormat,
            mChannelMask,
            mSessionId,
            mFrameCount,
            stateChar,
            mFillingUpStatus,
            mAudioTrackServerProxy->getSampleRate(),
            20.0 * log10(float_from_gain(gain_minifloat_unpack_left(vlr))),
            20.0 * log10(float_from_gain(gain_minifloat_unpack_right(vlr))),
            mCblk->mServer,
            mMainBuffer,
            mAuxBuffer,
            mCblk->mFlags,
            mAudioTrackServerProxy->getUnderrunFrames(),
            nowInUnderrun);
}

// Current sample rate as seen by the server proxy (may differ from mSampleRate
// if the client changed it via the control block).
uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
    return mAudioTrackServerProxy->getSampleRate();
}

// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
        AudioBufferProvider::Buffer* buffer, int64_t pts __unused)
{
    ServerProxy::Buffer buf;
    size_t desiredFrames = buffer->frameCount;
    buf.mFrameCount = desiredFrames;
    status_t status = mServerProxy->obtainBuffer(&buf);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    // zero frames obtained while frames were wanted counts as an underrun
    if (buf.mFrameCount == 0) {
        mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
    }
    return status;
}

// releaseBuffer() is not overridden

// ExtendedAudioBufferProvider interface

// Note that framesReady() takes a mutex on the control block using tryLock().
// This could result in priority inversion if framesReady() is called by the normal mixer,
// as the normal mixer thread runs at lower
// priority than the client's callback thread: there is a short window within framesReady()
// during which the normal mixer could be preempted, and the client callback would block.
// Another problem can occur if framesReady() is called by the fast mixer:
// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
// See the priority-inversion caveat in the comment block above.
size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
    return mAudioTrackServerProxy->framesReady();
}

size_t AudioFlinger::PlaybackThread::Track::framesReleased() const
{
    return mAudioTrackServerProxy->framesReleased();
}

// Don't call for fast tracks; the framesReady() could result in priority inversion
bool AudioFlinger::PlaybackThread::Track::isReady() const {
    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
        return true;
    }

    if (isStopping()) {
        if (framesReady() > 0) {
            mFillingUpStatus = FS_FILLED;
        }
        return true;
    }

    // Ready once the buffer has filled, or the client forced readiness.
    if (framesReady() >= mFrameCount ||
            (mCblk->mFlags & CBLK_FORCEREADY)) {
        mFillingUpStatus = FS_FILLED;
        // consume the one-shot force-ready flag
        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
        return true;
    }
    return false;
}

status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event __unused,
                                                    int triggerSession __unused)
{
    status_t status = NO_ERROR;
    ALOGV("start(%d), calling pid %d session %d",
            mName, IPCThreadState::self()->getCallingPid(), mSessionId);

    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        if (isOffloaded()) {
            // Offloaded tracks cannot coexist with non-offloadable effects;
            // lock order: AudioFlinger::mLock before ThreadBase::mLock.
            Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
            Mutex::Autolock _lth(thread->mLock);
            sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
            if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
                    (ec != 0 && ec->isNonOffloadableEnabled())) {
                invalidate();
                return PERMISSION_DENIED;
            }
        }
        Mutex::Autolock _lth(thread->mLock);
        track_state state = mState;
        // here the track could be either new, or restarted
        // in both cases "unstop" the track

        // initial state-stopping. next state-pausing.
        // What if resume is called ?

        if (state == PAUSED || state == PAUSING) {
            if (mResumeToStopping) {
                // happened we need to resume to STOPPING_1
                mState = TrackBase::STOPPING_1;
                ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
            } else {
                mState = TrackBase::RESUMING;
                ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
            }
        } else {
            mState = TrackBase::ACTIVE;
            ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
        }

        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        status = playbackThread->addTrack_l(this);
        if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
            triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
            // restore previous state if start was rejected by policy manager
            if (status == PERMISSION_DENIED) {
                mState = state;
            }
        }
        // track was already in the active list, not a problem
        if (status == ALREADY_EXISTS) {
            status = NO_ERROR;
        } else {
            // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
            // It is usually unsafe to access the server proxy from a binder thread.
            // But in this case we know the mixer thread (whether normal mixer or fast mixer)
            // isn't looking at this track yet:  we still hold the normal mixer thread lock,
            // and for fast tracks the track is not yet in the fast mixer thread's active set.
            ServerProxy::Buffer buffer;
            buffer.mFrameCount = 1;
            (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
        }
    } else {
        status = BAD_VALUE;
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::stop()
{
    ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        track_state state = mState;
        if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
            // If the track is not active (PAUSED and buffers full), flush buffers
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                reset();
                mState = STOPPED;
            } else if (!isFastTrack() && !isOffloaded() && !isDirect()) {
                mState = STOPPED;
            } else {
                // For fast tracks prepareTracks_l() will set state to STOPPING_2
                // presentation is complete
                // For an offloaded track this starts a drain and state will
                // move to STOPPING_2 when drain completes and then STOPPED
                mState = STOPPING_1;
            }
            ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
                    playbackThread);
        }
    }
}

void AudioFlinger::PlaybackThread::Track::pause()
{
    ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        switch (mState) {
        case STOPPING_1:
        case STOPPING_2:
            if (!isOffloaded()) {
                /* nothing to do if track is not offloaded */
                break;
            }

            // Offloaded track was draining, we need to carry on draining when resumed
            mResumeToStopping = true;
            // fall through...
        case ACTIVE:
        case RESUMING:
            mState = PAUSING;
            ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
            playbackThread->broadcast_l();
            break;

        default:
            break;
        }
    }
}

void AudioFlinger::PlaybackThread::Track::flush()
{
    ALOGV("flush(%d)", mName);
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();

        if (isOffloaded()) {
            // If offloaded we allow flush during any state except terminated
            // and keep the track active to avoid problems if user is seeking
            // rapidly and underlying hardware has a significant delay handling
            // a pause
            if (isTerminated()) {
                return;
            }

            ALOGV("flush: offload flush");
            reset();

            if (mState == STOPPING_1 || mState == STOPPING_2) {
                ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
                mState = ACTIVE;
            }

            if (mState == ACTIVE) {
                ALOGV("flush called in active state, resetting buffer time out retry count");
                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
            }

            mFlushHwPending = true;
            mResumeToStopping = false;
        } else {
            if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
                    mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
                return;
            }
            // No point remaining in PAUSED state after a flush => go to
            // FLUSHED state
            mState = FLUSHED;
            // do not reset the track if it is still in the process of being stopped or paused.
            // this will be done by prepareTracks_l() when the track is stopped.
            // prepareTracks_l() will see mState == FLUSHED, then
            // remove from active track list, reset(), and trigger presentation complete
            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                reset();
            }
        }
        // Prevent flush being lost if the track is flushed and then resumed
        // before mixer thread can run. This is important when offloading
        // because the hardware buffer could hold a large amount of audio
        playbackThread->broadcast_l();
    }
}
797 // prepareTracks_l() will see mState == FLUSHED, then 798 // remove from active track list, reset(), and trigger presentation complete 799 if (playbackThread->mActiveTracks.indexOf(this) < 0) { 800 reset(); 801 } 802 } 803 // Prevent flush being lost if the track is flushed and then resumed 804 // before mixer thread can run. This is important when offloading 805 // because the hardware buffer could hold a large amount of audio 806 playbackThread->broadcast_l(); 807 } 808} 809 810// must be called with thread lock held 811void AudioFlinger::PlaybackThread::Track::flushAck() 812{ 813 if (!isOffloaded()) 814 return; 815 816 mFlushHwPending = false; 817} 818 819void AudioFlinger::PlaybackThread::Track::reset() 820{ 821 // Do not reset twice to avoid discarding data written just after a flush and before 822 // the audioflinger thread detects the track is stopped. 823 if (!mResetDone) { 824 // Force underrun condition to avoid false underrun callback until first data is 825 // written to buffer 826 android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags); 827 mFillingUpStatus = FS_FILLING; 828 mResetDone = true; 829 if (mState == FLUSHED) { 830 mState = IDLE; 831 } 832 } 833} 834 835status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs) 836{ 837 sp<ThreadBase> thread = mThread.promote(); 838 if (thread == 0) { 839 ALOGE("thread is dead"); 840 return FAILED_TRANSACTION; 841 } else if ((thread->type() == ThreadBase::DIRECT) || 842 (thread->type() == ThreadBase::OFFLOAD)) { 843 return thread->setParameters(keyValuePairs); 844 } else { 845 return PERMISSION_DENIED; 846 } 847} 848 849status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp) 850{ 851 // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant 852 if (isFastTrack()) { 853 // FIXME no lock held to set mPreviousValid = false 854 return INVALID_OPERATION; 855 } 856 sp<ThreadBase> thread = mThread.promote(); 857 if 
(thread == 0) { 858 // FIXME no lock held to set mPreviousValid = false 859 return INVALID_OPERATION; 860 } 861 Mutex::Autolock _l(thread->mLock); 862 PlaybackThread *playbackThread = (PlaybackThread *)thread.get(); 863 if (!isOffloaded() && !isDirect()) { 864 if (!playbackThread->mLatchQValid) { 865 mPreviousValid = false; 866 return INVALID_OPERATION; 867 } 868 uint32_t unpresentedFrames = 869 ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) / 870 playbackThread->mSampleRate; 871 uint32_t framesWritten = mAudioTrackServerProxy->framesReleased(); 872 bool checkPreviousTimestamp = mPreviousValid && framesWritten >= mPreviousFramesWritten; 873 if (framesWritten < unpresentedFrames) { 874 mPreviousValid = false; 875 return INVALID_OPERATION; 876 } 877 mPreviousFramesWritten = framesWritten; 878 uint32_t position = framesWritten - unpresentedFrames; 879 struct timespec time = playbackThread->mLatchQ.mTimestamp.mTime; 880 if (checkPreviousTimestamp) { 881 if (time.tv_sec < mPreviousTimestamp.mTime.tv_sec || 882 (time.tv_sec == mPreviousTimestamp.mTime.tv_sec && 883 time.tv_nsec < mPreviousTimestamp.mTime.tv_nsec)) { 884 ALOGW("Time is going backwards"); 885 } 886 // position can bobble slightly as an artifact; this hides the bobble 887 static const uint32_t MINIMUM_POSITION_DELTA = 8u; 888 if ((position <= mPreviousTimestamp.mPosition) || 889 (position - mPreviousTimestamp.mPosition) < MINIMUM_POSITION_DELTA) { 890 position = mPreviousTimestamp.mPosition; 891 time = mPreviousTimestamp.mTime; 892 } 893 } 894 timestamp.mPosition = position; 895 timestamp.mTime = time; 896 mPreviousTimestamp = timestamp; 897 mPreviousValid = true; 898 return NO_ERROR; 899 } 900 901 return playbackThread->getTimestamp_l(timestamp); 902} 903 904status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId) 905{ 906 status_t status = DEAD_OBJECT; 907 sp<ThreadBase> thread = mThread.promote(); 908 if (thread != 0) { 909 PlaybackThread *playbackThread = 
(PlaybackThread *)thread.get(); 910 sp<AudioFlinger> af = mClient->audioFlinger(); 911 912 Mutex::Autolock _l(af->mLock); 913 914 sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId); 915 916 if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) { 917 Mutex::Autolock _dl(playbackThread->mLock); 918 Mutex::Autolock _sl(srcThread->mLock); 919 sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX); 920 if (chain == 0) { 921 return INVALID_OPERATION; 922 } 923 924 sp<EffectModule> effect = chain->getEffectFromId_l(EffectId); 925 if (effect == 0) { 926 return INVALID_OPERATION; 927 } 928 srcThread->removeEffect_l(effect); 929 status = playbackThread->addEffect_l(effect); 930 if (status != NO_ERROR) { 931 srcThread->addEffect_l(effect); 932 return INVALID_OPERATION; 933 } 934 // removeEffect_l() has stopped the effect if it was active so it must be restarted 935 if (effect->state() == EffectModule::ACTIVE || 936 effect->state() == EffectModule::STOPPING) { 937 effect->start(); 938 } 939 940 sp<EffectChain> dstChain = effect->chain().promote(); 941 if (dstChain == 0) { 942 srcThread->addEffect_l(effect); 943 return INVALID_OPERATION; 944 } 945 AudioSystem::unregisterEffect(effect->id()); 946 AudioSystem::registerEffect(&effect->desc(), 947 srcThread->id(), 948 dstChain->strategy(), 949 AUDIO_SESSION_OUTPUT_MIX, 950 effect->id()); 951 AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled()); 952 } 953 status = playbackThread->attachAuxEffect(this, EffectId); 954 } 955 return status; 956} 957 958void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer) 959{ 960 mAuxEffectId = EffectId; 961 mAuxBuffer = buffer; 962} 963 964bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten, 965 size_t audioHalFrames) 966{ 967 // a track is considered presented when the total number of frames written to audio HAL 968 // corresponds to the number 
of frames written when presentationComplete() is called for the
    // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
    // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
    // to detect when all frames have been played. In this case framesWritten isn't
    // useful because it doesn't always reflect whether there is data in the h/w
    // buffers, particularly if a track has been paused and resumed during draining
    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
            mPresentationCompleteFrames, framesWritten);
    if (mPresentationCompleteFrames == 0) {
        // First call: latch the target frame count (what was written plus HAL buffering).
        mPresentationCompleteFrames = framesWritten + audioHalFrames;
        ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
                mPresentationCompleteFrames, audioHalFrames);
    }

    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
        triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
        mAudioTrackServerProxy->setStreamEndDone();
        return true;
    }
    return false;
}

// Fire and remove all pending sync events of the given type.
void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
{
    for (size_t i = 0; i < mSyncEvents.size(); i++) {
        if (mSyncEvents[i]->type() == type) {
            mSyncEvents[i]->trigger();
            mSyncEvents.removeAt(i);
            i--; // compensate for removal so the next element is not skipped
        }
    }
}

// implement VolumeBufferProvider interface

gain_minifloat_packed_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
{
    // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
    ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
    gain_minifloat_packed_t vlr = mAudioTrackServerProxy->getVolumeLR();
    float vl = float_from_gain(gain_minifloat_unpack_left(vlr));
    float vr = float_from_gain(gain_minifloat_unpack_right(vlr));
    // track volumes come from shared memory, so can't be trusted and must be clamped
    if (vl > GAIN_FLOAT_UNITY) {
        vl = GAIN_FLOAT_UNITY;
    }
    if (vr > GAIN_FLOAT_UNITY) {
        vr = GAIN_FLOAT_UNITY;
    }
    // now apply the cached master volume and stream type volume;
    // this is trusted but lacks any synchronization or barrier so may be stale
    float v = mCachedVolume;
    vl *= v;
    vr *= v;
    // re-combine into packed minifloat
    vlr = gain_minifloat_pack(gain_from_float(vl), gain_from_float(vr));
    // FIXME look at mute, pause, and stop flags
    return vlr;
}

// Accept a sync event only while the track is in a state where it can still consume
// frames; otherwise cancel the event and report the invalid state.
status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
{
    if (isTerminated() || mState == PAUSED ||
            ((framesReady() == 0) && ((mSharedBuffer != 0) ||
                                      (mState == STOPPED)))) {
        ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
              mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
        event->cancel();
        return INVALID_OPERATION;
    }
    (void) TrackBase::setSyncEvent(event);
    return NO_ERROR;
}

// Mark the shared control block invalid and wake any client blocked on the futex so it
// notices the invalidation promptly.
void AudioFlinger::PlaybackThread::Track::invalidate()
{
    // FIXME should use proxy, and needs work
    audio_track_cblk_t* cblk = mCblk;
    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
    android_atomic_release_store(0x40000000, &cblk->mFutex);
    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
    (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
    mIsInvalid = true;
}

// Wake the owning playback thread so it re-evaluates this track's state.
void AudioFlinger::PlaybackThread::Track::signal()
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        PlaybackThread *t = (PlaybackThread *)thread.get();
        Mutex::Autolock _l(t->mLock);
        t->broadcast_l();
    }
}

//To be called with thread lock held
bool AudioFlinger::PlaybackThread::Track::isResumePending() {

    if (mState == RESUMING)
        return true;
    /* Resume is pending if track was
stopping before pause was called */
    if (mState == STOPPING_1 &&
        mResumeToStopping)
        return true;

    return false;
}

//To be called with thread lock held
void AudioFlinger::PlaybackThread::Track::resumeAck() {


    if (mState == RESUMING)
        mState = ACTIVE;

    // Other possibility of pending resume is stopping_1 state
    // Do not update the state from stopping as this prevents
    // drain being called.
    if (mState == STOPPING_1) {
        mResumeToStopping = false;
    }
}
// ----------------------------------------------------------------------------

// Factory for TimedTrack: enforces the per-client timed-track quota before
// constructing; returns 0 when the client has no quota left.
sp<AudioFlinger::PlaybackThread::TimedTrack>
AudioFlinger::PlaybackThread::TimedTrack::create(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid)
{
    if (!client->reserveTimedTrack())
        return 0;

    return new TimedTrack(
        thread, client, streamType, sampleRate, format, channelMask, frameCount,
        sharedBuffer, sessionId, uid);
}

AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid)
    : Track(thread, client, streamType, sampleRate, format, channelMask,
            frameCount, sharedBuffer, sessionId, uid, IAudioFlinger::TRACK_TIMED),
      mQueueHeadInFlight(false),
      mTrimQueueHeadOnRelease(false),
      mFramesPendingInQueue(0),
      mTimedSilenceBuffer(NULL),
      mTimedSilenceBufferSize(0),
      mTimedAudioOutputOnTime(false),
      mMediaTimeTransformValid(false)
{
    LocalClock lc;
    mLocalTimeFreq = lc.getLocalFreq();

    // local-time ticks -> samples at this track's rate
    mLocalTimeToSampleTransform.a_zero = 0;
    mLocalTimeToSampleTransform.b_zero = 0;
    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
                            &mLocalTimeToSampleTransform.a_to_b_denom);

    // media-time (usec) -> samples at this track's rate
    mMediaTimeToSampleTransform.a_zero = 0;
    mMediaTimeToSampleTransform.b_zero = 0;
    mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
    mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
    LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
                            &mMediaTimeToSampleTransform.a_to_b_denom);
}

AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
    // Return this track's quota slot and free the lazily-allocated silence buffer.
    mClient->releaseTimedTrack();
    delete [] mTimedSilenceBuffer;
}

// Allocate a shared-memory buffer for timed audio data, trimming stale queue
// entries first so their memory can be reused.
status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
    size_t size, sp<IMemory>* buffer) {

    Mutex::Autolock _l(mTimedBufferQueueLock);

    trimTimedBufferQueue_l();

    // lazily initialize the shared memory heap for timed buffers
    if (mTimedMemoryDealer == NULL) {
        const int kTimedBufferHeapSize = 512 << 10;

        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
                                              "AudioFlingerTimed");
        if (mTimedMemoryDealer == NULL) {
            return NO_MEMORY;
        }
    }

    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
    if (newBuffer == 0 || newBuffer->pointer() == NULL) {
        return NO_MEMORY;
    }

    *buffer = newBuffer;
    return NO_ERROR;
}

// caller must hold mTimedBufferQueueLock
void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
    int64_t mediaTimeNow;
    {
        Mutex::Autolock mttLock(mMediaTimeTransformLock);
        if (!mMediaTimeTransformValid)
            return;

        int64_t targetTimeNow;
        status_t res = (mMediaTimeTransformTarget ==
TimedAudioTrack::COMMON_TIME) 1194 ? mCCHelper.getCommonTime(&targetTimeNow) 1195 : mCCHelper.getLocalTime(&targetTimeNow); 1196 1197 if (OK != res) 1198 return; 1199 1200 if (!mMediaTimeTransform.doReverseTransform(targetTimeNow, 1201 &mediaTimeNow)) { 1202 return; 1203 } 1204 } 1205 1206 size_t trimEnd; 1207 for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) { 1208 int64_t bufEnd; 1209 1210 if ((trimEnd + 1) < mTimedBufferQueue.size()) { 1211 // We have a next buffer. Just use its PTS as the PTS of the frame 1212 // following the last frame in this buffer. If the stream is sparse 1213 // (ie, there are deliberate gaps left in the stream which should be 1214 // filled with silence by the TimedAudioTrack), then this can result 1215 // in one extra buffer being left un-trimmed when it could have 1216 // been. In general, this is not typical, and we would rather 1217 // optimized away the TS calculation below for the more common case 1218 // where PTSes are contiguous. 1219 bufEnd = mTimedBufferQueue[trimEnd + 1].pts(); 1220 } else { 1221 // We have no next buffer. Compute the PTS of the frame following 1222 // the last frame in this buffer by computing the duration of of 1223 // this frame in media time units and adding it to the PTS of the 1224 // buffer. 1225 int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size() 1226 / mFrameSize; 1227 1228 if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount, 1229 &bufEnd)) { 1230 ALOGE("Failed to convert frame count of %lld to media time" 1231 " duration" " (scale factor %d/%u) in %s", 1232 frameCount, 1233 mMediaTimeToSampleTransform.a_to_b_numer, 1234 mMediaTimeToSampleTransform.a_to_b_denom, 1235 __PRETTY_FUNCTION__); 1236 break; 1237 } 1238 bufEnd += mTimedBufferQueue[trimEnd].pts(); 1239 } 1240 1241 if (bufEnd > mediaTimeNow) 1242 break; 1243 1244 // Is the buffer we want to use in the middle of a mix operation right 1245 // now? If so, don't actually trim it. 
Just wait for the releaseBuffer 1246 // from the mixer which should be coming back shortly. 1247 if (!trimEnd && mQueueHeadInFlight) { 1248 mTrimQueueHeadOnRelease = true; 1249 } 1250 } 1251 1252 size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0; 1253 if (trimStart < trimEnd) { 1254 // Update the bookkeeping for framesReady() 1255 for (size_t i = trimStart; i < trimEnd; ++i) { 1256 updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim"); 1257 } 1258 1259 // Now actually remove the buffers from the queue. 1260 mTimedBufferQueue.removeItemsAt(trimStart, trimEnd); 1261 } 1262} 1263 1264void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l( 1265 const char* logTag) { 1266 ALOG_ASSERT(mTimedBufferQueue.size() > 0, 1267 "%s called (reason \"%s\"), but timed buffer queue has no" 1268 " elements to trim.", __FUNCTION__, logTag); 1269 1270 updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag); 1271 mTimedBufferQueue.removeAt(0); 1272} 1273 1274void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l( 1275 const TimedBuffer& buf, 1276 const char* logTag __unused) { 1277 uint32_t bufBytes = buf.buffer()->size(); 1278 uint32_t consumedAlready = buf.position(); 1279 1280 ALOG_ASSERT(consumedAlready <= bufBytes, 1281 "Bad bookkeeping while updating frames pending. Timed buffer is" 1282 " only %u bytes long, but claims to have consumed %u" 1283 " bytes. (update reason: \"%s\")", 1284 bufBytes, consumedAlready, logTag); 1285 1286 uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize; 1287 ALOG_ASSERT(mFramesPendingInQueue >= bufFrames, 1288 "Bad bookkeeping while updating frames pending. Should have at" 1289 " least %u queued frames, but we think we have only %u. 
(update"
                " reason: \"%s\")",
                bufFrames, mFramesPendingInQueue, logTag);

    mFramesPendingInQueue -= bufFrames;
}

// Append a timed buffer (data + presentation timestamp) to the queue and credit
// its frames to the framesReady() count. Fails if no media time transform has
// been set yet.
status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
    const sp<IMemory>& buffer, int64_t pts) {

    {
        Mutex::Autolock mttLock(mMediaTimeTransformLock);
        if (!mMediaTimeTransformValid)
            return INVALID_OPERATION;
    }

    Mutex::Autolock _l(mTimedBufferQueueLock);

    uint32_t bufFrames = buffer->size() / mFrameSize;
    mFramesPendingInQueue += bufFrames;
    mTimedBufferQueue.add(TimedBuffer(buffer, pts));

    return NO_ERROR;
}

// Install the media-time -> target-timeline transform used to schedule queued
// buffers; target must be LOCAL_TIME or COMMON_TIME.
status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {

    ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
           xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
           target);

    if (!(target == TimedAudioTrack::LOCAL_TIME ||
          target == TimedAudioTrack::COMMON_TIME)) {
        return BAD_VALUE;
    }

    Mutex::Autolock lock(mMediaTimeTransformLock);
    mMediaTimeTransform = xform;
    mMediaTimeTransformTarget = target;
    mMediaTimeTransformValid = true;

    return NO_ERROR;
}

#define min(a, b) ((a) < (b) ?
(a) : (b))

// implementation of getNextBuffer for tracks whose buffers have timestamps
status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
    AudioBufferProvider::Buffer* buffer, int64_t pts)
{
    if (pts == AudioBufferProvider::kInvalidPTS) {
        buffer->raw = NULL;
        buffer->frameCount = 0;
        mTimedAudioOutputOnTime = false;
        return INVALID_OPERATION;
    }

    Mutex::Autolock _l(mTimedBufferQueueLock);

    ALOG_ASSERT(!mQueueHeadInFlight,
                "getNextBuffer called without releaseBuffer!");

    // Loop until we either yield samples/silence or run out of queued buffers;
    // iterations that drop a stale head buffer 'continue' back here.
    while (true) {

        // if we have no timed buffers, then fail
        if (mTimedBufferQueue.isEmpty()) {
            buffer->raw = NULL;
            buffer->frameCount = 0;
            return NOT_ENOUGH_DATA;
        }

        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);

        // calculate the PTS of the head of the timed buffer queue expressed in
        // local time
        int64_t headLocalPTS;
        {
            Mutex::Autolock mttLock(mMediaTimeTransformLock);

            ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");

            if (mMediaTimeTransform.a_to_b_denom == 0) {
                // the transform represents a pause, so yield silence
                timedYieldSilence_l(buffer->frameCount, buffer);
                return NO_ERROR;
            }

            int64_t transformedPTS;
            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
                                                        &transformedPTS)) {
                // the transform failed. this shouldn't happen, but if it does
                // then just drop this buffer
                ALOGW("timedGetNextBuffer transform failed");
                buffer->raw = NULL;
                buffer->frameCount = 0;
                trimTimedBufferQueueHead_l("getNextBuffer; no transform");
                return NO_ERROR;
            }

            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
                                                          &headLocalPTS)) {
                    buffer->raw = NULL;
                    buffer->frameCount = 0;
                    return INVALID_OPERATION;
                }
            } else {
                headLocalPTS = transformedPTS;
            }
        }

        uint32_t sr = sampleRate();

        // adjust the head buffer's PTS to reflect the portion of the head buffer
        // that has already been consumed
        int64_t effectivePTS = headLocalPTS +
                ((head.position() / mFrameSize) * mLocalTimeFreq / sr);

        // Calculate the delta in samples between the head of the input buffer
        // queue and the start of the next output buffer that will be written.
        // If the transformation fails because of over or underflow, it means
        // that the sample's position in the output stream is so far out of
        // whack that it should just be dropped.
        int64_t sampleDelta;
        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
            ALOGV("*** head buffer is too far from PTS: dropped buffer");
            trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
                                       " mix");
            continue;
        }
        // sampleDelta is a signed Q31.32 fixed-point sample count from here on.
        if (!mLocalTimeToSampleTransform.doForwardTransform(
                (effectivePTS - pts) << 32, &sampleDelta)) {
            ALOGV("*** too late during sample rate transform: dropped buffer");
            trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
            continue;
        }

        ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
               " sampleDelta=[%d.%08x]",
               head.pts(), head.position(), pts,
               static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
                   + (sampleDelta >> 32)),
               static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));

        // if the delta between the ideal placement for the next input sample and
        // the current output position is within this threshold, then we will
        // concatenate the next input samples to the previous output
        const int64_t kSampleContinuityThreshold =
                (static_cast<int64_t>(sr) << 32) / 250;

        // if this is the first buffer of audio that we're emitting from this track
        // then it should be almost exactly on time.
        const int64_t kSampleStartupThreshold = 1LL << 32;

        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
            (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
            // the next input is close enough to being on time, so concatenate it
            // with the last output
            timedYieldSamples_l(buffer);

            ALOGVV("*** on time: head.pos=%d frameCount=%u",
                   head.position(), buffer->frameCount);
            return NO_ERROR;
        }

        // Looks like our output is not on time. Reset our on timed status.
        // Next time we mix samples from our input queue, then should be within
        // the StartupThreshold.
        mTimedAudioOutputOnTime = false;
        if (sampleDelta > 0) {
            // the gap between the current output position and the proper start of
            // the next input sample is too big, so fill it with silence
            // (+0x80000000 rounds the Q31.32 value to the nearest whole frame)
            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;

            timedYieldSilence_l(framesUntilNextInput, buffer);
            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
            return NO_ERROR;
        } else {
            // the next input sample is late
            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
            size_t onTimeSamplePosition =
                    head.position() + lateFrames * mFrameSize;

            if (onTimeSamplePosition > head.buffer()->size()) {
                // all the remaining samples in the head are too late, so
                // drop it and move on
                ALOGV("*** too late: dropped buffer");
                trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
                continue;
            } else {
                // skip over the late samples
                head.setPosition(onTimeSamplePosition);

                // yield the available samples
                timedYieldSamples_l(buffer);

                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
                return NO_ERROR;
            }
        }
    }
}

// Yield samples from the timed buffer queue head up to the given output
// buffer's capacity.
1495// 1496// Caller must hold mTimedBufferQueueLock 1497void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l( 1498 AudioBufferProvider::Buffer* buffer) { 1499 1500 const TimedBuffer& head = mTimedBufferQueue[0]; 1501 1502 buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) + 1503 head.position()); 1504 1505 uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) / 1506 mFrameSize); 1507 size_t framesRequested = buffer->frameCount; 1508 buffer->frameCount = min(framesLeftInHead, framesRequested); 1509 1510 mQueueHeadInFlight = true; 1511 mTimedAudioOutputOnTime = true; 1512} 1513 1514// Yield samples of silence up to the given output buffer's capacity 1515// 1516// Caller must hold mTimedBufferQueueLock 1517void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l( 1518 uint32_t numFrames, AudioBufferProvider::Buffer* buffer) { 1519 1520 // lazily allocate a buffer filled with silence 1521 if (mTimedSilenceBufferSize < numFrames * mFrameSize) { 1522 delete [] mTimedSilenceBuffer; 1523 mTimedSilenceBufferSize = numFrames * mFrameSize; 1524 mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize]; 1525 memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize); 1526 } 1527 1528 buffer->raw = mTimedSilenceBuffer; 1529 size_t framesRequested = buffer->frameCount; 1530 buffer->frameCount = min(numFrames, framesRequested); 1531 1532 mTimedAudioOutputOnTime = false; 1533} 1534 1535// AudioBufferProvider interface 1536void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer( 1537 AudioBufferProvider::Buffer* buffer) { 1538 1539 Mutex::Autolock _l(mTimedBufferQueueLock); 1540 1541 // If the buffer which was just released is part of the buffer at the head 1542 // of the queue, be sure to update the amt of the buffer which has been 1543 // consumed. 
If the buffer being returned is not part of the head of the 1544 // queue, its either because the buffer is part of the silence buffer, or 1545 // because the head of the timed queue was trimmed after the mixer called 1546 // getNextBuffer but before the mixer called releaseBuffer. 1547 if (buffer->raw == mTimedSilenceBuffer) { 1548 ALOG_ASSERT(!mQueueHeadInFlight, 1549 "Queue head in flight during release of silence buffer!"); 1550 goto done; 1551 } 1552 1553 ALOG_ASSERT(mQueueHeadInFlight, 1554 "TimedTrack::releaseBuffer of non-silence buffer, but no queue" 1555 " head in flight."); 1556 1557 if (mTimedBufferQueue.size()) { 1558 TimedBuffer& head = mTimedBufferQueue.editItemAt(0); 1559 1560 void* start = head.buffer()->pointer(); 1561 void* end = reinterpret_cast<void*>( 1562 reinterpret_cast<uint8_t*>(head.buffer()->pointer()) 1563 + head.buffer()->size()); 1564 1565 ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end), 1566 "released buffer not within the head of the timed buffer" 1567 " queue; qHead = [%p, %p], released buffer = %p", 1568 start, end, buffer->raw); 1569 1570 head.setPosition(head.position() + 1571 (buffer->frameCount * mFrameSize)); 1572 mQueueHeadInFlight = false; 1573 1574 ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount, 1575 "Bad bookkeeping during releaseBuffer! 
Should have at" 1576 " least %u queued frames, but we think we have only %u", 1577 buffer->frameCount, mFramesPendingInQueue); 1578 1579 mFramesPendingInQueue -= buffer->frameCount; 1580 1581 if ((static_cast<size_t>(head.position()) >= head.buffer()->size()) 1582 || mTrimQueueHeadOnRelease) { 1583 trimTimedBufferQueueHead_l("releaseBuffer"); 1584 mTrimQueueHeadOnRelease = false; 1585 } 1586 } else { 1587 LOG_ALWAYS_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no" 1588 " buffers in the timed buffer queue"); 1589 } 1590 1591done: 1592 buffer->raw = 0; 1593 buffer->frameCount = 0; 1594} 1595 1596size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const { 1597 Mutex::Autolock _l(mTimedBufferQueueLock); 1598 return mFramesPendingInQueue; 1599} 1600 1601AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer() 1602 : mPTS(0), mPosition(0) {} 1603 1604AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer( 1605 const sp<IMemory>& buffer, int64_t pts) 1606 : mBuffer(buffer), mPTS(pts), mPosition(0) {} 1607 1608 1609// ---------------------------------------------------------------------------- 1610 1611AudioFlinger::PlaybackThread::OutputTrack::OutputTrack( 1612 PlaybackThread *playbackThread, 1613 DuplicatingThread *sourceThread, 1614 uint32_t sampleRate, 1615 audio_format_t format, 1616 audio_channel_mask_t channelMask, 1617 size_t frameCount, 1618 int uid) 1619 : Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount, 1620 NULL, 0, uid, IAudioFlinger::TRACK_DEFAULT), 1621 mActive(false), mSourceThread(sourceThread), mClientProxy(NULL) 1622{ 1623 1624 if (mCblk != NULL) { 1625 mOutBuffer.frameCount = 0; 1626 playbackThread->mTracks.add(this); 1627 ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, " 1628 "frameCount %u, mChannelMask 0x%08x", 1629 mCblk, mBuffer, 1630 frameCount, mChannelMask); 1631 // since client and server are in the same process, 1632 // the buffer has the same 
virtual address on both sides 1633 mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize); 1634 mClientProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY); 1635 mClientProxy->setSendLevel(0.0); 1636 mClientProxy->setSampleRate(sampleRate); 1637 mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize, 1638 true /*clientInServer*/); 1639 } else { 1640 ALOGW("Error creating output track on thread %p", playbackThread); 1641 } 1642} 1643 1644AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack() 1645{ 1646 clearBufferQueue(); 1647 delete mClientProxy; 1648 // superclass destructor will now delete the server proxy and shared memory both refer to 1649} 1650 1651status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event, 1652 int triggerSession) 1653{ 1654 status_t status = Track::start(event, triggerSession); 1655 if (status != NO_ERROR) { 1656 return status; 1657 } 1658 1659 mActive = true; 1660 mRetryCount = 127; 1661 return status; 1662} 1663 1664void AudioFlinger::PlaybackThread::OutputTrack::stop() 1665{ 1666 Track::stop(); 1667 clearBufferQueue(); 1668 mOutBuffer.frameCount = 0; 1669 mActive = false; 1670} 1671 1672bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames) 1673{ 1674 Buffer *pInBuffer; 1675 Buffer inBuffer; 1676 uint32_t channelCount = mChannelCount; 1677 bool outputBufferFull = false; 1678 inBuffer.frameCount = frames; 1679 inBuffer.i16 = data; 1680 1681 uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs(); 1682 1683 if (!mActive && frames != 0) { 1684 start(); 1685 sp<ThreadBase> thread = mThread.promote(); 1686 if (thread != 0) { 1687 MixerThread *mixerThread = (MixerThread *)thread.get(); 1688 if (mFrameCount > frames) { 1689 if (mBufferQueue.size() < kMaxOverFlowBuffers) { 1690 uint32_t startFrames = (mFrameCount - frames); 1691 pInBuffer = new Buffer; 1692 pInBuffer->mBuffer = new int16_t[startFrames * channelCount]; 1693 
pInBuffer->frameCount = startFrames;
                    pInBuffer->i16 = pInBuffer->mBuffer;
                    memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
                    mBufferQueue.add(pInBuffer);
                } else {
                    ALOGW("OutputTrack::write() %p no more buffers in queue", this);
                }
            }
        }
    }

    while (waitTimeLeftMs) {
        // First write pending buffers, then new data
        if (mBufferQueue.size()) {
            pInBuffer = mBufferQueue.itemAt(0);
        } else {
            pInBuffer = &inBuffer;
        }

        if (pInBuffer->frameCount == 0) {
            break;
        }

        if (mOutBuffer.frameCount == 0) {
            mOutBuffer.frameCount = pInBuffer->frameCount;
            nsecs_t startTime = systemTime();
            status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
            if (status != NO_ERROR) {
                ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
                        mThread.unsafe_get(), status);
                outputBufferFull = true;
                break;
            }
            // Charge the time spent waiting for a buffer against the wait budget.
            uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
            if (waitTimeLeftMs >= waitTimeMs) {
                waitTimeLeftMs -= waitTimeMs;
            } else {
                waitTimeLeftMs = 0;
            }
        }

        // Copy as many frames as fit in the obtained output buffer.
        uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
                pInBuffer->frameCount;
        memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
        Proxy::Buffer buf;
        buf.mFrameCount = outFrames;
        buf.mRaw = NULL;
        mClientProxy->releaseBuffer(&buf);
        pInBuffer->frameCount -= outFrames;
        pInBuffer->i16 += outFrames * channelCount;
        mOutBuffer.frameCount -= outFrames;
        mOutBuffer.i16 += outFrames * channelCount;

        if (pInBuffer->frameCount == 0) {
            if (mBufferQueue.size()) {
                // A queued overflow buffer was fully drained; free it.
                mBufferQueue.removeAt(0);
                delete [] pInBuffer->mBuffer;
                delete pInBuffer;
                ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
                        mThread.unsafe_get(), mBufferQueue.size());
            } else {
                break;
            }
        }
    }

    // If we could not write all frames, allocate a buffer and queue it for next time.
    if (inBuffer.frameCount) {
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0 && !thread->standby()) {
            if (mBufferQueue.size() < kMaxOverFlowBuffers) {
                pInBuffer = new Buffer;
                pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
                pInBuffer->frameCount = inBuffer.frameCount;
                pInBuffer->i16 = pInBuffer->mBuffer;
                memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
                        sizeof(int16_t));
                mBufferQueue.add(pInBuffer);
                ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
                        mThread.unsafe_get(), mBufferQueue.size());
            } else {
                ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
                        mThread.unsafe_get(), this);
            }
        }
    }

    // Calling write() with a 0 length buffer, means that no more data will be written:
    // If no more buffers are pending, fill output track buffer to make sure it is started
    // by output mixer.
    if (frames == 0 && mBufferQueue.size() == 0) {
        // FIXME borken, replace by getting framesReady() from proxy
        size_t user = 0;    // was mCblk->user
        if (user < mFrameCount) {
            frames = mFrameCount - user;
            pInBuffer = new Buffer;
            pInBuffer->mBuffer = new int16_t[frames * channelCount];
            pInBuffer->frameCount = frames;
            pInBuffer->i16 = pInBuffer->mBuffer;
            memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
            mBufferQueue.add(pInBuffer);
        } else if (mActive) {
            stop();
        }
    }

    return outputBufferFull;
}

// Obtain a writable buffer from the client proxy, blocking up to waitTimeMs.
status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
        AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
{
    ClientProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    struct timespec timeout;
    timeout.tv_sec = waitTimeMs / 1000;
    timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
    status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    return status;
}

// Free all queued overflow buffers (queue owns both the Buffer structs and
// their sample arrays).
void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
{
    size_t size = mBufferQueue.size();

    for (size_t i = 0; i < size; i++) {
        Buffer *pBuffer = mBufferQueue.itemAt(i);
        delete [] pBuffer->mBuffer;
        delete pBuffer;
    }
    mBufferQueue.clear();
}


// ----------------------------------------------------------------------------
// Record
// ----------------------------------------------------------------------------

// RecordHandle: binder proxy object wrapping a RecordTrack for the client side.
AudioFlinger::RecordHandle::RecordHandle(
        const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
    : BnAudioRecord(),
    mRecordTrack(recordTrack)
{
}

AudioFlinger::RecordHandle::~RecordHandle() {
    // Stop via the non-virtual helper (virtual dispatch is unsafe in a destructor).
    stop_nonvirtual();
    mRecordTrack->destroy();
}

status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/
event,
        int triggerSession) {
    ALOGV("RecordHandle::start()");
    return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
}

void AudioFlinger::RecordHandle::stop() {
    stop_nonvirtual();
}

// Non-virtual stop so it can also be called safely from the destructor.
void AudioFlinger::RecordHandle::stop_nonvirtual() {
    ALOGV("RecordHandle::stop()");
    mRecordTrack->stop();
}

status_t AudioFlinger::RecordHandle::onTransact(
        uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    return BnAudioRecord::onTransact(code, data, reply, flags);
}

// ----------------------------------------------------------------------------

// RecordTrack constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::RecordThread::RecordTrack::RecordTrack(
            RecordThread *thread,
            const sp<Client>& client,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            int sessionId,
            int uid,
            IAudioFlinger::track_flags_t flags)
    : TrackBase(thread, client, sampleRate, format,
                  channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid,
                  flags, false /*isOut*/,
                  flags & IAudioFlinger::TRACK_FAST ? ALLOC_PIPE : ALLOC_CBLK),
        mOverflow(false), mResampler(NULL), mRsmpOutBuffer(NULL), mRsmpOutFrameCount(0),
        // See real initialization of mRsmpInFront at RecordThread::start()
        mRsmpInUnrel(0), mRsmpInFront(0), mFramesToDrop(0), mResamplerBufferProvider(NULL)
{
    if (mCblk == NULL) {
        // TrackBase failed to allocate the control block; leave the track unusable.
        return;
    }

    mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize);

    uint32_t channelCount = audio_channel_count_from_in_mask(channelMask);
    // FIXME I don't understand either of the channel count checks
    if (thread->mSampleRate != sampleRate && thread->mChannelCount <= FCC_2 &&
            channelCount <= FCC_2) {
        // sink SR
        mResampler = AudioResampler::create(AUDIO_FORMAT_PCM_16_BIT,
                thread->mChannelCount, sampleRate);
        // source SR
        // NOTE(review): AudioResampler::create()'s result is used without a null
        // check — confirm create() cannot return NULL for these arguments.
        mResampler->setSampleRate(thread->mSampleRate);
        mResampler->setVolume(AudioMixer::UNITY_GAIN_FLOAT, AudioMixer::UNITY_GAIN_FLOAT);
        mResamplerBufferProvider = new ResamplerBufferProvider(this);
    }

    if (flags & IAudioFlinger::TRACK_FAST) {
        ALOG_ASSERT(thread->mFastTrackAvail);
        thread->mFastTrackAvail = false;
    }
}

AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
{
    ALOGV("%s", __func__);
    delete mResampler;
    delete[] mRsmpOutBuffer;
    delete mResamplerBufferProvider;
}

// AudioBufferProvider interface
status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
        int64_t pts __unused)
{
    ServerProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    status_t status = mServerProxy->obtainBuffer(&buf);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
    if (buf.mFrameCount == 0) {
        // Client is not consuming fast enough; flag the overrun in the cblk.
        // FIXME also wake futex so that overrun is noticed more quickly
        (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
    }
    return status;
}

status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
                                                        int triggerSession)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        return recordThread->start(this, event, triggerSession);
    } else {
        return BAD_VALUE;
    }
}

void AudioFlinger::RecordThread::RecordTrack::stop()
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        RecordThread *recordThread = (RecordThread *)thread.get();
        if (recordThread->stop(this)) {
            AudioSystem::stopInput(recordThread->id());
        }
    }
}

void AudioFlinger::RecordThread::RecordTrack::destroy()
{
    // see comments at AudioFlinger::PlaybackThread::Track::destroy()
    sp<RecordTrack> keep(this);
    {
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            if (mState == ACTIVE || mState == RESUMING) {
                AudioSystem::stopInput(thread->id());
            }
            AudioSystem::releaseInput(thread->id());
            Mutex::Autolock _l(thread->mLock);
            RecordThread *recordThread = (RecordThread *) thread.get();
            recordThread->destroyTrack_l(this);
        }
    }
}

// Mark the shared control block invalid and wake any client blocked on the futex.
void AudioFlinger::RecordThread::RecordTrack::invalidate()
{
    // FIXME should use proxy, and needs work
    audio_track_cblk_t* cblk = mCblk;
    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
    android_atomic_release_store(0x40000000, &cblk->mFutex);
    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
    (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
}


/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
{
    result.append("    Active Client Fmt Chn mask Session S   Server fCount SRate\n");
}

void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size, bool active)
{
snprintf(buffer, size, " %6s %6u %3u %08X %7u %1d %08X %6zu %5u\n", 1996 active ? "yes" : "no", 1997 (mClient == 0) ? getpid_cached : mClient->pid(), 1998 mFormat, 1999 mChannelMask, 2000 mSessionId, 2001 mState, 2002 mCblk->mServer, 2003 mFrameCount, 2004 mSampleRate); 2005 2006} 2007 2008void AudioFlinger::RecordThread::RecordTrack::handleSyncStartEvent(const sp<SyncEvent>& event) 2009{ 2010 if (event == mSyncStartEvent) { 2011 ssize_t framesToDrop = 0; 2012 sp<ThreadBase> threadBase = mThread.promote(); 2013 if (threadBase != 0) { 2014 // TODO: use actual buffer filling status instead of 2 buffers when info is available 2015 // from audio HAL 2016 framesToDrop = threadBase->mFrameCount * 2; 2017 } 2018 mFramesToDrop = framesToDrop; 2019 } 2020} 2021 2022void AudioFlinger::RecordThread::RecordTrack::clearSyncStartEvent() 2023{ 2024 if (mSyncStartEvent != 0) { 2025 mSyncStartEvent->cancel(); 2026 mSyncStartEvent.clear(); 2027 } 2028 mFramesToDrop = 0; 2029} 2030 2031}; // namespace android 2032