Tracks.cpp revision ee499291404a192b059f2e04c5afc65aa6cdd74c
1/*
2**
3** Copyright 2012, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19#define LOG_TAG "AudioFlinger"
20//#define LOG_NDEBUG 0
21
22#include "Configuration.h"
23#include <math.h>
24#include <sys/syscall.h>
25#include <utils/Log.h>
26
27#include <private/media/AudioTrackShared.h>
28
29#include <common_time/cc_helper.h>
30#include <common_time/local_clock.h>
31
32#include "AudioMixer.h"
33#include "AudioFlinger.h"
34#include "ServiceUtilities.h"
35
36#include <media/nbaio/Pipe.h>
37#include <media/nbaio/PipeReader.h>
38
39// ----------------------------------------------------------------------------
40
41// Note: the following macro is used for extremely verbose logging messages.  In
42// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
43// 0; but one side effect of this is to turn on all LOGVs as well.  Some messages
44// are so verbose that we want to suppress them even when we have ALOG_ASSERT
45// turned on.  Do not uncomment the #define below unless you really know what you
46// are doing and want to see all of the extremely verbose messages.
47//#define VERY_VERY_VERBOSE_LOGGING
48#ifdef VERY_VERY_VERBOSE_LOGGING
49#define ALOGVV ALOGV
50#else
51#define ALOGVV(a...) do { } while(0)
52#endif
53
54namespace android {
55
56// ----------------------------------------------------------------------------
57//      TrackBase
58// ----------------------------------------------------------------------------
59
60static volatile int32_t nextTrackId = 55;
61
62// TrackBase constructor must be called with AudioFlinger::mLock held
63AudioFlinger::ThreadBase::TrackBase::TrackBase(
64            ThreadBase *thread,
65            const sp<Client>& client,
66            uint32_t sampleRate,
67            audio_format_t format,
68            audio_channel_mask_t channelMask,
69            size_t frameCount,
70            const sp<IMemory>& sharedBuffer,
71            int sessionId,
72            int clientUid,
73            bool isOut)
74    :   RefBase(),
75        mThread(thread),
76        mClient(client),
77        mCblk(NULL),
78        // mBuffer
79        mState(IDLE),
80        mSampleRate(sampleRate),
81        mFormat(format),
82        mChannelMask(channelMask),
83        mChannelCount(popcount(channelMask)),
84        mFrameSize(audio_is_linear_pcm(format) ?
85                mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
86        mFrameCount(frameCount),
87        mSessionId(sessionId),
88        mIsOut(isOut),
89        mServerProxy(NULL),
90        mId(android_atomic_inc(&nextTrackId)),
91        mTerminated(false)
92{
93    // trust the specified uid only if the caller is mediaserver itself; otherwise use the calling uid
94    if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
95        int newclientUid = IPCThreadState::self()->getCallingUid();
96        if (clientUid != -1 && clientUid != newclientUid) {
97            ALOGW("uid %d tried to pass itself off as %d", newclientUid, clientUid);
98        }
99        clientUid = newclientUid;
100    }
101    // clientUid contains the uid of the app that is responsible for this track, so we can blame
102    // battery usage on it.
103    mUid = clientUid;
104
105    // client == 0 implies sharedBuffer == 0
106    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
107
108    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
109            sharedBuffer->size());
110
111    // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
112    size_t size = sizeof(audio_track_cblk_t);
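    // streaming tracks round the buffer frame count up to a power of 2 (roundup());
    // static tracks use the client-provided shared buffer's frame count as-is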
113    size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize;
114    if (sharedBuffer == 0) {
115        size += bufferSize;
116    }
117
118    if (client != 0) {
119        mCblkMemory = client->heap()->allocate(size);
120        if (mCblkMemory != 0) {
121            mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
122            // can't assume mCblk != NULL
123        } else {
124            ALOGE("not enough memory for AudioTrack size=%u", size);
125            client->heap()->dump("AudioTrack");
126            return;
127        }
128    } else {
129        // this syntax avoids calling the audio_track_cblk_t constructor twice
130        mCblk = (audio_track_cblk_t *) new uint8_t[size];
131        // assume mCblk != NULL
132    }
133
134    // construct the shared structure in-place.
135    if (mCblk != NULL) {
136        new(mCblk) audio_track_cblk_t();
137        // clear all buffers
138        mCblk->frameCount_ = frameCount;
139        if (sharedBuffer == 0) {
140            mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
141            memset(mBuffer, 0, bufferSize);
142        } else {
143            mBuffer = sharedBuffer->pointer();
144#if 0
145            mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
146#endif
147        }
148
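        // optionally duplicate ("tee") this track's data into a local pipe for debug dumping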
149#ifdef TEE_SINK
150        if (mTeeSinkTrackEnabled) {
151            NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount);
152            if (pipeFormat != Format_Invalid) {
153                Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
154                size_t numCounterOffers = 0;
155                const NBAIO_Format offers[1] = {pipeFormat};
156                ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
157                ALOG_ASSERT(index == 0);
158                PipeReader *pipeReader = new PipeReader(*pipe);
159                numCounterOffers = 0;
160                index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
161                ALOG_ASSERT(index == 0);
162                mTeeSink = pipe;
163                mTeeSource = pipeReader;
164            }
165        }
166#endif
167
168    }
169}
170
171AudioFlinger::ThreadBase::TrackBase::~TrackBase()
172{
173#ifdef TEE_SINK
174    dumpTee(-1, mTeeSource, mId);
175#endif
176    // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
177    delete mServerProxy;
178    if (mCblk != NULL) {
179        mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
180        if (mClient == 0) {
181            // the raw storage came from new uint8_t[], so release it with a matching delete[]
182            delete[] (uint8_t *)mCblk;
183        }
184    }
185    mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
186    if (mClient != 0) {
187        // Client destructor must run with AudioFlinger mutex locked
188        Mutex::Autolock _l(mClient->audioFlinger()->mLock);
189        // If the client's reference count drops to zero, the associated destructor
190        // must run with AudioFlinger lock held. Thus the explicit clear() rather than
191        // relying on the automatic clear() at end of scope.
192        mClient.clear();
193    }
194}
195
196// AudioBufferProvider interface
197// getNextBuffer() = 0;
198// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
199void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
200{
201#ifdef TEE_SINK
202    if (mTeeSink != 0) {
203        (void) mTeeSink->write(buffer->raw, buffer->frameCount);
204    }
205#endif
206
207    ServerProxy::Buffer buf;
208    buf.mFrameCount = buffer->frameCount;
209    buf.mRaw = buffer->raw;
210    buffer->frameCount = 0;
211    buffer->raw = NULL;
212    mServerProxy->releaseBuffer(&buf);
213}
214
215status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
216{
217    mSyncEvents.add(event);
218    return NO_ERROR;
219}
220
221// ----------------------------------------------------------------------------
222//      Playback
223// ----------------------------------------------------------------------------
224
225AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
226    : BnAudioTrack(),
227      mTrack(track)
228{
229}
230
231AudioFlinger::TrackHandle::~TrackHandle() {
232    // just stop the track on deletion, associated resources
233    // will be freed from the main thread once all pending buffers have
234    // been played. Unless it's not in the active track list, in which
235    // case we free everything now...
236    mTrack->destroy();
237}
238
239sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
240    return mTrack->getCblk();
241}
242
243status_t AudioFlinger::TrackHandle::start() {
244    return mTrack->start();
245}
246
247void AudioFlinger::TrackHandle::stop() {
248    mTrack->stop();
249}
250
251void AudioFlinger::TrackHandle::flush() {
252    mTrack->flush();
253}
254
255void AudioFlinger::TrackHandle::pause() {
256    mTrack->pause();
257}
258
259status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
260{
261    return mTrack->attachAuxEffect(EffectId);
262}
263
264status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
265                                                         sp<IMemory>* buffer) {
266    if (!mTrack->isTimedTrack())
267        return INVALID_OPERATION;
268
269    PlaybackThread::TimedTrack* tt =
270            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
271    return tt->allocateTimedBuffer(size, buffer);
272}
273
274status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
275                                                     int64_t pts) {
276    if (!mTrack->isTimedTrack())
277        return INVALID_OPERATION;
278
279    PlaybackThread::TimedTrack* tt =
280            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
281    return tt->queueTimedBuffer(buffer, pts);
282}
283
284status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
285    const LinearTransform& xform, int target) {
286
287    if (!mTrack->isTimedTrack())
288        return INVALID_OPERATION;
289
290    PlaybackThread::TimedTrack* tt =
291            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
292    return tt->setMediaTimeTransform(
293        xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
294}
295
296status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
297    return mTrack->setParameters(keyValuePairs);
298}
299
300status_t AudioFlinger::TrackHandle::getTimestamp(AudioTimestamp& timestamp)
301{
302    return mTrack->getTimestamp(timestamp);
303}
304
305
306void AudioFlinger::TrackHandle::signal()
307{
308    return mTrack->signal();
309}
310
311status_t AudioFlinger::TrackHandle::onTransact(
312    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
313{
314    return BnAudioTrack::onTransact(code, data, reply, flags);
315}
316
317// ----------------------------------------------------------------------------
318
319// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
320AudioFlinger::PlaybackThread::Track::Track(
321            PlaybackThread *thread,
322            const sp<Client>& client,
323            audio_stream_type_t streamType,
324            uint32_t sampleRate,
325            audio_format_t format,
326            audio_channel_mask_t channelMask,
327            size_t frameCount,
328            const sp<IMemory>& sharedBuffer,
329            int sessionId,
330            int uid,
331            IAudioFlinger::track_flags_t flags)
332    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
333            sessionId, uid, true /*isOut*/),
334    mFillingUpStatus(FS_INVALID),
335    // mRetryCount initialized later when needed
336    mSharedBuffer(sharedBuffer),
337    mStreamType(streamType),
338    mName(-1),  // see note below
339    mMainBuffer(thread->mixBuffer()),
340    mAuxBuffer(NULL),
341    mAuxEffectId(0), mHasVolumeController(false),
342    mPresentationCompleteFrames(0),
343    mFlags(flags),
344    mFastIndex(-1),
345    mCachedVolume(1.0),
346    mIsInvalid(false),
347    mAudioTrackServerProxy(NULL),
348    mResumeToStopping(false)
349{
350    if (mCblk != NULL) {
351        if (sharedBuffer == 0) {
352            mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
353                    mFrameSize);
354        } else {
355            mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
356                    mFrameSize);
357        }
358        mServerProxy = mAudioTrackServerProxy;
359        // to avoid leaking a track name, do not allocate one unless there is an mCblk
360        mName = thread->getTrackName_l(channelMask, sessionId);
361        if (mName < 0) {
362            ALOGE("no more track names available");
363            return;
364        }
365        // only allocate a fast track index if we were able to allocate a normal track name
366        if (flags & IAudioFlinger::TRACK_FAST) {
367            mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
368            ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
369            int i = __builtin_ctz(thread->mFastTrackAvailMask);
370            ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
371            // FIXME This is too eager.  We allocate a fast track index before the
372            //       fast track becomes active.  Since fast tracks are a scarce resource,
373            //       this means we are potentially denying other more important fast tracks from
374            //       being created.  It would be better to allocate the index dynamically.
375            mFastIndex = i;
376            // Read the initial underruns because this field is never cleared by the fast mixer
377            mObservedUnderruns = thread->getFastTrackUnderruns(i);
378            thread->mFastTrackAvailMask &= ~(1 << i);
379        }
380    }
381    ALOGV("Track constructor name %d, calling pid %d", mName,
382            IPCThreadState::self()->getCallingPid());
383}
384
385AudioFlinger::PlaybackThread::Track::~Track()
386{
387    ALOGV("PlaybackThread::Track destructor");
388
389    // The destructor would clear mSharedBuffer,
390    // but it will not push the decremented reference count,
391    // leaving the client's IMemory dangling indefinitely.
392    // This prevents that leak.
393    if (mSharedBuffer != 0) {
394        mSharedBuffer.clear();
395        // flush the binder command buffer
396        IPCThreadState::self()->flushCommands();
397    }
398}
399
400void AudioFlinger::PlaybackThread::Track::destroy()
401{
402    // NOTE: destroyTrack_l() can remove a strong reference to this Track
403    // by removing it from mTracks vector, so there is a risk that this Track's
404    // destructor is called. As the destructor needs to lock mLock,
405    // we must acquire a strong reference on this Track before locking mLock
406    // here so that the destructor is called only when exiting this function.
407    // On the other hand, as long as Track::destroy() is only called by
408    // TrackHandle destructor, the TrackHandle still holds a strong ref on
409    // this Track with its member mTrack.
410    sp<Track> keep(this);
411    { // scope for mLock
412        sp<ThreadBase> thread = mThread.promote();
413        if (thread != 0) {
414            Mutex::Autolock _l(thread->mLock);
415            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
416            bool wasActive = playbackThread->destroyTrack_l(this);
417            if (!isOutputTrack() && !wasActive) {
418                AudioSystem::releaseOutput(thread->id());
419            }
420        }
421    }
422}
423
424/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
425{
426    result.append("   Name Client Type      Fmt Chn mask Session fCount S F SRate  "
427                  "L dB  R dB    Server Main buf  Aux Buf Flags UndFrmCnt\n");
428}
429
430void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
431{
432    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
433    if (isFastTrack()) {
434        sprintf(buffer, "   F %2d", mFastIndex);
435    } else {
436        sprintf(buffer, "   %4d", mName - AudioMixer::TRACK0);
437    }
438    track_state state = mState;
439    char stateChar;
440    if (isTerminated()) {
441        stateChar = 'T';
442    } else {
443        switch (state) {
444        case IDLE:
445            stateChar = 'I';
446            break;
447        case STOPPING_1:
448            stateChar = 's';
449            break;
450        case STOPPING_2:
451            stateChar = '5';
452            break;
453        case STOPPED:
454            stateChar = 'S';
455            break;
456        case RESUMING:
457            stateChar = 'R';
458            break;
459        case ACTIVE:
460            stateChar = 'A';
461            break;
462        case PAUSING:
463            stateChar = 'p';
464            break;
465        case PAUSED:
466            stateChar = 'P';
467            break;
468        case FLUSHED:
469            stateChar = 'F';
470            break;
471        default:
472            stateChar = '?';
473            break;
474        }
475    }
476    char nowInUnderrun;
477    switch (mObservedUnderruns.mBitFields.mMostRecent) {
478    case UNDERRUN_FULL:
479        nowInUnderrun = ' ';
480        break;
481    case UNDERRUN_PARTIAL:
482        nowInUnderrun = '<';
483        break;
484    case UNDERRUN_EMPTY:
485        nowInUnderrun = '*';
486        break;
487    default:
488        nowInUnderrun = '?';
489        break;
490    }
491    snprintf(&buffer[7], size-7, " %6u %4u %08X %08X %7u %6zu %1c %1d %5u %5.2g %5.2g  "
492                                 "%08X %p %p 0x%03X %9u%c\n",
493            (mClient == 0) ? getpid_cached : mClient->pid(),
494            mStreamType,
495            mFormat,
496            mChannelMask,
497            mSessionId,
498            mFrameCount,
499            stateChar,
500            mFillingUpStatus,
501            mAudioTrackServerProxy->getSampleRate(),
502            20.0 * log10((vlr & 0xFFFF) / 4096.0),
503            20.0 * log10((vlr >> 16) / 4096.0),
504            mCblk->mServer,
505            mMainBuffer,
506            mAuxBuffer,
507            mCblk->mFlags,
508            mAudioTrackServerProxy->getUnderrunFrames(),
509            nowInUnderrun);
510}
511
512uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
513    return mAudioTrackServerProxy->getSampleRate();
514}
515
516// AudioBufferProvider interface
517status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
518        AudioBufferProvider::Buffer* buffer, int64_t pts)
519{
520    ServerProxy::Buffer buf;
521    size_t desiredFrames = buffer->frameCount;
522    buf.mFrameCount = desiredFrames;
523    status_t status = mServerProxy->obtainBuffer(&buf);
524    buffer->frameCount = buf.mFrameCount;
525    buffer->raw = buf.mRaw;
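    // if no frames are available, count the whole request as underrun frames for reporting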
526    if (buf.mFrameCount == 0) {
527        mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
528    }
529    return status;
530}
531
532// releaseBuffer() is not overridden
533
534// ExtendedAudioBufferProvider interface
535
536// Note that framesReady() takes a mutex on the control block using tryLock().
537// This could result in priority inversion if framesReady() is called by the normal mixer,
538// as the normal mixer thread runs at lower
539// priority than the client's callback thread:  there is a short window within framesReady()
540// during which the normal mixer could be preempted, and the client callback would block.
541// Another problem can occur if framesReady() is called by the fast mixer:
542// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
543// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
544size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
545    return mAudioTrackServerProxy->framesReady();
546}
547
548size_t AudioFlinger::PlaybackThread::Track::framesReleased() const
549{
550    return mAudioTrackServerProxy->framesReleased();
551}
552
553// Don't call for fast tracks; the framesReady() could result in priority inversion
554bool AudioFlinger::PlaybackThread::Track::isReady() const {
555    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
556        return true;
557    }
558
559    if (framesReady() >= mFrameCount ||
560            (mCblk->mFlags & CBLK_FORCEREADY)) {
561        mFillingUpStatus = FS_FILLED;
562        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
563        return true;
564    }
565    return false;
566}
567
568status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,
569                                                    int triggerSession)
570{
571    status_t status = NO_ERROR;
572    ALOGV("start(%d), calling pid %d session %d",
573            mName, IPCThreadState::self()->getCallingPid(), mSessionId);
574
575    sp<ThreadBase> thread = mThread.promote();
576    if (thread != 0) {
577        if (isOffloaded()) {
578            Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
579            Mutex::Autolock _lth(thread->mLock);
580            sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
581            if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
582                    (ec != 0 && ec->isNonOffloadableEnabled())) {
583                invalidate();
584                return PERMISSION_DENIED;
585            }
586        }
587        Mutex::Autolock _lth(thread->mLock);
588        track_state state = mState;
589        // here the track could be either new, or restarted
590        // in both cases "unstop" the track
591
592        if (state == PAUSED) {
593            if (mResumeToStopping) {
594                // the track was paused while it was stopping; resume the stop sequence
595                mState = TrackBase::STOPPING_1;
596                ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, thread.get());
597            } else {
598                mState = TrackBase::RESUMING;
599                ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, thread.get());
600            }
601        } else {
602            mState = TrackBase::ACTIVE;
603            ALOGV("? => ACTIVE (%d) on thread %p", mName, thread.get());
604        }
605
606        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
607        status = playbackThread->addTrack_l(this);
608        if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
609            triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
610            //  restore previous state if start was rejected by policy manager
611            if (status == PERMISSION_DENIED) {
612                mState = state;
613            }
614        }
615        // track was already in the active list, not a problem
616        if (status == ALREADY_EXISTS) {
617            status = NO_ERROR;
618        } else {
619            // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
620            // It is usually unsafe to access the server proxy from a binder thread.
621            // But in this case we know the mixer thread (whether normal mixer or fast mixer)
622            // isn't looking at this track yet:  we still hold the normal mixer thread lock,
623            // and for fast tracks the track is not yet in the fast mixer thread's active set.
624            ServerProxy::Buffer buffer;
625            buffer.mFrameCount = 1;
626            (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
627        }
628    } else {
629        status = BAD_VALUE;
630    }
631    return status;
632}
633
634void AudioFlinger::PlaybackThread::Track::stop()
635{
636    ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
637    sp<ThreadBase> thread = mThread.promote();
638    if (thread != 0) {
639        Mutex::Autolock _l(thread->mLock);
640        track_state state = mState;
641        if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
642            // If the track is not active (PAUSED and buffers full), flush buffers
643            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
644            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
645                reset();
646                mState = STOPPED;
647            } else if (!isFastTrack() && !isOffloaded()) {
648                mState = STOPPED;
649            } else {
650                // For fast tracks prepareTracks_l() will set state to STOPPING_2
651                // after presentation is complete
652                // For an offloaded track this starts a drain and state will
653                // move to STOPPING_2 when drain completes and then STOPPED
654                mState = STOPPING_1;
655            }
656            ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
657                    playbackThread);
658        }
659    }
660}
661
662void AudioFlinger::PlaybackThread::Track::pause()
663{
664    ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
665    sp<ThreadBase> thread = mThread.promote();
666    if (thread != 0) {
667        Mutex::Autolock _l(thread->mLock);
668        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
669        switch (mState) {
670        case STOPPING_1:
671        case STOPPING_2:
672            if (!isOffloaded()) {
673                /* nothing to do if track is not offloaded */
674                break;
675            }
676
677            // Offloaded track was draining, we need to carry on draining when resumed
678            mResumeToStopping = true;
679            // fall through...
680        case ACTIVE:
681        case RESUMING:
682            mState = PAUSING;
683            ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
684            playbackThread->broadcast_l();
685            break;
686
687        default:
688            break;
689        }
690    }
691}
692
693void AudioFlinger::PlaybackThread::Track::flush()
694{
695    ALOGV("flush(%d)", mName);
696    sp<ThreadBase> thread = mThread.promote();
697    if (thread != 0) {
698        Mutex::Autolock _l(thread->mLock);
699        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
700
701        if (isOffloaded()) {
702            // If offloaded we allow flush during any state except terminated
703            // and keep the track active to avoid problems if user is seeking
704            // rapidly and underlying hardware has a significant delay handling
705            // a pause
706            if (isTerminated()) {
707                return;
708            }
709
710            ALOGV("flush: offload flush");
711            reset();
712
713            if (mState == STOPPING_1 || mState == STOPPING_2) {
714                ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
715                mState = ACTIVE;
716            }
717
718            if (mState == ACTIVE) {
719                ALOGV("flush called in active state, resetting buffer time out retry count");
720                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
721            }
722
723            mResumeToStopping = false;
724        } else {
725            if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
726                    mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
727                return;
728            }
729            // No point remaining in PAUSED state after a flush => go to
730            // FLUSHED state
731            mState = FLUSHED;
732            // do not reset the track if it is still in the process of being stopped or paused.
733            // this will be done by prepareTracks_l() when the track is stopped.
734            // prepareTracks_l() will see mState == FLUSHED, then
735            // remove from active track list, reset(), and trigger presentation complete
736            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
737                reset();
738            }
739        }
740        // Prevent flush being lost if the track is flushed and then resumed
741        // before mixer thread can run. This is important when offloading
742        // because the hardware buffer could hold a large amount of audio
743        playbackThread->flushOutput_l();
744        playbackThread->broadcast_l();
745    }
746}
747
748void AudioFlinger::PlaybackThread::Track::reset()
749{
750    // Do not reset twice to avoid discarding data written just after a flush and before
751    // the audioflinger thread detects the track is stopped.
752    if (!mResetDone) {
753        // Force underrun condition to avoid false underrun callback until first data is
754        // written to buffer
755        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
756        mFillingUpStatus = FS_FILLING;
757        mResetDone = true;
758        if (mState == FLUSHED) {
759            mState = IDLE;
760        }
761    }
762}
763
764status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
765{
766    sp<ThreadBase> thread = mThread.promote();
767    if (thread == 0) {
768        ALOGE("thread is dead");
769        return FAILED_TRANSACTION;
770    } else if ((thread->type() == ThreadBase::DIRECT) ||
771                    (thread->type() == ThreadBase::OFFLOAD)) {
772        return thread->setParameters(keyValuePairs);
773    } else {
774        return PERMISSION_DENIED;
775    }
776}
777
778status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
779{
780    // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
781    if (isFastTrack()) {
782        return INVALID_OPERATION;
783    }
784    sp<ThreadBase> thread = mThread.promote();
785    if (thread == 0) {
786        return INVALID_OPERATION;
787    }
788    Mutex::Autolock _l(thread->mLock);
789    PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
790    if (!isOffloaded()) {
791        if (!playbackThread->mLatchQValid) {
792            return INVALID_OPERATION;
793        }
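        // scale the unpresented frame count from the output thread's sample rate
        // to this track's sample rate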
794        uint32_t unpresentedFrames =
795                ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
796                playbackThread->mSampleRate;
797        uint32_t framesWritten = mAudioTrackServerProxy->framesReleased();
798        if (framesWritten < unpresentedFrames) {
799            return INVALID_OPERATION;
800        }
801        timestamp.mPosition = framesWritten - unpresentedFrames;
802        timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
803        return NO_ERROR;
804    }
805
806    return playbackThread->getTimestamp_l(timestamp);
807}
808
809status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
810{
811    status_t status = DEAD_OBJECT;
812    sp<ThreadBase> thread = mThread.promote();
813    if (thread != 0) {
814        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
815        sp<AudioFlinger> af = mClient->audioFlinger();
816
817        Mutex::Autolock _l(af->mLock);
818
819        sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
820
821        if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
822            Mutex::Autolock _dl(playbackThread->mLock);
823            Mutex::Autolock _sl(srcThread->mLock);
824            sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
825            if (chain == 0) {
826                return INVALID_OPERATION;
827            }
828
829            sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
830            if (effect == 0) {
831                return INVALID_OPERATION;
832            }
833            srcThread->removeEffect_l(effect);
834            status = playbackThread->addEffect_l(effect);
835            if (status != NO_ERROR) {
836                srcThread->addEffect_l(effect);
837                return INVALID_OPERATION;
838            }
839            // removeEffect_l() has stopped the effect if it was active so it must be restarted
840            if (effect->state() == EffectModule::ACTIVE ||
841                    effect->state() == EffectModule::STOPPING) {
842                effect->start();
843            }
844
845            sp<EffectChain> dstChain = effect->chain().promote();
846            if (dstChain == 0) {
847                srcThread->addEffect_l(effect);
848                return INVALID_OPERATION;
849            }
850            AudioSystem::unregisterEffect(effect->id());
851            AudioSystem::registerEffect(&effect->desc(),
852                                        playbackThread->id(),
853                                        dstChain->strategy(),
854                                        AUDIO_SESSION_OUTPUT_MIX,
855                                        effect->id());
856            AudioSystem::setEffectEnabled(effect->id(), effect->isEnabled());
857        }
858        status = playbackThread->attachAuxEffect(this, EffectId);
859    }
860    return status;
861}
862
863void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
864{
865    mAuxEffectId = EffectId;
866    mAuxBuffer = buffer;
867}
868
869bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
870                                                         size_t audioHalFrames)
871{
872    // a track is considered presented when the total number of frames written to audio HAL
873    // corresponds to the number of frames written when presentationComplete() is called for the
874    // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
875    // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
876    // to detect when all frames have been played. In this case framesWritten isn't
877    // useful because it doesn't always reflect whether there is data in the h/w
878    // buffers, particularly if a track has been paused and resumed during draining
879    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
880                      mPresentationCompleteFrames, framesWritten);
881    if (mPresentationCompleteFrames == 0) {
882        mPresentationCompleteFrames = framesWritten + audioHalFrames;
883        ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
884                  mPresentationCompleteFrames, audioHalFrames);
885    }
886
887    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
888        ALOGV("presentationComplete() session %d complete: framesWritten %d",
889                  mSessionId, framesWritten);
890        triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
891        mAudioTrackServerProxy->setStreamEndDone();
892        return true;
893    }
894    return false;
895}
896
897void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
898{
899    for (size_t i = 0; i < mSyncEvents.size(); i++) {
900        if (mSyncEvents[i]->type() == type) {
901            mSyncEvents[i]->trigger();
902            mSyncEvents.removeAt(i);
903            i--;
904        }
905    }
906}
907
908// implement VolumeBufferProvider interface
909
910uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
911{
912    // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
913    ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
914    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
915    uint32_t vl = vlr & 0xFFFF;
916    uint32_t vr = vlr >> 16;
917    // track volumes come from shared memory, so can't be trusted and must be clamped
918    if (vl > MAX_GAIN_INT) {
919        vl = MAX_GAIN_INT;
920    }
921    if (vr > MAX_GAIN_INT) {
922        vr = MAX_GAIN_INT;
923    }
924    // now apply the cached master volume and stream type volume;
925    // this is trusted but lacks any synchronization or barrier so may be stale
926    float v = mCachedVolume;
927    vl *= v;
928    vr *= v;
929    // re-combine into U4.16
930    vlr = (vr << 16) | (vl & 0xFFFF);
931    // FIXME look at mute, pause, and stop flags
932    return vlr;
933}
934
935status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
936{
937    if (isTerminated() || mState == PAUSED ||
938            ((framesReady() == 0) && ((mSharedBuffer != 0) ||
939                                      (mState == STOPPED)))) {
940        ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
941              mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
942        event->cancel();
943        return INVALID_OPERATION;
944    }
945    (void) TrackBase::setSyncEvent(event);
946    return NO_ERROR;
947}
948
949void AudioFlinger::PlaybackThread::Track::invalidate()
950{
951    // FIXME should use proxy, and needs work
952    audio_track_cblk_t* cblk = mCblk;
953    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
954    android_atomic_release_store(0x40000000, &cblk->mFutex);
955    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
956    (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
957    mIsInvalid = true;
958}
959
960void AudioFlinger::PlaybackThread::Track::signal()
961{
962    sp<ThreadBase> thread = mThread.promote();
963    if (thread != 0) {
964        PlaybackThread *t = (PlaybackThread *)thread.get();
965        Mutex::Autolock _l(t->mLock);
966        t->broadcast_l();
967    }
968}
969
970// ----------------------------------------------------------------------------
971
972sp<AudioFlinger::PlaybackThread::TimedTrack>
973AudioFlinger::PlaybackThread::TimedTrack::create(
974            PlaybackThread *thread,
975            const sp<Client>& client,
976            audio_stream_type_t streamType,
977            uint32_t sampleRate,
978            audio_format_t format,
979            audio_channel_mask_t channelMask,
980            size_t frameCount,
981            const sp<IMemory>& sharedBuffer,
982            int sessionId,
983            int uid) {
984    if (!client->reserveTimedTrack())
985        return 0;
986
987    return new TimedTrack(
988        thread, client, streamType, sampleRate, format, channelMask, frameCount,
989        sharedBuffer, sessionId, uid);
990}
991
992AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
993            PlaybackThread *thread,
994            const sp<Client>& client,
995            audio_stream_type_t streamType,
996            uint32_t sampleRate,
997            audio_format_t format,
998            audio_channel_mask_t channelMask,
999            size_t frameCount,
1000            const sp<IMemory>& sharedBuffer,
1001            int sessionId,
1002            int uid)
1003    : Track(thread, client, streamType, sampleRate, format, channelMask,
1004            frameCount, sharedBuffer, sessionId, uid, IAudioFlinger::TRACK_TIMED),
1005      mQueueHeadInFlight(false),
1006      mTrimQueueHeadOnRelease(false),
1007      mFramesPendingInQueue(0),
1008      mTimedSilenceBuffer(NULL),
1009      mTimedSilenceBufferSize(0),
1010      mTimedAudioOutputOnTime(false),
1011      mMediaTimeTransformValid(false)
1012{
1013    LocalClock lc;
1014    mLocalTimeFreq = lc.getLocalFreq();
1015
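    // maps local-clock ticks to frames at this track's sample rate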
1016    mLocalTimeToSampleTransform.a_zero = 0;
1017    mLocalTimeToSampleTransform.b_zero = 0;
1018    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
1019    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
1020    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
1021                            &mLocalTimeToSampleTransform.a_to_b_denom);
1022
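    // maps media time in microseconds to frames at this track's sample rate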
1023    mMediaTimeToSampleTransform.a_zero = 0;
1024    mMediaTimeToSampleTransform.b_zero = 0;
1025    mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
1026    mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
1027    LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
1028                            &mMediaTimeToSampleTransform.a_to_b_denom);
1029}
1030
1031AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
1032    mClient->releaseTimedTrack();
1033    delete [] mTimedSilenceBuffer;
1034}
1035
1036status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
1037    size_t size, sp<IMemory>* buffer) {
1038
1039    Mutex::Autolock _l(mTimedBufferQueueLock);
1040
1041    trimTimedBufferQueue_l();
1042
1043    // lazily initialize the shared memory heap for timed buffers
1044    if (mTimedMemoryDealer == NULL) {
1045        const int kTimedBufferHeapSize = 512 << 10;
1046
1047        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
1048                                              "AudioFlingerTimed");
1049        if (mTimedMemoryDealer == NULL)
1050            return NO_MEMORY;
1051    }
1052
1053    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
1054    if (newBuffer == NULL) {
1055        newBuffer = mTimedMemoryDealer->allocate(size);
1056        if (newBuffer == NULL)
1057            return NO_MEMORY;
1058    }
1059
1060    *buffer = newBuffer;
1061    return NO_ERROR;
1062}
1063
1064// caller must hold mTimedBufferQueueLock
1065void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
1066    int64_t mediaTimeNow;
1067    {
1068        Mutex::Autolock mttLock(mMediaTimeTransformLock);
1069        if (!mMediaTimeTransformValid)
1070            return;
1071
1072        int64_t targetTimeNow;
1073        status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
1074            ? mCCHelper.getCommonTime(&targetTimeNow)
1075            : mCCHelper.getLocalTime(&targetTimeNow);
1076
1077        if (OK != res)
1078            return;
1079
1080        if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
1081                                                    &mediaTimeNow)) {
1082            return;
1083        }
1084    }
1085
1086    size_t trimEnd;
1087    for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
1088        int64_t bufEnd;
1089
1090        if ((trimEnd + 1) < mTimedBufferQueue.size()) {
1091            // We have a next buffer.  Just use its PTS as the PTS of the frame
1092            // following the last frame in this buffer.  If the stream is sparse
1093            // (ie, there are deliberate gaps left in the stream which should be
1094            // filled with silence by the TimedAudioTrack), then this can result
1095            // in one extra buffer being left un-trimmed when it could have
1096            // been.  In general, this is not typical, and we would rather
1097            // optimize away the TS calculation below for the more common case
1098            // where PTSes are contiguous.
1099            bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
1100        } else {
1101            // We have no next buffer.  Compute the PTS of the frame following
1102            // the last frame in this buffer by computing the duration of of
1103            // the last frame in this buffer by computing the duration of
1104            // buffer.
1105            int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
1106                               / mFrameSize;
1107
1108            if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
1109                                                                &bufEnd)) {
1110                ALOGE("Failed to convert frame count of %lld to media time"
1111                      " duration" " (scale factor %d/%u) in %s",
1112                      frameCount,
1113                      mMediaTimeToSampleTransform.a_to_b_numer,
1114                      mMediaTimeToSampleTransform.a_to_b_denom,
1115                      __PRETTY_FUNCTION__);
1116                break;
1117            }
1118            bufEnd += mTimedBufferQueue[trimEnd].pts();
1119        }
1120
1121        if (bufEnd > mediaTimeNow)
1122            break;
1123
1124        // Is the buffer we want to use in the middle of a mix operation right
1125        // now?  If so, don't actually trim it.  Just wait for the releaseBuffer
1126        // from the mixer which should be coming back shortly.
1127        if (!trimEnd && mQueueHeadInFlight) {
1128            mTrimQueueHeadOnRelease = true;
1129        }
1130    }
1131
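    // if the head buffer is in flight it is skipped here and trimmed later in releaseBuffer()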
1132    size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
1133    if (trimStart < trimEnd) {
1134        // Update the bookkeeping for framesReady()
1135        for (size_t i = trimStart; i < trimEnd; ++i) {
1136            updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
1137        }
1138
1139        // Now actually remove the buffers from the queue.
1140        mTimedBufferQueue.removeItemsAt(trimStart, trimEnd - trimStart);
1141    }
1142}
1143
1144void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
1145        const char* logTag) {
1146    ALOG_ASSERT(mTimedBufferQueue.size() > 0,
1147                "%s called (reason \"%s\"), but timed buffer queue has no"
1148                " elements to trim.", __FUNCTION__, logTag);
1149
1150    updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
1151    mTimedBufferQueue.removeAt(0);
1152}
1153
1154void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
1155        const TimedBuffer& buf,
1156        const char* logTag) {
1157    uint32_t bufBytes        = buf.buffer()->size();
1158    uint32_t consumedAlready = buf.position();
1159
1160    ALOG_ASSERT(consumedAlready <= bufBytes,
1161                "Bad bookkeeping while updating frames pending.  Timed buffer is"
1162                " only %u bytes long, but claims to have consumed %u"
1163                " bytes.  (update reason: \"%s\")",
1164                bufBytes, consumedAlready, logTag);
1165
1166    uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
1167    ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
1168                "Bad bookkeeping while updating frames pending.  Should have at"
1169                " least %u queued frames, but we think we have only %u.  (update"
1170                " reason: \"%s\")",
1171                bufFrames, mFramesPendingInQueue, logTag);
1172
1173    mFramesPendingInQueue -= bufFrames;
1174}
1175
1176status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
1177    const sp<IMemory>& buffer, int64_t pts) {
1178
1179    {
1180        Mutex::Autolock mttLock(mMediaTimeTransformLock);
1181        if (!mMediaTimeTransformValid)
1182            return INVALID_OPERATION;
1183    }
1184
1185    Mutex::Autolock _l(mTimedBufferQueueLock);
1186
1187    uint32_t bufFrames = buffer->size() / mFrameSize;
1188    mFramesPendingInQueue += bufFrames;
1189    mTimedBufferQueue.add(TimedBuffer(buffer, pts));
1190
1191    return NO_ERROR;
1192}
1193
1194status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
1195    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
1196
1197    ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
1198           xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
1199           target);
1200
1201    if (!(target == TimedAudioTrack::LOCAL_TIME ||
1202          target == TimedAudioTrack::COMMON_TIME)) {
1203        return BAD_VALUE;
1204    }
1205
1206    Mutex::Autolock lock(mMediaTimeTransformLock);
1207    mMediaTimeTransform = xform;
1208    mMediaTimeTransformTarget = target;
1209    mMediaTimeTransformValid = true;
1210
1211    return NO_ERROR;
1212}
1213
1214#define min(a, b) ((a) < (b) ? (a) : (b))
1215
1216// implementation of getNextBuffer for tracks whose buffers have timestamps
1217status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
1218    AudioBufferProvider::Buffer* buffer, int64_t pts)
1219{
1220    if (pts == AudioBufferProvider::kInvalidPTS) {
1221        buffer->raw = NULL;
1222        buffer->frameCount = 0;
1223        mTimedAudioOutputOnTime = false;
1224        return INVALID_OPERATION;
1225    }
1226
1227    Mutex::Autolock _l(mTimedBufferQueueLock);
1228
1229    ALOG_ASSERT(!mQueueHeadInFlight,
1230                "getNextBuffer called without releaseBuffer!");
1231
1232    while (true) {
1233
1234        // if we have no timed buffers, then fail
1235        if (mTimedBufferQueue.isEmpty()) {
1236            buffer->raw = NULL;
1237            buffer->frameCount = 0;
1238            return NOT_ENOUGH_DATA;
1239        }
1240
1241        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1242
1243        // calculate the PTS of the head of the timed buffer queue expressed in
1244        // local time
1245        int64_t headLocalPTS;
1246        {
1247            Mutex::Autolock mttLock(mMediaTimeTransformLock);
1248
1249            ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
1250
1251            if (mMediaTimeTransform.a_to_b_denom == 0) {
1252                // the transform represents a pause, so yield silence
1253                timedYieldSilence_l(buffer->frameCount, buffer);
1254                return NO_ERROR;
1255            }
1256
1257            int64_t transformedPTS;
1258            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
1259                                                        &transformedPTS)) {
1260                // the transform failed.  this shouldn't happen, but if it does
1261                // then just drop this buffer
1262                ALOGW("timedGetNextBuffer transform failed");
1263                buffer->raw = NULL;
1264                buffer->frameCount = 0;
1265                trimTimedBufferQueueHead_l("getNextBuffer; no transform");
1266                return NO_ERROR;
1267            }
1268
1269            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
1270                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
1271                                                          &headLocalPTS)) {
1272                    buffer->raw = NULL;
1273                    buffer->frameCount = 0;
1274                    return INVALID_OPERATION;
1275                }
1276            } else {
1277                headLocalPTS = transformedPTS;
1278            }
1279        }
1280
1281        uint32_t sr = sampleRate();
1282
1283        // adjust the head buffer's PTS to reflect the portion of the head buffer
1284        // that has already been consumed
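        // (head.position() is in bytes: convert to frames, then to local-clock ticks)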
1285        int64_t effectivePTS = headLocalPTS +
1286                ((head.position() / mFrameSize) * mLocalTimeFreq / sr);
1287
1288        // Calculate the delta in samples between the head of the input buffer
1289        // queue and the start of the next output buffer that will be written.
1290        // If the transformation fails because of over or underflow, it means
1291        // that the sample's position in the output stream is so far out of
1292        // whack that it should just be dropped.
1293        int64_t sampleDelta;
1294        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
1295            ALOGV("*** head buffer is too far from PTS: dropped buffer");
1296            trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
1297                                       " mix");
1298            continue;
1299        }
1300        if (!mLocalTimeToSampleTransform.doForwardTransform(
1301                (effectivePTS - pts) << 32, &sampleDelta)) {
1302            ALOGV("*** too late during sample rate transform: dropped buffer");
1303            trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
1304            continue;
1305        }
1306
1307        ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
1308               " sampleDelta=[%d.%08x]",
1309               head.pts(), head.position(), pts,
1310               static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
1311                   + (sampleDelta >> 32)),
1312               static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
1313
1314        // if the delta between the ideal placement for the next input sample and
1315        // the current output position is within this threshold, then we will
1316        // concatenate the next input samples to the previous output
1317        const int64_t kSampleContinuityThreshold =
1318                (static_cast<int64_t>(sr) << 32) / 250;
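        // (sr / 250 frames, i.e. 4 ms of audio, expressed in Q32 fixed point)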
1319
1320        // if this is the first buffer of audio that we're emitting from this track
1321        // then it should be almost exactly on time.
1322        const int64_t kSampleStartupThreshold = 1LL << 32;
1323
1324        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
1325           (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
1326            // the next input is close enough to being on time, so concatenate it
1327            // with the last output
1328            timedYieldSamples_l(buffer);
1329
1330            ALOGVV("*** on time: head.pos=%d frameCount=%u",
1331                    head.position(), buffer->frameCount);
1332            return NO_ERROR;
1333        }
1334
1335        // Looks like our output is not on time.  Reset our on-time status.
1336        // Next time we mix samples from our input queue, they should be within
1337        // the StartupThreshold.
1338        mTimedAudioOutputOnTime = false;
1339        if (sampleDelta > 0) {
1340            // the gap between the current output position and the proper start of
1341            // the next input sample is too big, so fill it with silence
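            // round the Q32 fixed-point frame delta to the nearest whole frame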
1342            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
1343
1344            timedYieldSilence_l(framesUntilNextInput, buffer);
1345            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
1346            return NO_ERROR;
1347        } else {
1348            // the next input sample is late
1349            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
1350            size_t onTimeSamplePosition =
1351                    head.position() + lateFrames * mFrameSize;
1352
1353            if (onTimeSamplePosition > head.buffer()->size()) {
1354                // all the remaining samples in the head are too late, so
1355                // drop it and move on
1356                ALOGV("*** too late: dropped buffer");
1357                trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
1358                continue;
1359            } else {
1360                // skip over the late samples
1361                head.setPosition(onTimeSamplePosition);
1362
1363                // yield the available samples
1364                timedYieldSamples_l(buffer);
1365
1366                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
1367                return NO_ERROR;
1368            }
1369        }
1370    }
1371}
1372
1373// Yield samples from the timed buffer queue head up to the given output
1374// buffer's capacity.
1375//
1376// Caller must hold mTimedBufferQueueLock
1377void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
1378    AudioBufferProvider::Buffer* buffer) {
1379
1380    const TimedBuffer& head = mTimedBufferQueue[0];
1381
1382    buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
1383                   head.position());
1384
1385    uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
1386                                 mFrameSize);
1387    size_t framesRequested = buffer->frameCount;
1388    buffer->frameCount = min(framesLeftInHead, framesRequested);
1389
1390    mQueueHeadInFlight = true;
1391    mTimedAudioOutputOnTime = true;
1392}
1393
1394// Yield samples of silence up to the given output buffer's capacity
1395//
1396// Caller must hold mTimedBufferQueueLock
1397void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
1398    uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
1399
1400    // lazily allocate a buffer filled with silence
1401    if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
1402        delete [] mTimedSilenceBuffer;
1403        mTimedSilenceBufferSize = numFrames * mFrameSize;
1404        mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
1405        memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
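        // The silence buffer is reused across calls and only ever grows, so
        // the zero fill above is only needed on (re)allocation.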
1406    }
1407
1408    buffer->raw = mTimedSilenceBuffer;
1409    size_t framesRequested = buffer->frameCount;
1410    buffer->frameCount = min(numFrames, framesRequested);
1411
1412    mTimedAudioOutputOnTime = false;
1413}
1414
1415// AudioBufferProvider interface
1416void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
1417    AudioBufferProvider::Buffer* buffer) {
1418
1419    Mutex::Autolock _l(mTimedBufferQueueLock);
1420
1421    // If the buffer which was just released is part of the buffer at the head
1422    // of the queue, be sure to update the amount of the buffer which has been
1423    // consumed.  If the buffer being returned is not part of the head of the
1424    // queue, it's either because the buffer is part of the silence buffer, or
1425    // because the head of the timed queue was trimmed after the mixer called
1426    // getNextBuffer but before the mixer called releaseBuffer.
1427    if (buffer->raw == mTimedSilenceBuffer) {
1428        ALOG_ASSERT(!mQueueHeadInFlight,
1429                    "Queue head in flight during release of silence buffer!");
1430        goto done;
1431    }
1432
1433    ALOG_ASSERT(mQueueHeadInFlight,
1434                "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
1435                " head in flight.");
1436
1437    if (mTimedBufferQueue.size()) {
1438        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1439
1440        void* start = head.buffer()->pointer();
1441        void* end   = reinterpret_cast<void*>(
1442                        reinterpret_cast<uint8_t*>(head.buffer()->pointer())
1443                        + head.buffer()->size());
1444
1445        ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
1446                    "released buffer not within the head of the timed buffer"
1447                    " queue; qHead = [%p, %p], released buffer = %p",
1448                    start, end, buffer->raw);
1449
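        // Advance the head's consumed offset by the bytes the mixer just used;
        // once the head is fully consumed (checked below), it is trimmed from
        // the queue.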
1450        head.setPosition(head.position() +
1451                (buffer->frameCount * mFrameSize));
1452        mQueueHeadInFlight = false;
1453
1454        ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
1455                    "Bad bookkeeping during releaseBuffer!  Should have at"
1456                    " least %u queued frames, but we think we have only %u",
1457                    buffer->frameCount, mFramesPendingInQueue);
1458
1459        mFramesPendingInQueue -= buffer->frameCount;
1460
1461        if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
1462            || mTrimQueueHeadOnRelease) {
1463            trimTimedBufferQueueHead_l("releaseBuffer");
1464            mTrimQueueHeadOnRelease = false;
1465        }
1466    } else {
1467        LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
1468                  " buffers in the timed buffer queue");
1469    }
1470
1471done:
1472    buffer->raw = 0;
1473    buffer->frameCount = 0;
1474}
1475
1476size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
1477    Mutex::Autolock _l(mTimedBufferQueueLock);
1478    return mFramesPendingInQueue;
1479}
1480
1481AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
1482        : mPTS(0), mPosition(0) {}
1483
1484AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
1485    const sp<IMemory>& buffer, int64_t pts)
1486        : mBuffer(buffer), mPTS(pts), mPosition(0) {}
1487
1488
1489// ----------------------------------------------------------------------------
1490
1491AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
1492            PlaybackThread *playbackThread,
1493            DuplicatingThread *sourceThread,
1494            uint32_t sampleRate,
1495            audio_format_t format,
1496            audio_channel_mask_t channelMask,
1497            size_t frameCount,
1498            int uid)
1499    :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
1500                NULL, 0, uid, IAudioFlinger::TRACK_DEFAULT),
1501    mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
1502{
1503
1504    if (mCblk != NULL) {
1505        mOutBuffer.frameCount = 0;
1506        playbackThread->mTracks.add(this);
1507        ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
1508                "mCblk->frameCount_ %u, mChannelMask 0x%08x",
1509                mCblk, mBuffer,
1510                mCblk->frameCount_, mChannelMask);
1511        // since client and server are in the same process,
1512        // the buffer has the same virtual address on both sides
1513        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
1514                true /*clientInServer*/);
1515        mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000));
1516        mClientProxy->setSendLevel(0.0);
1517        mClientProxy->setSampleRate(sampleRate);
1519    } else {
1520        ALOGW("Error creating output track on thread %p", playbackThread);
1521    }
1522}
1523
1524AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
1525{
1526    clearBufferQueue();
1527    delete mClientProxy;
1528    // superclass destructor will now delete the server proxy and shared memory both refer to
1529}
1530
1531status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
1532                                                          int triggerSession)
1533{
1534    status_t status = Track::start(event, triggerSession);
1535    if (status != NO_ERROR) {
1536        return status;
1537    }
1538
1539    mActive = true;
1540    mRetryCount = 127;
1541    return status;
1542}
1543
1544void AudioFlinger::PlaybackThread::OutputTrack::stop()
1545{
1546    Track::stop();
1547    clearBufferQueue();
1548    mOutBuffer.frameCount = 0;
1549    mActive = false;
1550}
1551
1552bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
1553{
1554    Buffer *pInBuffer;
1555    Buffer inBuffer;
1556    uint32_t channelCount = mChannelCount;
1557    bool outputBufferFull = false;
1558    inBuffer.frameCount = frames;
1559    inBuffer.i16 = data;
1560
1561    uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
1562
1563    if (!mActive && frames != 0) {
1564        start();
1565        sp<ThreadBase> thread = mThread.promote();
1566        if (thread != 0) {
1567            MixerThread *mixerThread = (MixerThread *)thread.get();
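            // On the first write after activation, if the incoming data is
            // smaller than the track's buffer, queue leading silence so that
            // silence plus data fill one complete track buffer for the
            // destination mixer.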
1568            if (mFrameCount > frames) {
1569                if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1570                    uint32_t startFrames = (mFrameCount - frames);
1571                    pInBuffer = new Buffer;
1572                    pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
1573                    pInBuffer->frameCount = startFrames;
1574                    pInBuffer->i16 = pInBuffer->mBuffer;
1575                    memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
1576                    mBufferQueue.add(pInBuffer);
1577                } else {
1578                    ALOGW("OutputTrack::write() %p no more buffers in queue", this);
1579                }
1580            }
1581        }
1582    }
1583
1584    while (waitTimeLeftMs) {
1585        // First write pending buffers, then new data
1586        if (mBufferQueue.size()) {
1587            pInBuffer = mBufferQueue.itemAt(0);
1588        } else {
1589            pInBuffer = &inBuffer;
1590        }
1591
1592        if (pInBuffer->frameCount == 0) {
1593            break;
1594        }
1595
1596        if (mOutBuffer.frameCount == 0) {
1597            mOutBuffer.frameCount = pInBuffer->frameCount;
1598            nsecs_t startTime = systemTime();
1599            status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
1600            if (status != NO_ERROR) {
1601                ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
1602                        mThread.unsafe_get(), status);
1603                outputBufferFull = true;
1604                break;
1605            }
1606            uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
1607            if (waitTimeLeftMs >= waitTimeMs) {
1608                waitTimeLeftMs -= waitTimeMs;
1609            } else {
1610                waitTimeLeftMs = 0;
1611            }
1612        }
1613
1614        uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
1615                pInBuffer->frameCount;
1616        memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
1617        Proxy::Buffer buf;
1618        buf.mFrameCount = outFrames;
1619        buf.mRaw = NULL;
1620        mClientProxy->releaseBuffer(&buf);
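        // Releasing through the client proxy commits the frames copied above,
        // making them available to the destination playback thread's mixer.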
1621        pInBuffer->frameCount -= outFrames;
1622        pInBuffer->i16 += outFrames * channelCount;
1623        mOutBuffer.frameCount -= outFrames;
1624        mOutBuffer.i16 += outFrames * channelCount;
1625
1626        if (pInBuffer->frameCount == 0) {
1627            if (mBufferQueue.size()) {
1628                mBufferQueue.removeAt(0);
1629                delete [] pInBuffer->mBuffer;
1630                delete pInBuffer;
1631                ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
1632                        mThread.unsafe_get(), mBufferQueue.size());
1633            } else {
1634                break;
1635            }
1636        }
1637    }
1638
1639    // If we could not write all frames, allocate a buffer and queue it for next time.
1640    if (inBuffer.frameCount) {
1641        sp<ThreadBase> thread = mThread.promote();
1642        if (thread != 0 && !thread->standby()) {
1643            if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1644                pInBuffer = new Buffer;
1645                pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
1646                pInBuffer->frameCount = inBuffer.frameCount;
1647                pInBuffer->i16 = pInBuffer->mBuffer;
1648                memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
1649                        sizeof(int16_t));
1650                mBufferQueue.add(pInBuffer);
1651                ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
1652                        mThread.unsafe_get(), mBufferQueue.size());
1653            } else {
1654                ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
1655                        this, mThread.unsafe_get());
1656            }
1657        }
1658    }
1659
1660    // Calling write() with a 0 length buffer means that no more data will be written:
1661    // If no more buffers are pending, fill output track buffer to make sure it is started
1662    // by output mixer.
1663    if (frames == 0 && mBufferQueue.size() == 0) {
1664        // FIXME broken, replace by getting framesReady() from proxy
1665        size_t user = 0;    // was mCblk->user
1666        if (user < mFrameCount) {
1667            frames = mFrameCount - user;
1668            pInBuffer = new Buffer;
1669            pInBuffer->mBuffer = new int16_t[frames * channelCount];
1670            pInBuffer->frameCount = frames;
1671            pInBuffer->i16 = pInBuffer->mBuffer;
1672            memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
1673            mBufferQueue.add(pInBuffer);
1674        } else if (mActive) {
1675            stop();
1676        }
1677    }
1678
1679    return outputBufferFull;
1680}
1681
1682status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
1683        AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
1684{
1685    ClientProxy::Buffer buf;
1686    buf.mFrameCount = buffer->frameCount;
1687    struct timespec timeout;
1688    timeout.tv_sec = waitTimeMs / 1000;
1689    timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
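    // obtainBuffer() blocks for up to waitTimeMs while waiting for free space
    // in the destination track's buffer.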
1690    status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
1691    buffer->frameCount = buf.mFrameCount;
1692    buffer->raw = buf.mRaw;
1693    return status;
1694}
1695
1696void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
1697{
1698    size_t size = mBufferQueue.size();
1699
1700    for (size_t i = 0; i < size; i++) {
1701        Buffer *pBuffer = mBufferQueue.itemAt(i);
1702        delete [] pBuffer->mBuffer;
1703        delete pBuffer;
1704    }
1705    mBufferQueue.clear();
1706}
1707
1708
1709// ----------------------------------------------------------------------------
1710//      Record
1711// ----------------------------------------------------------------------------
1712
1713AudioFlinger::RecordHandle::RecordHandle(
1714        const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
1715    : BnAudioRecord(),
1716    mRecordTrack(recordTrack)
1717{
1718}
1719
1720AudioFlinger::RecordHandle::~RecordHandle() {
1721    stop_nonvirtual();
1722    mRecordTrack->destroy();
1723}
1724
1725sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
1726    return mRecordTrack->getCblk();
1727}
1728
1729status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
1730        int triggerSession) {
1731    ALOGV("RecordHandle::start()");
1732    return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
1733}
1734
1735void AudioFlinger::RecordHandle::stop() {
1736    stop_nonvirtual();
1737}
1738
1739void AudioFlinger::RecordHandle::stop_nonvirtual() {
1740    ALOGV("RecordHandle::stop()");
1741    mRecordTrack->stop();
1742}
1743
1744status_t AudioFlinger::RecordHandle::onTransact(
1745    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
1746{
1747    return BnAudioRecord::onTransact(code, data, reply, flags);
1748}
1749
1750// ----------------------------------------------------------------------------
1751
1752// RecordTrack constructor must be called with AudioFlinger::mLock held
1753AudioFlinger::RecordThread::RecordTrack::RecordTrack(
1754            RecordThread *thread,
1755            const sp<Client>& client,
1756            uint32_t sampleRate,
1757            audio_format_t format,
1758            audio_channel_mask_t channelMask,
1759            size_t frameCount,
1760            int sessionId,
1761            int uid)
1762    :   TrackBase(thread, client, sampleRate, format,
1763                  channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid, false /*isOut*/),
1764        mOverflow(false)
1765{
1766    ALOGV("RecordTrack constructor");
1767    if (mCblk != NULL) {
1768        mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
1769                mFrameSize);
1770        mServerProxy = mAudioRecordServerProxy;
1771    }
1772}
1773
1774AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
1775{
1776    ALOGV("%s", __func__);
1777}
1778
1779// AudioBufferProvider interface
1780status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
1781        int64_t pts)
1782{
1783    ServerProxy::Buffer buf;
1784    buf.mFrameCount = buffer->frameCount;
1785    status_t status = mServerProxy->obtainBuffer(&buf);
1786    buffer->frameCount = buf.mFrameCount;
1787    buffer->raw = buf.mRaw;
1788    if (buf.mFrameCount == 0) {
1789        // FIXME also wake futex so that overrun is noticed more quickly
1790        (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
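        // An empty buffer means the client has not read captured data fast
        // enough; raising CBLK_OVERRUN lets the client detect that audio was
        // dropped.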
1791    }
1792    return status;
1793}
1794
1795status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
1796                                                        int triggerSession)
1797{
1798    sp<ThreadBase> thread = mThread.promote();
1799    if (thread != 0) {
1800        RecordThread *recordThread = (RecordThread *)thread.get();
1801        return recordThread->start(this, event, triggerSession);
1802    } else {
1803        return BAD_VALUE;
1804    }
1805}
1806
1807void AudioFlinger::RecordThread::RecordTrack::stop()
1808{
1809    sp<ThreadBase> thread = mThread.promote();
1810    if (thread != 0) {
1811        RecordThread *recordThread = (RecordThread *)thread.get();
1812        if (recordThread->stop(this)) {
1813            AudioSystem::stopInput(recordThread->id());
1814        }
1815    }
1816}
1817
1818void AudioFlinger::RecordThread::RecordTrack::destroy()
1819{
1820    // see comments at AudioFlinger::PlaybackThread::Track::destroy()
1821    sp<RecordTrack> keep(this);
1822    {
1823        sp<ThreadBase> thread = mThread.promote();
1824        if (thread != 0) {
1825            if (mState == ACTIVE || mState == RESUMING) {
1826                AudioSystem::stopInput(thread->id());
1827            }
1828            AudioSystem::releaseInput(thread->id());
1829            Mutex::Autolock _l(thread->mLock);
1830            RecordThread *recordThread = (RecordThread *) thread.get();
1831            recordThread->destroyTrack_l(this);
1832        }
1833    }
1834}
1835
1836void AudioFlinger::RecordThread::RecordTrack::invalidate()
1837{
1838    // FIXME should use proxy, and needs work
1839    audio_track_cblk_t* cblk = mCblk;
1840    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1841    android_atomic_release_store(0x40000000, &cblk->mFutex);
1842    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
1843    (void) syscall(__NR_futex, &cblk->mFutex, FUTEX_WAKE, INT_MAX);
1844}
1845
1846
1847/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
1848{
1849    result.append("Client Fmt Chn mask Session S   Server fCount\n");
1850}
1851
1852void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
1853{
1854    snprintf(buffer, size, "%6u %3u %08X %7u %1d %08X %6zu\n",
1855            (mClient == 0) ? getpid_cached : mClient->pid(),
1856            mFormat,
1857            mChannelMask,
1858            mSessionId,
1859            mState,
1860            mCblk->mServer,
1861            mFrameCount);
1862}
1863
1864}; // namespace android
1865