Tracks.cpp revision 35cc4f3127322ad3e3dd1e15e8ae29ff4b4a3af6
1/*
2**
3** Copyright 2012, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19#define LOG_TAG "AudioFlinger"
20//#define LOG_NDEBUG 0
21
22#include "Configuration.h"
23#include <math.h>
24#include <cutils/compiler.h>
25#include <utils/Log.h>
26
27#include <private/media/AudioTrackShared.h>
28
29#include <common_time/cc_helper.h>
30#include <common_time/local_clock.h>
31
32#include "AudioMixer.h"
33#include "AudioFlinger.h"
34#include "ServiceUtilities.h"
35
36#include <media/nbaio/Pipe.h>
37#include <media/nbaio/PipeReader.h>
38
39// ----------------------------------------------------------------------------
40
41// Note: the following macro is used for extremely verbose logging messages.  In
42// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
43// 0; but one side effect of this is to turn on all LOGV's as well.  Some messages
44// are so verbose that we want to suppress them even when we have ALOG_ASSERT
45// turned on.  Do not uncomment the #define below unless you really know what you
46// are doing and want to see all of the extremely verbose messages.
47//#define VERY_VERY_VERBOSE_LOGGING
48#ifdef VERY_VERY_VERBOSE_LOGGING
49#define ALOGVV ALOGV
50#else
51#define ALOGVV(a...) do { } while(0)
52#endif
53
54namespace android {
55
56// ----------------------------------------------------------------------------
57//      TrackBase
58// ----------------------------------------------------------------------------
59
60static volatile int32_t nextTrackId = 55;
61
62// TrackBase constructor must be called with AudioFlinger::mLock held
63AudioFlinger::ThreadBase::TrackBase::TrackBase(
64            ThreadBase *thread,
65            const sp<Client>& client,
66            uint32_t sampleRate,
67            audio_format_t format,
68            audio_channel_mask_t channelMask,
69            size_t frameCount,
70            const sp<IMemory>& sharedBuffer,
71            int sessionId,
72            bool isOut)
73    :   RefBase(),
74        mThread(thread),
75        mClient(client),
76        mCblk(NULL),
77        // mBuffer
78        mState(IDLE),
79        mSampleRate(sampleRate),
80        mFormat(format),
81        mChannelMask(channelMask),
82        mChannelCount(popcount(channelMask)),
83        mFrameSize(audio_is_linear_pcm(format) ?
84                mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
85        mFrameCount(frameCount),
86        mSessionId(sessionId),
87        mIsOut(isOut),
88        mServerProxy(NULL),
89        mId(android_atomic_inc(&nextTrackId)),
90        mTerminated(false)
91{
92    // client == 0 implies sharedBuffer == 0
93    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));
94
95    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
96            sharedBuffer->size());
97
98    // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
99    size_t size = sizeof(audio_track_cblk_t);
100    size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize;
101    if (sharedBuffer == 0) {
102        size += bufferSize;
103    }
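    // Worked example of the sizing above (illustrative, and assuming roundup()
    // rounds the frame count up to the next power of two as in audio_utils):
    //   streaming track, frameCount = 1200, stereo 16-bit PCM => mFrameSize = 4 bytes
    //   bufferSize = roundup(1200) * 4 = 2048 * 4 = 8192 bytes
    //   size       = sizeof(audio_track_cblk_t) + 8192  (cblk and data in one allocation)
    //   static track (sharedBuffer != 0): only sizeof(audio_track_cblk_t) is allocated
    //   here, since the audio data lives in the client-provided shared buffer.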
104
105    if (client != 0) {
106        mCblkMemory = client->heap()->allocate(size);
107        if (mCblkMemory != 0) {
108            mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
109            // can't assume mCblk != NULL
110        } else {
111            ALOGE("not enough memory for AudioTrack size=%u", size);
112            client->heap()->dump("AudioTrack");
113            return;
114        }
115    } else {
116        // this syntax avoids calling the audio_track_cblk_t constructor twice
117        mCblk = (audio_track_cblk_t *) new uint8_t[size];
118        // assume mCblk != NULL
119    }
120
121    // construct the shared structure in-place.
122    if (mCblk != NULL) {
123        new(mCblk) audio_track_cblk_t();
124        // clear all buffers
125        mCblk->frameCount_ = frameCount;
126        if (sharedBuffer == 0) {
127            mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
128            memset(mBuffer, 0, bufferSize);
129        } else {
130            mBuffer = sharedBuffer->pointer();
131#if 0
132            mCblk->flags = CBLK_FORCEREADY;     // FIXME hack, need to fix the track ready logic
133#endif
134        }
135
136#ifdef TEE_SINK
137        if (mTeeSinkTrackEnabled) {
138            NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount);
139            if (pipeFormat != Format_Invalid) {
140                Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
141                size_t numCounterOffers = 0;
142                const NBAIO_Format offers[1] = {pipeFormat};
143                ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
144                ALOG_ASSERT(index == 0);
145                PipeReader *pipeReader = new PipeReader(*pipe);
146                numCounterOffers = 0;
147                index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
148                ALOG_ASSERT(index == 0);
149                mTeeSink = pipe;
150                mTeeSource = pipeReader;
151            }
152        }
153#endif
154
155    }
156}
157
158AudioFlinger::ThreadBase::TrackBase::~TrackBase()
159{
160#ifdef TEE_SINK
161    dumpTee(-1, mTeeSource, mId);
162#endif
163    // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
164    delete mServerProxy;
165    if (mCblk != NULL) {
166        if (mClient == 0) {
167            delete mCblk;
168        } else {
169            mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
170        }
171    }
172    mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
173    if (mClient != 0) {
174        // Client destructor must run with AudioFlinger mutex locked
175        Mutex::Autolock _l(mClient->audioFlinger()->mLock);
176        // If the client's reference count drops to zero, the associated destructor
177        // must run with AudioFlinger lock held. Thus the explicit clear() rather than
178        // relying on the automatic clear() at end of scope.
179        mClient.clear();
180    }
181}
182
183// AudioBufferProvider interface
184// getNextBuffer() = 0;
185// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
186void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
187{
188#ifdef TEE_SINK
189    if (mTeeSink != 0) {
190        (void) mTeeSink->write(buffer->raw, buffer->frameCount);
191    }
192#endif
193
194    ServerProxy::Buffer buf;
195    buf.mFrameCount = buffer->frameCount;
196    buf.mRaw = buffer->raw;
197    buffer->frameCount = 0;
198    buffer->raw = NULL;
199    mServerProxy->releaseBuffer(&buf);
200}
201
202status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
203{
204    mSyncEvents.add(event);
205    return NO_ERROR;
206}
207
208// ----------------------------------------------------------------------------
209//      Playback
210// ----------------------------------------------------------------------------
211
212AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
213    : BnAudioTrack(),
214      mTrack(track)
215{
216}
217
218AudioFlinger::TrackHandle::~TrackHandle() {
219    // Just stop the track on deletion; associated resources will be freed
220    // from the main thread once all pending buffers have been played. If the
221    // track is not in the active track list, everything is freed immediately
222    // instead.
223    mTrack->destroy();
224}
225
226sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
227    return mTrack->getCblk();
228}
229
230status_t AudioFlinger::TrackHandle::start() {
231    return mTrack->start();
232}
233
234void AudioFlinger::TrackHandle::stop() {
235    mTrack->stop();
236}
237
238void AudioFlinger::TrackHandle::flush() {
239    mTrack->flush();
240}
241
242void AudioFlinger::TrackHandle::pause() {
243    mTrack->pause();
244}
245
246status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
247    return mTrack->setParameters(keyValuePairs);
248}
249
250status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
251{
252    return mTrack->attachAuxEffect(EffectId);
253}
254
255status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
256                                                         sp<IMemory>* buffer) {
257    if (!mTrack->isTimedTrack())
258        return INVALID_OPERATION;
259
260    PlaybackThread::TimedTrack* tt =
261            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
262    return tt->allocateTimedBuffer(size, buffer);
263}
264
265status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
266                                                     int64_t pts) {
267    if (!mTrack->isTimedTrack())
268        return INVALID_OPERATION;
269
270    PlaybackThread::TimedTrack* tt =
271            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
272    return tt->queueTimedBuffer(buffer, pts);
273}
274
275status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
276    const LinearTransform& xform, int target) {
277
278    if (!mTrack->isTimedTrack())
279        return INVALID_OPERATION;
280
281    PlaybackThread::TimedTrack* tt =
282            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
283    return tt->setMediaTimeTransform(
284        xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
285}
286
287status_t AudioFlinger::TrackHandle::onTransact(
288    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
289{
290    return BnAudioTrack::onTransact(code, data, reply, flags);
291}
292
293// ----------------------------------------------------------------------------
294
295// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
296AudioFlinger::PlaybackThread::Track::Track(
297            PlaybackThread *thread,
298            const sp<Client>& client,
299            audio_stream_type_t streamType,
300            uint32_t sampleRate,
301            audio_format_t format,
302            audio_channel_mask_t channelMask,
303            size_t frameCount,
304            const sp<IMemory>& sharedBuffer,
305            int sessionId,
306            IAudioFlinger::track_flags_t flags)
307    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
308            sessionId, true /*isOut*/),
309    mFillingUpStatus(FS_INVALID),
310    // mRetryCount initialized later when needed
311    mSharedBuffer(sharedBuffer),
312    mStreamType(streamType),
313    mName(-1),  // see note below
314    mMainBuffer(thread->mixBuffer()),
315    mAuxBuffer(NULL),
316    mAuxEffectId(0), mHasVolumeController(false),
317    mPresentationCompleteFrames(0),
318    mFlags(flags),
319    mFastIndex(-1),
320    mUnderrunCount(0),
321    mCachedVolume(1.0),
322    mIsInvalid(false),
323    mAudioTrackServerProxy(NULL),
324    mResumeToStopping(false)
325{
326    if (mCblk != NULL) {
327        if (sharedBuffer == 0) {
328            mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
329                    mFrameSize);
330        } else {
331            mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
332                    mFrameSize);
333        }
334        mServerProxy = mAudioTrackServerProxy;
335        // to avoid leaking a track name, do not allocate one unless there is an mCblk
336        mName = thread->getTrackName_l(channelMask, sessionId);
337        mCblk->mName = mName;
338        if (mName < 0) {
339            ALOGE("no more track names available");
340            return;
341        }
342        // only allocate a fast track index if we were able to allocate a normal track name
343        if (flags & IAudioFlinger::TRACK_FAST) {
344            mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
345            ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
346            int i = __builtin_ctz(thread->mFastTrackAvailMask);
347            ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
348            // FIXME This is too eager.  We allocate a fast track index before the
349            //       fast track becomes active.  Since fast tracks are a scarce resource,
350            //       this means we are potentially denying other more important fast tracks from
351            //       being created.  It would be better to allocate the index dynamically.
352            mFastIndex = i;
353            mCblk->mName = i;
354            // Read the initial underruns because this field is never cleared by the fast mixer
355            mObservedUnderruns = thread->getFastTrackUnderruns(i);
356            thread->mFastTrackAvailMask &= ~(1 << i);
357        }
358    }
359    ALOGV("Track constructor name %d, calling pid %d", mName,
360            IPCThreadState::self()->getCallingPid());
361}
362
363AudioFlinger::PlaybackThread::Track::~Track()
364{
365    ALOGV("PlaybackThread::Track destructor");
366}
367
368void AudioFlinger::PlaybackThread::Track::destroy()
369{
370    // NOTE: destroyTrack_l() can remove a strong reference to this Track
371    // by removing it from the mTracks vector, so there is a risk that this Track's
372    // destructor is called. As the destructor needs to lock mLock,
373    // we must acquire a strong reference on this Track before locking mLock
374    // here so that the destructor is called only when exiting this function.
375    // On the other hand, as long as Track::destroy() is only called by
376    // TrackHandle destructor, the TrackHandle still holds a strong ref on
377    // this Track with its member mTrack.
378    sp<Track> keep(this);
379    { // scope for mLock
380        sp<ThreadBase> thread = mThread.promote();
381        if (thread != 0) {
382            Mutex::Autolock _l(thread->mLock);
383            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
384            bool wasActive = playbackThread->destroyTrack_l(this);
385            if (!isOutputTrack() && !wasActive) {
386                AudioSystem::releaseOutput(thread->id());
387            }
388        }
389    }
390}
391
392/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
393{
394    result.append("   Name Client Type Fmt Chn mask Session fCount S F SRate  "
395                  "L dB  R dB    Server Main buf  Aux Buf Flags Underruns\n");
396}
397
398void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
399{
400    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
401    if (isFastTrack()) {
402        sprintf(buffer, "   F %2d", mFastIndex);
403    } else {
404        sprintf(buffer, "   %4d", mName - AudioMixer::TRACK0);
405    }
406    track_state state = mState;
407    char stateChar;
408    if (isTerminated()) {
409        stateChar = 'T';
410    } else {
411        switch (state) {
412        case IDLE:
413            stateChar = 'I';
414            break;
415        case STOPPING_1:
416            stateChar = 's';
417            break;
418        case STOPPING_2:
419            stateChar = '5';
420            break;
421        case STOPPED:
422            stateChar = 'S';
423            break;
424        case RESUMING:
425            stateChar = 'R';
426            break;
427        case ACTIVE:
428            stateChar = 'A';
429            break;
430        case PAUSING:
431            stateChar = 'p';
432            break;
433        case PAUSED:
434            stateChar = 'P';
435            break;
436        case FLUSHED:
437            stateChar = 'F';
438            break;
439        default:
440            stateChar = '?';
441            break;
442        }
443    }
444    char nowInUnderrun;
445    switch (mObservedUnderruns.mBitFields.mMostRecent) {
446    case UNDERRUN_FULL:
447        nowInUnderrun = ' ';
448        break;
449    case UNDERRUN_PARTIAL:
450        nowInUnderrun = '<';
451        break;
452    case UNDERRUN_EMPTY:
453        nowInUnderrun = '*';
454        break;
455    default:
456        nowInUnderrun = '?';
457        break;
458    }
459    snprintf(&buffer[7], size-7, " %6u %4u %3u %08X %7u %6u %1c %1d %5u %5.2g %5.2g  "
460                                 "%08X %08X %08X 0x%03X %9u%c\n",
461            (mClient == 0) ? getpid_cached : mClient->pid(),
462            mStreamType,
463            mFormat,
464            mChannelMask,
465            mSessionId,
466            mFrameCount,
467            stateChar,
468            mFillingUpStatus,
469            mAudioTrackServerProxy->getSampleRate(),
470            20.0 * log10((vlr & 0xFFFF) / 4096.0),
471            20.0 * log10((vlr >> 16) / 4096.0),
472            mCblk->server,
473            (int)mMainBuffer,
474            (int)mAuxBuffer,
475            mCblk->flags,
476            mUnderrunCount,
477            nowInUnderrun);
478}
479
480uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
481    return mAudioTrackServerProxy->getSampleRate();
482}
483
484// AudioBufferProvider interface
485status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
486        AudioBufferProvider::Buffer* buffer, int64_t pts)
487{
488    ServerProxy::Buffer buf;
489    size_t desiredFrames = buffer->frameCount;
490    buf.mFrameCount = desiredFrames;
491    status_t status = mServerProxy->obtainBuffer(&buf);
492    buffer->frameCount = buf.mFrameCount;
493    buffer->raw = buf.mRaw;
494    if (buf.mFrameCount == 0) {
495        // only implemented so far for normal tracks, not fast tracks
496        mCblk->u.mStreaming.mUnderrunFrames += desiredFrames;
497        // FIXME also wake futex so that underrun is noticed more quickly
498        (void) android_atomic_or(CBLK_UNDERRUN, &mCblk->flags);
499    }
500    return status;
501}
502
503// Note that framesReady() takes a mutex on the control block using tryLock().
504// This could result in priority inversion if framesReady() is called by the normal mixer,
505// as the normal mixer thread runs at lower
506// priority than the client's callback thread:  there is a short window within framesReady()
507// during which the normal mixer could be preempted, and the client callback would block.
508// Another problem can occur if framesReady() is called by the fast mixer:
509// the tryLock() could block for up to 1 ms, and a sequence of these could delay the fast mixer.
510// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
511size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
512    return mAudioTrackServerProxy->framesReady();
513}
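
// A minimal sketch of the kind of non-blocking bookkeeping the FIXME above alludes
// to: a single-producer/single-consumer frame counter where the client only ever
// advances the rear index and the server only ever advances the front index, so a
// framesReady() query needs no lock.  Names and layout here are hypothetical and
// not the actual AudioTrackShared implementation:
//
//     #include <atomic>
//     #include <cstdint>
//
//     struct SpscFrameCounter {
//         std::atomic<uint32_t> mRear{0};   // written by the client (producer) only
//         std::atomic<uint32_t> mFront{0};  // written by the server (consumer) only
//
//         // server side: frames available to mix; never blocks
//         uint32_t framesReady() const {
//             return mRear.load(std::memory_order_acquire) -
//                    mFront.load(std::memory_order_relaxed);
//         }
//
//         // client side: publish 'frames' newly written frames
//         void produce(uint32_t frames) {
//             mRear.store(mRear.load(std::memory_order_relaxed) + frames,
//                         std::memory_order_release);
//         }
//
//         // server side: retire 'frames' mixed frames
//         void consume(uint32_t frames) {
//             mFront.store(mFront.load(std::memory_order_relaxed) + frames,
//                          std::memory_order_release);
//         }
//     };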
514
515// Don't call for fast tracks; the framesReady() could result in priority inversion
516bool AudioFlinger::PlaybackThread::Track::isReady() const {
517    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
518        return true;
519    }
520
521    if (framesReady() >= mFrameCount ||
522            (mCblk->flags & CBLK_FORCEREADY)) {
523        mFillingUpStatus = FS_FILLED;
524        android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
525        return true;
526    }
527    return false;
528}
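
// Worked example of the latch above (illustrative): for a streaming track with
// mFrameCount = 1024 the track stays in FS_FILLING until the client has written at
// least 1024 frames (or CBLK_FORCEREADY is set); it then latches FS_FILLED and keeps
// reporting ready even if the fill level later drops, and stopped or pausing tracks
// are always reported ready by the early return above.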
529
530status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,
531                                                    int triggerSession)
532{
533    status_t status = NO_ERROR;
534    ALOGV("start(%d), calling pid %d session %d",
535            mName, IPCThreadState::self()->getCallingPid(), mSessionId);
536
537    sp<ThreadBase> thread = mThread.promote();
538    if (thread != 0) {
539        Mutex::Autolock _l(thread->mLock);
540        track_state state = mState;
541        // here the track could be either new, or restarted
542        // in both cases "unstop" the track
543
544        if (state == PAUSED) {
545            if (mResumeToStopping) {
546                // the track was paused while draining; resume the drain in STOPPING_1
547                mState = TrackBase::STOPPING_1;
548                ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
549            } else {
550                mState = TrackBase::RESUMING;
551                ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
552            }
553        } else {
554            mState = TrackBase::ACTIVE;
555            ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
556        }
557
558        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
559        status = playbackThread->addTrack_l(this);
560        if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
561            triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
562            //  restore previous state if start was rejected by policy manager
563            if (status == PERMISSION_DENIED) {
564                mState = state;
565            }
566        }
567        // track was already in the active list, not a problem
568        if (status == ALREADY_EXISTS) {
569            status = NO_ERROR;
570        }
571    } else {
572        status = BAD_VALUE;
573    }
574    return status;
575}
576
577void AudioFlinger::PlaybackThread::Track::stop()
578{
579    ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
580    sp<ThreadBase> thread = mThread.promote();
581    if (thread != 0) {
582        Mutex::Autolock _l(thread->mLock);
583        track_state state = mState;
584        if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
585            // If the track is not active (PAUSED and buffers full), flush buffers
586            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
587            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
588                reset();
589                mState = STOPPED;
590            } else if (!isFastTrack() && !isOffloaded()) {
591                mState = STOPPED;
592            } else {
593                // For fast tracks prepareTracks_l() will set state to STOPPING_2
594                // when presentation is complete
595                // For an offloaded track this starts a drain and state will
596                // move to STOPPING_2 when drain completes and then STOPPED
597                mState = STOPPING_1;
598            }
599            ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
600                    playbackThread);
601        }
602    }
603}
604
605void AudioFlinger::PlaybackThread::Track::pause()
606{
607    ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
608    sp<ThreadBase> thread = mThread.promote();
609    if (thread != 0) {
610        Mutex::Autolock _l(thread->mLock);
611        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
612        switch (mState) {
613        case STOPPING_1:
614        case STOPPING_2:
615            if (!isOffloaded()) {
616                /* nothing to do if track is not offloaded */
617                break;
618            }
619
620            // Offloaded track was draining, we need to carry on draining when resumed
621            mResumeToStopping = true;
622            // fall through...
623        case ACTIVE:
624        case RESUMING:
625            mState = PAUSING;
626            ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
627            playbackThread->signal_l();
628            break;
629
630        default:
631            break;
632        }
633    }
634}
635
636void AudioFlinger::PlaybackThread::Track::flush()
637{
638    ALOGV("flush(%d)", mName);
639    sp<ThreadBase> thread = mThread.promote();
640    if (thread != 0) {
641        Mutex::Autolock _l(thread->mLock);
642        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
643
644        if (isOffloaded()) {
645            // If offloaded, we allow flush during any state except terminated,
646            // and keep the track active to avoid problems if the user is seeking
647            // rapidly and the underlying hardware has a significant delay handling
648            // a pause.
649            if (isTerminated()) {
650                return;
651            }
652
653            ALOGV("flush: offload flush");
654            reset();
655
656            if (mState == STOPPING_1 || mState == STOPPING_2) {
657                ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
658                mState = ACTIVE;
659            }
660
661            if (mState == ACTIVE) {
662                ALOGV("flush called in active state, resetting buffer time out retry count");
663                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
664            }
665
666            mResumeToStopping = false;
667        } else {
668            if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
669                    mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
670                return;
671            }
672            // No point remaining in PAUSED state after a flush => go to
673            // FLUSHED state
674            mState = FLUSHED;
675            // do not reset the track if it is still in the process of being stopped or paused.
676            // this will be done by prepareTracks_l() when the track is stopped.
677            // prepareTracks_l() will see mState == FLUSHED, then
678            // remove from active track list, reset(), and trigger presentation complete
679            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
680                reset();
681            }
682        }
683        // Prevent flush being lost if the track is flushed and then resumed
684        // before mixer thread can run. This is important when offloading
685        // because the hardware buffer could hold a large amount of audio
686        playbackThread->flushOutput_l();
687        playbackThread->signal_l();
688    }
689}
690
691void AudioFlinger::PlaybackThread::Track::reset()
692{
693    // Do not reset twice to avoid discarding data written just after a flush and before
694    // the audioflinger thread detects the track is stopped.
695    if (!mResetDone) {
696        // Force underrun condition to avoid false underrun callback until first data is
697        // written to buffer
698        android_atomic_and(~CBLK_FORCEREADY, &mCblk->flags);
699        mFillingUpStatus = FS_FILLING;
700        mResetDone = true;
701        if (mState == FLUSHED) {
702            mState = IDLE;
703        }
704    }
705}
706
707status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
708{
709    sp<ThreadBase> thread = mThread.promote();
710    if (thread == 0) {
711        ALOGE("thread is dead");
712        return FAILED_TRANSACTION;
713    } else if ((thread->type() == ThreadBase::DIRECT) ||
714                    (thread->type() == ThreadBase::OFFLOAD)) {
715        return thread->setParameters(keyValuePairs);
716    } else {
717        return PERMISSION_DENIED;
718    }
719}
720
721status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
722{
723    status_t status = DEAD_OBJECT;
724    sp<ThreadBase> thread = mThread.promote();
725    if (thread != 0) {
726        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
727        sp<AudioFlinger> af = mClient->audioFlinger();
728
729        Mutex::Autolock _l(af->mLock);
730
731        sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);
732
733        if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
734            Mutex::Autolock _dl(playbackThread->mLock);
735            Mutex::Autolock _sl(srcThread->mLock);
736            sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
737            if (chain == 0) {
738                return INVALID_OPERATION;
739            }
740
741            sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
742            if (effect == 0) {
743                return INVALID_OPERATION;
744            }
745            srcThread->removeEffect_l(effect);
746            playbackThread->addEffect_l(effect);
747            // removeEffect_l() has stopped the effect if it was active so it must be restarted
748            if (effect->state() == EffectModule::ACTIVE ||
749                    effect->state() == EffectModule::STOPPING) {
750                effect->start();
751            }
752
753            sp<EffectChain> dstChain = effect->chain().promote();
754            if (dstChain == 0) {
755                srcThread->addEffect_l(effect);
756                return INVALID_OPERATION;
757            }
758            AudioSystem::unregisterEffect(effect->id());
759            AudioSystem::registerEffect(&effect->desc(),
760                                        srcThread->id(),
761                                        dstChain->strategy(),
762                                        AUDIO_SESSION_OUTPUT_MIX,
763                                        effect->id());
764        }
765        status = playbackThread->attachAuxEffect(this, EffectId);
766    }
767    return status;
768}
769
770void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
771{
772    mAuxEffectId = EffectId;
773    mAuxBuffer = buffer;
774}
775
776bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
777                                                         size_t audioHalFrames)
778{
779    // a track is considered presented when the total number of frames written to the audio HAL
780    // reaches the number of frames that had been written when presentationComplete() was first
781    // called (mPresentationCompleteFrames == 0) plus the frames then pending in the HAL buffers.
782    // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
783    // to detect when all frames have been played. In this case framesWritten isn't
784    // useful because it doesn't always reflect whether there is data in the h/w
785    // buffers, particularly if a track has been paused and resumed during draining
786    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
787                      mPresentationCompleteFrames, framesWritten);
788    if (mPresentationCompleteFrames == 0) {
789        mPresentationCompleteFrames = framesWritten + audioHalFrames;
790        ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
791                  mPresentationCompleteFrames, audioHalFrames);
792    }
793
794    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
795        ALOGV("presentationComplete() session %d complete: framesWritten %d",
796                  mSessionId, framesWritten);
797        triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
798        mAudioTrackServerProxy->setStreamEndDone();
799        return true;
800    }
801    return false;
802}
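
// Worked example of the accounting above (illustrative numbers): if the first call
// sees framesWritten = 48000 with audioHalFrames = 1920 still queued downstream,
// then mPresentationCompleteFrames becomes 49920 and the track is reported as
// presented once framesWritten reaches 49920, i.e. once everything buffered at that
// point has also been written out.  Offloaded tracks skip this frame accounting and
// are treated as complete here because the drain callback path covers them.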
803
804void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
805{
806    for (int i = 0; i < (int)mSyncEvents.size(); i++) {
807        if (mSyncEvents[i]->type() == type) {
808            mSyncEvents[i]->trigger();
809            mSyncEvents.removeAt(i);
810            i--;
811        }
812    }
813}
814
815// implement VolumeBufferProvider interface
816
817uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
818{
819    // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
820    ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
821    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
822    uint32_t vl = vlr & 0xFFFF;
823    uint32_t vr = vlr >> 16;
824    // track volumes come from shared memory, so can't be trusted and must be clamped
825    if (vl > MAX_GAIN_INT) {
826        vl = MAX_GAIN_INT;
827    }
828    if (vr > MAX_GAIN_INT) {
829        vr = MAX_GAIN_INT;
830    }
831    // now apply the cached master volume and stream type volume;
832    // this is trusted but lacks any synchronization or barrier so may be stale
833    float v = mCachedVolume;
834    vl *= v;
835    vr *= v;
836    // re-combine into U4.16
837    vlr = (vr << 16) | (vl & 0xFFFF);
838    // FIXME look at mute, pause, and stop flags
839    return vlr;
840}
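
// Worked example of the packed volume handling above (illustrative): with the left
// channel at unity gain and the right at half gain the proxy returns
// vlr = 0x08001000, which unpacks to vl = 0x1000 and vr = 0x0800.  Scaling both by a
// cached master/stream volume of 0.5 gives vl = 0x0800 and vr = 0x0400, re-packed as
// (vr << 16) | vl = 0x04000800 before being handed back to the fast mixer.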
841
842status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
843{
844    if (isTerminated() || mState == PAUSED ||
845            ((framesReady() == 0) && ((mSharedBuffer != 0) ||
846                                      (mState == STOPPED)))) {
847        ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
848              mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
849        event->cancel();
850        return INVALID_OPERATION;
851    }
852    (void) TrackBase::setSyncEvent(event);
853    return NO_ERROR;
854}
855
856void AudioFlinger::PlaybackThread::Track::invalidate()
857{
858    // FIXME should use proxy, and needs work
859    audio_track_cblk_t* cblk = mCblk;
860    android_atomic_or(CBLK_INVALID, &cblk->flags);
861    android_atomic_release_store(0x40000000, &cblk->mFutex);
862    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
863    (void) __futex_syscall3(&cblk->mFutex, FUTEX_WAKE, INT_MAX);
864    mIsInvalid = true;
865}
866
867// ----------------------------------------------------------------------------
868
869sp<AudioFlinger::PlaybackThread::TimedTrack>
870AudioFlinger::PlaybackThread::TimedTrack::create(
871            PlaybackThread *thread,
872            const sp<Client>& client,
873            audio_stream_type_t streamType,
874            uint32_t sampleRate,
875            audio_format_t format,
876            audio_channel_mask_t channelMask,
877            size_t frameCount,
878            const sp<IMemory>& sharedBuffer,
879            int sessionId) {
880    if (!client->reserveTimedTrack())
881        return 0;
882
883    return new TimedTrack(
884        thread, client, streamType, sampleRate, format, channelMask, frameCount,
885        sharedBuffer, sessionId);
886}
887
888AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
889            PlaybackThread *thread,
890            const sp<Client>& client,
891            audio_stream_type_t streamType,
892            uint32_t sampleRate,
893            audio_format_t format,
894            audio_channel_mask_t channelMask,
895            size_t frameCount,
896            const sp<IMemory>& sharedBuffer,
897            int sessionId)
898    : Track(thread, client, streamType, sampleRate, format, channelMask,
899            frameCount, sharedBuffer, sessionId, IAudioFlinger::TRACK_TIMED),
900      mQueueHeadInFlight(false),
901      mTrimQueueHeadOnRelease(false),
902      mFramesPendingInQueue(0),
903      mTimedSilenceBuffer(NULL),
904      mTimedSilenceBufferSize(0),
905      mTimedAudioOutputOnTime(false),
906      mMediaTimeTransformValid(false)
907{
908    LocalClock lc;
909    mLocalTimeFreq = lc.getLocalFreq();
910
911    mLocalTimeToSampleTransform.a_zero = 0;
912    mLocalTimeToSampleTransform.b_zero = 0;
913    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
914    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
915    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
916                            &mLocalTimeToSampleTransform.a_to_b_denom);
917
918    mMediaTimeToSampleTransform.a_zero = 0;
919    mMediaTimeToSampleTransform.b_zero = 0;
920    mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
921    mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
922    LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
923                            &mMediaTimeToSampleTransform.a_to_b_denom);
924}
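
// Worked example of the transforms above (illustrative): for a 48 kHz track the
// media-time transform reduces 48000/1000000 to 6/125, so a PTS span of 20000 us
// maps to 20000 * 6 / 125 = 960 samples; the local-time transform is built the same
// way with the LocalClock frequency as its denominator.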
925
926AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
927    mClient->releaseTimedTrack();
928    delete [] mTimedSilenceBuffer;
929}
930
931status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
932    size_t size, sp<IMemory>* buffer) {
933
934    Mutex::Autolock _l(mTimedBufferQueueLock);
935
936    trimTimedBufferQueue_l();
937
938    // lazily initialize the shared memory heap for timed buffers
939    if (mTimedMemoryDealer == NULL) {
940        const int kTimedBufferHeapSize = 512 << 10;
941
942        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
943                                              "AudioFlingerTimed");
944        if (mTimedMemoryDealer == NULL)
945            return NO_MEMORY;
946    }
947
948    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
949    if (newBuffer == NULL) {
950        newBuffer = mTimedMemoryDealer->allocate(size);
951        if (newBuffer == NULL)
952            return NO_MEMORY;
953    }
954
955    *buffer = newBuffer;
956    return NO_ERROR;
957}
958
959// caller must hold mTimedBufferQueueLock
960void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
961    int64_t mediaTimeNow;
962    {
963        Mutex::Autolock mttLock(mMediaTimeTransformLock);
964        if (!mMediaTimeTransformValid)
965            return;
966
967        int64_t targetTimeNow;
968        status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
969            ? mCCHelper.getCommonTime(&targetTimeNow)
970            : mCCHelper.getLocalTime(&targetTimeNow);
971
972        if (OK != res)
973            return;
974
975        if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
976                                                    &mediaTimeNow)) {
977            return;
978        }
979    }
980
981    size_t trimEnd;
982    for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
983        int64_t bufEnd;
984
985        if ((trimEnd + 1) < mTimedBufferQueue.size()) {
986            // We have a next buffer.  Just use its PTS as the PTS of the frame
987            // following the last frame in this buffer.  If the stream is sparse
988            // (i.e., there are deliberate gaps left in the stream which should be
989            // filled with silence by the TimedAudioTrack), then this can result
990            // in one extra buffer being left un-trimmed when it could have
991            // been.  In general, this is not typical, and we would rather
992            // optimize away the timestamp calculation below for the more common
993            // case where PTSes are contiguous.
994            bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
995        } else {
996            // We have no next buffer.  Compute the PTS of the frame following
997            // the last frame in this buffer by computing the duration of
998            // this buffer in media time units and adding it to the PTS of the
999            // buffer.
1000            int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
1001                               / mFrameSize;
1002
1003            if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
1004                                                                &bufEnd)) {
1005                ALOGE("Failed to convert frame count of %lld to media time"
1006                      " duration" " (scale factor %d/%u) in %s",
1007                      frameCount,
1008                      mMediaTimeToSampleTransform.a_to_b_numer,
1009                      mMediaTimeToSampleTransform.a_to_b_denom,
1010                      __PRETTY_FUNCTION__);
1011                break;
1012            }
1013            bufEnd += mTimedBufferQueue[trimEnd].pts();
1014        }
1015
1016        if (bufEnd > mediaTimeNow)
1017            break;
1018
1019        // Is the buffer we want to use in the middle of a mix operation right
1020        // now?  If so, don't actually trim it.  Just wait for the releaseBuffer
1021        // from the mixer which should be coming back shortly.
1022        if (!trimEnd && mQueueHeadInFlight) {
1023            mTrimQueueHeadOnRelease = true;
1024        }
1025    }
1026
1027    size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
1028    if (trimStart < trimEnd) {
1029        // Update the bookkeeping for framesReady()
1030        for (size_t i = trimStart; i < trimEnd; ++i) {
1031            updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
1032        }
1033
1034        // Now actually remove the buffers from the queue.
1035        mTimedBufferQueue.removeItemsAt(trimStart, trimEnd);
1036    }
1037}
1038
1039void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
1040        const char* logTag) {
1041    ALOG_ASSERT(mTimedBufferQueue.size() > 0,
1042                "%s called (reason \"%s\"), but timed buffer queue has no"
1043                " elements to trim.", __FUNCTION__, logTag);
1044
1045    updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
1046    mTimedBufferQueue.removeAt(0);
1047}
1048
1049void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
1050        const TimedBuffer& buf,
1051        const char* logTag) {
1052    uint32_t bufBytes        = buf.buffer()->size();
1053    uint32_t consumedAlready = buf.position();
1054
1055    ALOG_ASSERT(consumedAlready <= bufBytes,
1056                "Bad bookkeeping while updating frames pending.  Timed buffer is"
1057                " only %u bytes long, but claims to have consumed %u"
1058                " bytes.  (update reason: \"%s\")",
1059                bufBytes, consumedAlready, logTag);
1060
1061    uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
1062    ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
1063                "Bad bookkeeping while updating frames pending.  Should have at"
1064                " least %u queued frames, but we think we have only %u.  (update"
1065                " reason: \"%s\")",
1066                bufFrames, mFramesPendingInQueue, logTag);
1067
1068    mFramesPendingInQueue -= bufFrames;
1069}
1070
1071status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
1072    const sp<IMemory>& buffer, int64_t pts) {
1073
1074    {
1075        Mutex::Autolock mttLock(mMediaTimeTransformLock);
1076        if (!mMediaTimeTransformValid)
1077            return INVALID_OPERATION;
1078    }
1079
1080    Mutex::Autolock _l(mTimedBufferQueueLock);
1081
1082    uint32_t bufFrames = buffer->size() / mFrameSize;
1083    mFramesPendingInQueue += bufFrames;
1084    mTimedBufferQueue.add(TimedBuffer(buffer, pts));
1085
1086    return NO_ERROR;
1087}
1088
1089status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
1090    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
1091
1092    ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
1093           xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
1094           target);
1095
1096    if (!(target == TimedAudioTrack::LOCAL_TIME ||
1097          target == TimedAudioTrack::COMMON_TIME)) {
1098        return BAD_VALUE;
1099    }
1100
1101    Mutex::Autolock lock(mMediaTimeTransformLock);
1102    mMediaTimeTransform = xform;
1103    mMediaTimeTransformTarget = target;
1104    mMediaTimeTransformValid = true;
1105
1106    return NO_ERROR;
1107}
1108
1109#define min(a, b) ((a) < (b) ? (a) : (b))
1110
1111// implementation of getNextBuffer for tracks whose buffers have timestamps
1112status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
1113    AudioBufferProvider::Buffer* buffer, int64_t pts)
1114{
1115    if (pts == AudioBufferProvider::kInvalidPTS) {
1116        buffer->raw = NULL;
1117        buffer->frameCount = 0;
1118        mTimedAudioOutputOnTime = false;
1119        return INVALID_OPERATION;
1120    }
1121
1122    Mutex::Autolock _l(mTimedBufferQueueLock);
1123
1124    ALOG_ASSERT(!mQueueHeadInFlight,
1125                "getNextBuffer called without releaseBuffer!");
1126
1127    while (true) {
1128
1129        // if we have no timed buffers, then fail
1130        if (mTimedBufferQueue.isEmpty()) {
1131            buffer->raw = NULL;
1132            buffer->frameCount = 0;
1133            return NOT_ENOUGH_DATA;
1134        }
1135
1136        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1137
1138        // calculate the PTS of the head of the timed buffer queue expressed in
1139        // local time
1140        int64_t headLocalPTS;
1141        {
1142            Mutex::Autolock mttLock(mMediaTimeTransformLock);
1143
1144            ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
1145
1146            if (mMediaTimeTransform.a_to_b_denom == 0) {
1147                // the transform represents a pause, so yield silence
1148                timedYieldSilence_l(buffer->frameCount, buffer);
1149                return NO_ERROR;
1150            }
1151
1152            int64_t transformedPTS;
1153            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
1154                                                        &transformedPTS)) {
1155                // the transform failed.  this shouldn't happen, but if it does
1156                // then just drop this buffer
1157                ALOGW("timedGetNextBuffer transform failed");
1158                buffer->raw = NULL;
1159                buffer->frameCount = 0;
1160                trimTimedBufferQueueHead_l("getNextBuffer; no transform");
1161                return NO_ERROR;
1162            }
1163
1164            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
1165                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
1166                                                          &headLocalPTS)) {
1167                    buffer->raw = NULL;
1168                    buffer->frameCount = 0;
1169                    return INVALID_OPERATION;
1170                }
1171            } else {
1172                headLocalPTS = transformedPTS;
1173            }
1174        }
1175
1176        // adjust the head buffer's PTS to reflect the portion of the head buffer
1177        // that has already been consumed
1178        int64_t effectivePTS = headLocalPTS +
1179                ((head.position() / mFrameSize) * mLocalTimeFreq / sampleRate());
1180
1181        // Calculate the delta in samples between the head of the input buffer
1182        // queue and the start of the next output buffer that will be written.
1183        // If the transformation fails because of over or underflow, it means
1184        // that the sample's position in the output stream is so far out of
1185        // whack that it should just be dropped.
1186        int64_t sampleDelta;
1187        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
1188            ALOGV("*** head buffer is too far from PTS: dropped buffer");
1189            trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
1190                                       " mix");
1191            continue;
1192        }
1193        if (!mLocalTimeToSampleTransform.doForwardTransform(
1194                (effectivePTS - pts) << 32, &sampleDelta)) {
1195            ALOGV("*** too late during sample rate transform: dropped buffer");
1196            trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
1197            continue;
1198        }
1199
1200        ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
1201               " sampleDelta=[%d.%08x]",
1202               head.pts(), head.position(), pts,
1203               static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
1204                   + (sampleDelta >> 32)),
1205               static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
1206
1207        // if the delta between the ideal placement for the next input sample and
1208        // the current output position is within this threshold, then we will
1209        // concatenate the next input samples to the previous output
1210        const int64_t kSampleContinuityThreshold =
1211                (static_cast<int64_t>(sampleRate()) << 32) / 250;
1212
1213        // if this is the first buffer of audio that we're emitting from this track
1214        // then it should be almost exactly on time.
1215        const int64_t kSampleStartupThreshold = 1LL << 32;
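        // Worked example (illustrative): sampleDelta is a signed Q32.32 frame count,
        // so at 48 kHz kSampleContinuityThreshold is (48000 << 32) / 250, i.e.
        // 192 frames (4 ms), while kSampleStartupThreshold of 1LL << 32 is exactly
        // one frame: a buffer whose ideal start lands within 4 ms of the current
        // output position is concatenated, but on startup it must land within one
        // frame to be considered on time.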
1216
1217        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
1218           (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
1219            // the next input is close enough to being on time, so concatenate it
1220            // with the last output
1221            timedYieldSamples_l(buffer);
1222
1223            ALOGVV("*** on time: head.pos=%d frameCount=%u",
1224                    head.position(), buffer->frameCount);
1225            return NO_ERROR;
1226        }
1227
1228        // Looks like our output is not on time.  Reset our on-time status.
1229        // Next time we mix samples from our input queue, they should be within
1230        // the StartupThreshold.
1231        mTimedAudioOutputOnTime = false;
1232        if (sampleDelta > 0) {
1233            // the gap between the current output position and the proper start of
1234            // the next input sample is too big, so fill it with silence
1235            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
1236
1237            timedYieldSilence_l(framesUntilNextInput, buffer);
1238            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
1239            return NO_ERROR;
1240        } else {
1241            // the next input sample is late
1242            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
1243            size_t onTimeSamplePosition =
1244                    head.position() + lateFrames * mFrameSize;
1245
1246            if (onTimeSamplePosition > head.buffer()->size()) {
1247                // all the remaining samples in the head are too late, so
1248                // drop it and move on
1249                ALOGV("*** too late: dropped buffer");
1250                trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
1251                continue;
1252            } else {
1253                // skip over the late samples
1254                head.setPosition(onTimeSamplePosition);
1255
1256                // yield the available samples
1257                timedYieldSamples_l(buffer);
1258
1259                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
1260                return NO_ERROR;
1261            }
1262        }
1263    }
1264}
1265
1266// Yield samples from the timed buffer queue head up to the given output
1267// buffer's capacity.
1268//
1269// Caller must hold mTimedBufferQueueLock
1270void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
1271    AudioBufferProvider::Buffer* buffer) {
1272
1273    const TimedBuffer& head = mTimedBufferQueue[0];
1274
1275    buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
1276                   head.position());
1277
1278    uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
1279                                 mFrameSize);
1280    size_t framesRequested = buffer->frameCount;
1281    buffer->frameCount = min(framesLeftInHead, framesRequested);
1282
1283    mQueueHeadInFlight = true;
1284    mTimedAudioOutputOnTime = true;
1285}
1286
1287// Yield samples of silence up to the given output buffer's capacity
1288//
1289// Caller must hold mTimedBufferQueueLock
1290void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
1291    uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
1292
1293    // lazily allocate a buffer filled with silence
1294    if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
1295        delete [] mTimedSilenceBuffer;
1296        mTimedSilenceBufferSize = numFrames * mFrameSize;
1297        mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
1298        memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
1299    }
1300
1301    buffer->raw = mTimedSilenceBuffer;
1302    size_t framesRequested = buffer->frameCount;
1303    buffer->frameCount = min(numFrames, framesRequested);
1304
1305    mTimedAudioOutputOnTime = false;
1306}
1307
1308// AudioBufferProvider interface
1309void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
1310    AudioBufferProvider::Buffer* buffer) {
1311
1312    Mutex::Autolock _l(mTimedBufferQueueLock);
1313
1314    // If the buffer which was just released is part of the buffer at the head
1315    // of the queue, be sure to update the amount of the buffer which has been
1316    // consumed.  If the buffer being returned is not part of the head of the
1317    // queue, it's either because the buffer is part of the silence buffer, or
1318    // because the head of the timed queue was trimmed after the mixer called
1319    // getNextBuffer but before the mixer called releaseBuffer.
1320    if (buffer->raw == mTimedSilenceBuffer) {
1321        ALOG_ASSERT(!mQueueHeadInFlight,
1322                    "Queue head in flight during release of silence buffer!");
1323        goto done;
1324    }
1325
1326    ALOG_ASSERT(mQueueHeadInFlight,
1327                "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
1328                " head in flight.");
1329
1330    if (mTimedBufferQueue.size()) {
1331        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1332
1333        void* start = head.buffer()->pointer();
1334        void* end   = reinterpret_cast<void*>(
1335                        reinterpret_cast<uint8_t*>(head.buffer()->pointer())
1336                        + head.buffer()->size());
1337
1338        ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
1339                    "released buffer not within the head of the timed buffer"
1340                    " queue; qHead = [%p, %p], released buffer = %p",
1341                    start, end, buffer->raw);
1342
1343        head.setPosition(head.position() +
1344                (buffer->frameCount * mFrameSize));
1345        mQueueHeadInFlight = false;
1346
1347        ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
1348                    "Bad bookkeeping during releaseBuffer!  Should have at"
1349                    " least %u queued frames, but we think we have only %u",
1350                    buffer->frameCount, mFramesPendingInQueue);
1351
1352        mFramesPendingInQueue -= buffer->frameCount;
1353
1354        if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
1355            || mTrimQueueHeadOnRelease) {
1356            trimTimedBufferQueueHead_l("releaseBuffer");
1357            mTrimQueueHeadOnRelease = false;
1358        }
1359    } else {
1360        LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
1361                  " buffers in the timed buffer queue");
1362    }
1363
1364done:
1365    buffer->raw = 0;
1366    buffer->frameCount = 0;
1367}
1368
1369size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
1370    Mutex::Autolock _l(mTimedBufferQueueLock);
1371    return mFramesPendingInQueue;
1372}
1373
1374AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
1375        : mPTS(0), mPosition(0) {}
1376
1377AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
1378    const sp<IMemory>& buffer, int64_t pts)
1379        : mBuffer(buffer), mPTS(pts), mPosition(0) {}
1380
1381
1382// ----------------------------------------------------------------------------
1383
1384AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
1385            PlaybackThread *playbackThread,
1386            DuplicatingThread *sourceThread,
1387            uint32_t sampleRate,
1388            audio_format_t format,
1389            audio_channel_mask_t channelMask,
1390            size_t frameCount)
1391    :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
1392                NULL, 0, IAudioFlinger::TRACK_DEFAULT),
1393    mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
1394{
1395
1396    if (mCblk != NULL) {
1397        mOutBuffer.frameCount = 0;
1398        playbackThread->mTracks.add(this);
1399        ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
1400                "mCblk->frameCount_ %u, mChannelMask 0x%08x",
1401                mCblk, mBuffer,
1402                mCblk->frameCount_, mChannelMask);
1403        // since client and server are in the same process,
1404        // the buffer has the same virtual address on both sides
1405        // Create the proxy once, in client-in-server mode, then configure it.
1406        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
1407                true /*clientInServer*/);
1408        mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000));
1409        mClientProxy->setSendLevel(0.0);
1410        mClientProxy->setSampleRate(sampleRate);
1411    } else {
1412        ALOGW("Error creating output track on thread %p", playbackThread);
1413    }
1414}
1415
1416AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
1417{
1418    clearBufferQueue();
1419    delete mClientProxy;
1420    // the superclass destructor deletes the server proxy and the shared memory both proxies refer to
1421}
1422
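// Start the underlying Track; on success, mark this OutputTrack active and
// reset mRetryCount.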
1423status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
1424                                                          int triggerSession)
1425{
1426    status_t status = Track::start(event, triggerSession);
1427    if (status != NO_ERROR) {
1428        return status;
1429    }
1430
1431    mActive = true;
1432    mRetryCount = 127;
1433    return status;
1434}
1435
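// Stop the underlying Track, discard any queued overflow buffers, and mark
// the track inactive.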
1436void AudioFlinger::PlaybackThread::OutputTrack::stop()
1437{
1438    Track::stop();
1439    clearBufferQueue();
1440    mOutBuffer.frameCount = 0;
1441    mActive = false;
1442}
1443
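// Push 'frames' frames of 16-bit interleaved PCM from the source thread into
// this track.  Pending overflow buffers are drained first; if the track was
// idle it is started and primed with leading silence.  Frames that cannot be
// written before the source thread's wait budget expires are copied into a
// new overflow buffer (bounded by kMaxOverFlowBuffers).  Returns true if an
// output buffer could not be obtained in time.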
1444bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
1445{
1446    Buffer *pInBuffer;
1447    Buffer inBuffer;
1448    uint32_t channelCount = mChannelCount;
1449    bool outputBufferFull = false;
1450    inBuffer.frameCount = frames;
1451    inBuffer.i16 = data;
1452
1453    uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
1454
1455    if (!mActive && frames != 0) {
1456        start();
1457        sp<ThreadBase> thread = mThread.promote();
1458        if (thread != 0) {
1459            MixerThread *mixerThread = (MixerThread *)thread.get();
1460            if (mFrameCount > frames) {
1461                if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1462                    uint32_t startFrames = (mFrameCount - frames);
1463                    pInBuffer = new Buffer;
1464                    pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
1465                    pInBuffer->frameCount = startFrames;
1466                    pInBuffer->i16 = pInBuffer->mBuffer;
1467                    memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
1468                    mBufferQueue.add(pInBuffer);
1469                } else {
1470                    ALOGW("OutputTrack::write() %p no more buffers in queue", this);
1471                }
1472            }
1473        }
1474    }
1475
1476    while (waitTimeLeftMs) {
1477        // First write pending buffers, then new data
1478        if (mBufferQueue.size()) {
1479            pInBuffer = mBufferQueue.itemAt(0);
1480        } else {
1481            pInBuffer = &inBuffer;
1482        }
1483
1484        if (pInBuffer->frameCount == 0) {
1485            break;
1486        }
1487
1488        if (mOutBuffer.frameCount == 0) {
1489            mOutBuffer.frameCount = pInBuffer->frameCount;
1490            nsecs_t startTime = systemTime();
1491            status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
1492            if (status != NO_ERROR) {
1493                ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
1494                        mThread.unsafe_get(), status);
1495                outputBufferFull = true;
1496                break;
1497            }
1498            uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
1499            if (waitTimeLeftMs >= waitTimeMs) {
1500                waitTimeLeftMs -= waitTimeMs;
1501            } else {
1502                waitTimeLeftMs = 0;
1503            }
1504        }
1505
1506        uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
1507                pInBuffer->frameCount;
1508        memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
1509        Proxy::Buffer buf;
1510        buf.mFrameCount = outFrames;
1511        buf.mRaw = NULL;
1512        mClientProxy->releaseBuffer(&buf);
1513        pInBuffer->frameCount -= outFrames;
1514        pInBuffer->i16 += outFrames * channelCount;
1515        mOutBuffer.frameCount -= outFrames;
1516        mOutBuffer.i16 += outFrames * channelCount;
1517
1518        if (pInBuffer->frameCount == 0) {
1519            if (mBufferQueue.size()) {
1520                mBufferQueue.removeAt(0);
1521                delete [] pInBuffer->mBuffer;
1522                delete pInBuffer;
1523                ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
1524                        mThread.unsafe_get(), mBufferQueue.size());
1525            } else {
1526                break;
1527            }
1528        }
1529    }
1530
1531    // If we could not write all frames, allocate a buffer and queue it for next time.
1532    if (inBuffer.frameCount) {
1533        sp<ThreadBase> thread = mThread.promote();
1534        if (thread != 0 && !thread->standby()) {
1535            if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1536                pInBuffer = new Buffer;
1537                pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
1538                pInBuffer->frameCount = inBuffer.frameCount;
1539                pInBuffer->i16 = pInBuffer->mBuffer;
1540                memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
1541                        sizeof(int16_t));
1542                mBufferQueue.add(pInBuffer);
1543                ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
1544                        mThread.unsafe_get(), mBufferQueue.size());
1545            } else {
1546                ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
1547                        this, mThread.unsafe_get());
1548            }
1549        }
1550    }
1551
1552    // Calling write() with a 0 length buffer means that no more data will be written:
1553    // If no more buffers are pending, fill output track buffer to make sure it is started
1554    // by output mixer.
1555    if (frames == 0 && mBufferQueue.size() == 0) {
1556        // FIXME broken, replace by getting framesReady() from proxy
1557        size_t user = 0;    // was mCblk->user
1558        if (user < mFrameCount) {
1559            frames = mFrameCount - user;
1560            pInBuffer = new Buffer;
1561            pInBuffer->mBuffer = new int16_t[frames * channelCount];
1562            pInBuffer->frameCount = frames;
1563            pInBuffer->i16 = pInBuffer->mBuffer;
1564            memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
1565            mBufferQueue.add(pInBuffer);
1566        } else if (mActive) {
1567            stop();
1568        }
1569    }
1570
1571    return outputBufferFull;
1572}
1573
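// Obtain a contiguous buffer from the client proxy, waiting at most
// waitTimeMs, and translate the result back into an AudioBufferProvider
// buffer.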
1574status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
1575        AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
1576{
1577    ClientProxy::Buffer buf;
1578    buf.mFrameCount = buffer->frameCount;
1579    struct timespec timeout;
1580    timeout.tv_sec = waitTimeMs / 1000;
1581    timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
1582    status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
1583    buffer->frameCount = buf.mFrameCount;
1584    buffer->raw = buf.mRaw;
1585    return status;
1586}
1587
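// Free all queued overflow buffers and their sample storage.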
1588void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
1589{
1590    size_t size = mBufferQueue.size();
1591
1592    for (size_t i = 0; i < size; i++) {
1593        Buffer *pBuffer = mBufferQueue.itemAt(i);
1594        delete [] pBuffer->mBuffer;
1595        delete pBuffer;
1596    }
1597    mBufferQueue.clear();
1598}
1599
1600
1601// ----------------------------------------------------------------------------
1602//      Record
1603// ----------------------------------------------------------------------------
1604
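// RecordHandle is the server-side IAudioRecord binder object handed back to
// the client; it forwards calls to the wrapped RecordTrack and destroys the
// track when the last reference goes away.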
1605AudioFlinger::RecordHandle::RecordHandle(
1606        const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
1607    : BnAudioRecord(),
1608    mRecordTrack(recordTrack)
1609{
1610}
1611
1612AudioFlinger::RecordHandle::~RecordHandle() {
1613    stop_nonvirtual();
1614    mRecordTrack->destroy();
1615}
1616
1617sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
1618    return mRecordTrack->getCblk();
1619}
1620
1621status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
1622        int triggerSession) {
1623    ALOGV("RecordHandle::start()");
1624    return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
1625}
1626
1627void AudioFlinger::RecordHandle::stop() {
1628    stop_nonvirtual();
1629}
1630
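// Common implementation shared by stop() and the destructor, which must not
// make a virtual call.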
1631void AudioFlinger::RecordHandle::stop_nonvirtual() {
1632    ALOGV("RecordHandle::stop()");
1633    mRecordTrack->stop();
1634}
1635
1636status_t AudioFlinger::RecordHandle::onTransact(
1637    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
1638{
1639    return BnAudioRecord::onTransact(code, data, reply, flags);
1640}
1641
1642// ----------------------------------------------------------------------------
1643
1644// RecordTrack constructor must be called with AudioFlinger::mLock held
1645AudioFlinger::RecordThread::RecordTrack::RecordTrack(
1646            RecordThread *thread,
1647            const sp<Client>& client,
1648            uint32_t sampleRate,
1649            audio_format_t format,
1650            audio_channel_mask_t channelMask,
1651            size_t frameCount,
1652            int sessionId)
1653    :   TrackBase(thread, client, sampleRate, format,
1654                  channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, false /*isOut*/),
1655        mOverflow(false)
1656{
1657    ALOGV("RecordTrack constructor");
1658    if (mCblk != NULL) {
1659        mAudioRecordServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount,
1660                mFrameSize);
1661        mServerProxy = mAudioRecordServerProxy;
1662    }
1663}
1664
1665AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
1666{
1667    ALOGV("%s", __func__);
1668}
1669
1670// AudioBufferProvider interface
1671status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
1672        int64_t pts)
1673{
1674    ServerProxy::Buffer buf;
1675    buf.mFrameCount = buffer->frameCount;
1676    status_t status = mServerProxy->obtainBuffer(&buf);
1677    buffer->frameCount = buf.mFrameCount;
1678    buffer->raw = buf.mRaw;
1679    if (buf.mFrameCount == 0) {
1680        // FIXME also wake futex so that overrun is noticed more quickly
1681        (void) android_atomic_or(CBLK_OVERRUN, &mCblk->flags);
1682    }
1683    return status;
1684}
1685
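// Forward the start request to the owning RecordThread, which manages the
// active track list; fails with BAD_VALUE if the thread no longer exists.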
1686status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
1687                                                        int triggerSession)
1688{
1689    sp<ThreadBase> thread = mThread.promote();
1690    if (thread != 0) {
1691        RecordThread *recordThread = (RecordThread *)thread.get();
1692        return recordThread->start(this, event, triggerSession);
1693    } else {
1694        return BAD_VALUE;
1695    }
1696}
1697
1698void AudioFlinger::RecordThread::RecordTrack::stop()
1699{
1700    sp<ThreadBase> thread = mThread.promote();
1701    if (thread != 0) {
1702        RecordThread *recordThread = (RecordThread *)thread.get();
1703        if (recordThread->stop(this)) {
1704            AudioSystem::stopInput(recordThread->id());
1705        }
1706    }
1707}
1708
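// Called when the last client reference is released: stop and release the
// audio input if the track is still active, then remove the track from its
// RecordThread.  A temporary strong reference keeps 'this' alive throughout.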
1709void AudioFlinger::RecordThread::RecordTrack::destroy()
1710{
1711    // see comments at AudioFlinger::PlaybackThread::Track::destroy()
1712    sp<RecordTrack> keep(this);
1713    {
1714        sp<ThreadBase> thread = mThread.promote();
1715        if (thread != 0) {
1716            if (mState == ACTIVE || mState == RESUMING) {
1717                AudioSystem::stopInput(thread->id());
1718            }
1719            AudioSystem::releaseInput(thread->id());
1720            Mutex::Autolock _l(thread->mLock);
1721            RecordThread *recordThread = (RecordThread *) thread.get();
1722            recordThread->destroyTrack_l(this);
1723        }
1724    }
1725}
1726
1727
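// Dump support: the header written here must stay in sync with the row
// format used by dump() below.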
1728/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
1729{
1730    result.append("Client Fmt Chn mask Session S   Server fCount\n");
1731}
1732
1733void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
1734{
1735    snprintf(buffer, size, "%6u %3u %08X %7u %1d %08X %6u\n",
1736            (mClient == 0) ? getpid_cached : mClient->pid(),
1737            mFormat,
1738            mChannelMask,
1739            mSessionId,
1740            mState,
1741            mCblk->server,
1742            mFrameCount);
1743}
1744
1745}; // namespace android
1746