AudioTrack.cpp revision 3f02be2ceeaa4b67dc0b1a81aebcfa049276fad8
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/primitives.h>
26#include <binder/IPCThreadState.h>
27#include <media/AudioTrack.h>
28#include <utils/Log.h>
29#include <private/media/AudioTrackShared.h>
30#include <media/IAudioFlinger.h>
31#include <media/AudioPolicyHelper.h>
32#include <media/AudioResamplerPublic.h>
33
34#define WAIT_PERIOD_MS                  10
35#define WAIT_STREAM_END_TIMEOUT_SEC     120
36static const int kMaxLoopCountNotifications = 32;
37
38namespace android {
39// ---------------------------------------------------------------------------
40
41template <typename T>
42const T &min(const T &x, const T &y) {
43    return x < y ? x : y;
44}
45
46static int64_t convertTimespecToUs(const struct timespec &tv)
47{
48    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
49}
50
51// current monotonic time in microseconds.
52static int64_t getNowUs()
53{
54    struct timespec tv;
55    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
56    return convertTimespecToUs(tv);
57}
58
59// static
60status_t AudioTrack::getMinFrameCount(
61        size_t* frameCount,
62        audio_stream_type_t streamType,
63        uint32_t sampleRate)
64{
65    if (frameCount == NULL) {
66        return BAD_VALUE;
67    }
68
69    // FIXME handle in server, like createTrack_l(), possible missing info:
70    //          audio_io_handle_t output
71    //          audio_format_t format
72    //          audio_channel_mask_t channelMask
73    //          audio_output_flags_t flags (FAST)
74    uint32_t afSampleRate;
75    status_t status;
76    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
77    if (status != NO_ERROR) {
78        ALOGE("Unable to query output sample rate for stream type %d; status %d",
79                streamType, status);
80        return status;
81    }
82    size_t afFrameCount;
83    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
84    if (status != NO_ERROR) {
85        ALOGE("Unable to query output frame count for stream type %d; status %d",
86                streamType, status);
87        return status;
88    }
89    uint32_t afLatency;
90    status = AudioSystem::getOutputLatency(&afLatency, streamType);
91    if (status != NO_ERROR) {
92        ALOGE("Unable to query output latency for stream type %d; status %d",
93                streamType, status);
94        return status;
95    }
96
97    // Ensure that buffer depth covers at least audio hardware latency
98    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
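    // Illustrative example (hypothetical figures): with afFrameCount = 512 and
    // afSampleRate = 48000, one mixer buffer lasts ~10 ms (integer-truncated), so
    // afLatency = 30 ms yields minBufCount = 3.  The clamp below enforces a floor of two buffers.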
99    if (minBufCount < 2) {
100        minBufCount = 2;
101    }
102
103    *frameCount = minBufCount * sourceFramesNeeded(sampleRate, afFrameCount, afSampleRate);
104    // The formula above should always produce a non-zero value under normal circumstances:
105    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
106    // Return error in the unlikely event that it does not, as that's part of the API contract.
107    if (*frameCount == 0) {
108        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
109                streamType, sampleRate);
110        return BAD_VALUE;
111    }
112    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%u, afSampleRate=%u, afLatency=%u",
113            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
114    return NO_ERROR;
115}
116
117// ---------------------------------------------------------------------------
118
119AudioTrack::AudioTrack()
120    : mStatus(NO_INIT),
121      mIsTimed(false),
122      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
123      mPreviousSchedulingGroup(SP_DEFAULT),
124      mPausedPosition(0)
125{
126    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
127    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
128    mAttributes.flags = 0x0;
129    strcpy(mAttributes.tags, "");
130}
131
132AudioTrack::AudioTrack(
133        audio_stream_type_t streamType,
134        uint32_t sampleRate,
135        audio_format_t format,
136        audio_channel_mask_t channelMask,
137        size_t frameCount,
138        audio_output_flags_t flags,
139        callback_t cbf,
140        void* user,
141        uint32_t notificationFrames,
142        int sessionId,
143        transfer_type transferType,
144        const audio_offload_info_t *offloadInfo,
145        int uid,
146        pid_t pid,
147        const audio_attributes_t* pAttributes)
148    : mStatus(NO_INIT),
149      mIsTimed(false),
150      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
151      mPreviousSchedulingGroup(SP_DEFAULT),
152      mPausedPosition(0)
153{
154    mStatus = set(streamType, sampleRate, format, channelMask,
155            frameCount, flags, cbf, user, notificationFrames,
156            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
157            offloadInfo, uid, pid, pAttributes);
158}
159
160AudioTrack::AudioTrack(
161        audio_stream_type_t streamType,
162        uint32_t sampleRate,
163        audio_format_t format,
164        audio_channel_mask_t channelMask,
165        const sp<IMemory>& sharedBuffer,
166        audio_output_flags_t flags,
167        callback_t cbf,
168        void* user,
169        uint32_t notificationFrames,
170        int sessionId,
171        transfer_type transferType,
172        const audio_offload_info_t *offloadInfo,
173        int uid,
174        pid_t pid,
175        const audio_attributes_t* pAttributes)
176    : mStatus(NO_INIT),
177      mIsTimed(false),
178      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
179      mPreviousSchedulingGroup(SP_DEFAULT),
180      mPausedPosition(0)
181{
182    mStatus = set(streamType, sampleRate, format, channelMask,
183            0 /*frameCount*/, flags, cbf, user, notificationFrames,
184            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
185            uid, pid, pAttributes);
186}
187
188AudioTrack::~AudioTrack()
189{
190    if (mStatus == NO_ERROR) {
191        // Make sure that the callback function exits in the case where
192        // it is looping on a buffer-full condition in obtainBuffer().
193        // Otherwise the callback thread will never exit.
194        stop();
195        if (mAudioTrackThread != 0) {
196            mProxy->interrupt();
197            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
198            mAudioTrackThread->requestExitAndWait();
199            mAudioTrackThread.clear();
200        }
201        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
202        mAudioTrack.clear();
203        mCblkMemory.clear();
204        mSharedBuffer.clear();
205        IPCThreadState::self()->flushCommands();
206        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
207                IPCThreadState::self()->getCallingPid(), mClientPid);
208        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
209    }
210}
211
212status_t AudioTrack::set(
213        audio_stream_type_t streamType,
214        uint32_t sampleRate,
215        audio_format_t format,
216        audio_channel_mask_t channelMask,
217        size_t frameCount,
218        audio_output_flags_t flags,
219        callback_t cbf,
220        void* user,
221        uint32_t notificationFrames,
222        const sp<IMemory>& sharedBuffer,
223        bool threadCanCallJava,
224        int sessionId,
225        transfer_type transferType,
226        const audio_offload_info_t *offloadInfo,
227        int uid,
228        pid_t pid,
229        const audio_attributes_t* pAttributes)
230{
231    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
232          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
233          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
234          sessionId, transferType);
235
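    // Resolve TRANSFER_DEFAULT to a concrete transfer mode based on the other arguments,
    // then check that an explicitly requested mode is consistent with them.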
236    switch (transferType) {
237    case TRANSFER_DEFAULT:
238        if (sharedBuffer != 0) {
239            transferType = TRANSFER_SHARED;
240        } else if (cbf == NULL || threadCanCallJava) {
241            transferType = TRANSFER_SYNC;
242        } else {
243            transferType = TRANSFER_CALLBACK;
244        }
245        break;
246    case TRANSFER_CALLBACK:
247        if (cbf == NULL || sharedBuffer != 0) {
248            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
249            return BAD_VALUE;
250        }
251        break;
252    case TRANSFER_OBTAIN:
253    case TRANSFER_SYNC:
254        if (sharedBuffer != 0) {
255            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
256            return BAD_VALUE;
257        }
258        break;
259    case TRANSFER_SHARED:
260        if (sharedBuffer == 0) {
261            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
262            return BAD_VALUE;
263        }
264        break;
265    default:
266        ALOGE("Invalid transfer type %d", transferType);
267        return BAD_VALUE;
268    }
269    mSharedBuffer = sharedBuffer;
270    mTransfer = transferType;
271
272    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
273            sharedBuffer->size());
274
275    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
276
277    AutoMutex lock(mLock);
278
279    // invariant that mAudioTrack != 0 is true only after set() returns successfully
280    if (mAudioTrack != 0) {
281        ALOGE("Track already in use");
282        return INVALID_OPERATION;
283    }
284
285    // handle default values first.
286    if (streamType == AUDIO_STREAM_DEFAULT) {
287        streamType = AUDIO_STREAM_MUSIC;
288    }
289    if (pAttributes == NULL) {
290        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
291            ALOGE("Invalid stream type %d", streamType);
292            return BAD_VALUE;
293        }
294        mStreamType = streamType;
295
296    } else {
297        // stream type shouldn't be looked at; this track has audio attributes
298        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
299        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
300                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
301        mStreamType = AUDIO_STREAM_DEFAULT;
302    }
303
304    // these below should probably come from the audioFlinger too...
305    if (format == AUDIO_FORMAT_DEFAULT) {
306        format = AUDIO_FORMAT_PCM_16_BIT;
307    }
308
309    // validate parameters
310    if (!audio_is_valid_format(format)) {
311        ALOGE("Invalid format %#x", format);
312        return BAD_VALUE;
313    }
314    mFormat = format;
315
316    if (!audio_is_output_channel(channelMask)) {
317        ALOGE("Invalid channel mask %#x", channelMask);
318        return BAD_VALUE;
319    }
320    mChannelMask = channelMask;
321    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
322    mChannelCount = channelCount;
323
324    // force direct flag if format is not linear PCM
325    // or offload was requested
326    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
327            || !audio_is_linear_pcm(format)) {
328        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
329                    ? "Offload request, forcing to Direct Output"
330                    : "Not linear PCM, forcing to Direct Output");
331        flags = (audio_output_flags_t)
332                // FIXME why can't we allow direct AND fast?
333                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
334    }
335
336    // force direct flag if HW A/V sync requested
337    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
338        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
339    }
340
341    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
342        if (audio_is_linear_pcm(format)) {
343            mFrameSize = channelCount * audio_bytes_per_sample(format);
344        } else {
345            mFrameSize = sizeof(uint8_t);
346        }
347    } else {
348        ALOG_ASSERT(audio_is_linear_pcm(format));
349        mFrameSize = channelCount * audio_bytes_per_sample(format);
350        // createTrack will return an error if PCM format is not supported by server,
351        // so no need to check for specific PCM formats here
352    }
353
354    // sampling rate must be specified for direct outputs
355    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
356        return BAD_VALUE;
357    }
358    mSampleRate = sampleRate;
359
360    // Make copy of input parameter offloadInfo so that in the future:
361    //  (a) createTrack_l doesn't need it as an input parameter
362    //  (b) we can support re-creation of offloaded tracks
363    if (offloadInfo != NULL) {
364        mOffloadInfoCopy = *offloadInfo;
365        mOffloadInfo = &mOffloadInfoCopy;
366    } else {
367        mOffloadInfo = NULL;
368    }
369
370    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
371    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
372    mSendLevel = 0.0f;
373    // mFrameCount is initialized in createTrack_l
374    mReqFrameCount = frameCount;
375    mNotificationFramesReq = notificationFrames;
376    mNotificationFramesAct = 0;
377    if (sessionId == AUDIO_SESSION_ALLOCATE) {
378        mSessionId = AudioSystem::newAudioUniqueId();
379    } else {
380        mSessionId = sessionId;
381    }
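    // Record the effective client identity: the uid/pid passed in are only honored when
    // explicitly provided (not -1) and the caller runs in this same process; otherwise
    // they are taken from the binder calling identity.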
382    int callingpid = IPCThreadState::self()->getCallingPid();
383    int mypid = getpid();
384    if (uid == -1 || (callingpid != mypid)) {
385        mClientUid = IPCThreadState::self()->getCallingUid();
386    } else {
387        mClientUid = uid;
388    }
389    if (pid == -1 || (callingpid != mypid)) {
390        mClientPid = callingpid;
391    } else {
392        mClientPid = pid;
393    }
394    mAuxEffectId = 0;
395    mFlags = flags;
396    mCbf = cbf;
397
398    if (cbf != NULL) {
399        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
400        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
401    }
402
403    // create the IAudioTrack
404    status_t status = createTrack_l();
405
406    if (status != NO_ERROR) {
407        if (mAudioTrackThread != 0) {
408            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
409            mAudioTrackThread->requestExitAndWait();
410            mAudioTrackThread.clear();
411        }
412        return status;
413    }
414
415    mStatus = NO_ERROR;
416    mState = STATE_STOPPED;
417    mUserData = user;
418    mLoopCount = 0;
419    mLoopStart = 0;
420    mLoopEnd = 0;
421    mLoopCountNotified = 0;
422    mMarkerPosition = 0;
423    mMarkerReached = false;
424    mNewPosition = 0;
425    mUpdatePeriod = 0;
426    mServer = 0;
427    mPosition = 0;
428    mReleased = 0;
429    mStartUs = 0;
430    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
431    mSequence = 1;
432    mObservedSequence = mSequence;
433    mInUnderrun = false;
434
435    return NO_ERROR;
436}
437
438// -------------------------------------------------------------------------
439
440status_t AudioTrack::start()
441{
442    AutoMutex lock(mLock);
443
444    if (mState == STATE_ACTIVE) {
445        return INVALID_OPERATION;
446    }
447
448    mInUnderrun = true;
449
450    State previousState = mState;
451    if (previousState == STATE_PAUSED_STOPPING) {
452        mState = STATE_STOPPING;
453    } else {
454        mState = STATE_ACTIVE;
455    }
456    (void) updateAndGetPosition_l();
457    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
458        // reset current position as seen by client to 0
459        mPosition = 0;
460        // For offloaded tracks, we don't know if the hardware counters are really zero here,
461        // since the flush is asynchronous and stop may not fully drain.
462        // We save the time when the track is started to later verify whether
463        // the counters are realistic (i.e. start from zero after this time).
464        mStartUs = getNowUs();
465
466        // force refresh of remaining frames by processAudioBuffer() as the last
467        // write before stop could have been partial.
468        mRefreshRemaining = true;
469    }
470    mNewPosition = mPosition + mUpdatePeriod;
471    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
472
473    sp<AudioTrackThread> t = mAudioTrackThread;
474    if (t != 0) {
475        if (previousState == STATE_STOPPING) {
476            mProxy->interrupt();
477        } else {
478            t->resume();
479        }
480    } else {
481        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
482        get_sched_policy(0, &mPreviousSchedulingGroup);
483        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
484    }
485
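    // Ask the server-side track to start.  If the control block was marked invalid (for
    // example after a media server death), restore the track instead of starting it directly.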
486    status_t status = NO_ERROR;
487    if (!(flags & CBLK_INVALID)) {
488        status = mAudioTrack->start();
489        if (status == DEAD_OBJECT) {
490            flags |= CBLK_INVALID;
491        }
492    }
493    if (flags & CBLK_INVALID) {
494        status = restoreTrack_l("start");
495    }
496
497    if (status != NO_ERROR) {
498        ALOGE("start() status %d", status);
499        mState = previousState;
500        if (t != 0) {
501            if (previousState != STATE_STOPPING) {
502                t->pause();
503            }
504        } else {
505            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
506            set_sched_policy(0, mPreviousSchedulingGroup);
507        }
508    }
509
510    return status;
511}
512
513void AudioTrack::stop()
514{
515    AutoMutex lock(mLock);
516    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
517        return;
518    }
519
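    // Offloaded tracks move to STATE_STOPPING so already-written data can finish draining
    // (until the stream-end event); non-offloaded tracks are considered stopped immediately.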
520    if (isOffloaded_l()) {
521        mState = STATE_STOPPING;
522    } else {
523        mState = STATE_STOPPED;
524        mReleased = 0;
525    }
526
527    mProxy->interrupt();
528    mAudioTrack->stop();
529    // the playback head position will reset to 0, so if a marker is set, we need
530    // to activate it again
531    mMarkerReached = false;
532
533    if (mSharedBuffer != 0) {
534        // clear buffer position and loop count.
535        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
536                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
537    }
538
539    sp<AudioTrackThread> t = mAudioTrackThread;
540    if (t != 0) {
541        if (!isOffloaded_l()) {
542            t->pause();
543        }
544    } else {
545        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
546        set_sched_policy(0, mPreviousSchedulingGroup);
547    }
548}
549
550bool AudioTrack::stopped() const
551{
552    AutoMutex lock(mLock);
553    return mState != STATE_ACTIVE;
554}
555
556void AudioTrack::flush()
557{
558    if (mSharedBuffer != 0) {
559        return;
560    }
561    AutoMutex lock(mLock);
562    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
563        return;
564    }
565    flush_l();
566}
567
568void AudioTrack::flush_l()
569{
570    ALOG_ASSERT(mState != STATE_ACTIVE);
571
572    // clear playback marker and periodic update counter
573    mMarkerPosition = 0;
574    mMarkerReached = false;
575    mUpdatePeriod = 0;
576    mRefreshRemaining = true;
577
578    mState = STATE_FLUSHED;
579    mReleased = 0;
580    if (isOffloaded_l()) {
581        mProxy->interrupt();
582    }
583    mProxy->flush();
584    mAudioTrack->flush();
585}
586
587void AudioTrack::pause()
588{
589    AutoMutex lock(mLock);
590    if (mState == STATE_ACTIVE) {
591        mState = STATE_PAUSED;
592    } else if (mState == STATE_STOPPING) {
593        mState = STATE_PAUSED_STOPPING;
594    } else {
595        return;
596    }
597    mProxy->interrupt();
598    mAudioTrack->pause();
599
600    if (isOffloaded_l()) {
601        if (mOutput != AUDIO_IO_HANDLE_NONE) {
602            // An offload output can be re-used between two audio tracks having
603            // the same configuration. A timestamp query for a paused track
604            // while the other is running would return an incorrect time.
605            // To fix this, cache the playback position on a pause() and return
606            // this time when requested until the track is resumed.
607
608            // OffloadThread sends HAL pause in its threadLoop. Time saved
609            // here can be slightly off.
610
611            // TODO: check return code for getRenderPosition.
612
613            uint32_t halFrames;
614            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
615            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
616        }
617    }
618}
619
620status_t AudioTrack::setVolume(float left, float right)
621{
622    // This duplicates a test by AudioTrack JNI, but that is not the only caller
623    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
624            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
625        return BAD_VALUE;
626    }
627
628    AutoMutex lock(mLock);
629    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
630    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
631
632    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
633
634    if (isOffloaded_l()) {
635        mAudioTrack->signal();
636    }
637    return NO_ERROR;
638}
639
640status_t AudioTrack::setVolume(float volume)
641{
642    return setVolume(volume, volume);
643}
644
645status_t AudioTrack::setAuxEffectSendLevel(float level)
646{
647    // This duplicates a test by AudioTrack JNI, but that is not the only caller
648    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
649        return BAD_VALUE;
650    }
651
652    AutoMutex lock(mLock);
653    mSendLevel = level;
654    mProxy->setSendLevel(level);
655
656    return NO_ERROR;
657}
658
659void AudioTrack::getAuxEffectSendLevel(float* level) const
660{
661    if (level != NULL) {
662        *level = mSendLevel;
663    }
664}
665
666status_t AudioTrack::setSampleRate(uint32_t rate)
667{
668    if (mIsTimed || isOffloadedOrDirect()) {
669        return INVALID_OPERATION;
670    }
671
672    AutoMutex lock(mLock);
673    if (mOutput == AUDIO_IO_HANDLE_NONE) {
674        return NO_INIT;
675    }
676    uint32_t afSamplingRate;
677    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
678        return NO_INIT;
679    }
680    if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
681        return BAD_VALUE;
682    }
683
684    mSampleRate = rate;
685    mProxy->setSampleRate(rate);
686
687    return NO_ERROR;
688}
689
690uint32_t AudioTrack::getSampleRate() const
691{
692    if (mIsTimed) {
693        return 0;
694    }
695
696    AutoMutex lock(mLock);
697
698    // sample rate can be updated during playback by the offloaded decoder so we need to
699    // query the HAL and update if needed.
700    // FIXME use Proxy return channel to update the rate from server and avoid polling here
701    if (isOffloadedOrDirect_l()) {
702        if (mOutput != AUDIO_IO_HANDLE_NONE) {
703            uint32_t sampleRate = 0;
704            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
705            if (status == NO_ERROR) {
706                mSampleRate = sampleRate;
707            }
708        }
709    }
710    return mSampleRate;
711}
712
713status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
714{
715    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
716        return INVALID_OPERATION;
717    }
718
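    // Accept loopCount == 0 (looping disabled) or a well-formed loop region within the static
    // buffer; loopCount == -1 requests infinite looping.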
719    if (loopCount == 0) {
720        ;
721    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
722            loopEnd - loopStart >= MIN_LOOP) {
723        ;
724    } else {
725        return BAD_VALUE;
726    }
727
728    AutoMutex lock(mLock);
729    // See setPosition() regarding setting parameters such as loop points or position while active
730    if (mState == STATE_ACTIVE) {
731        return INVALID_OPERATION;
732    }
733    setLoop_l(loopStart, loopEnd, loopCount);
734    return NO_ERROR;
735}
736
737void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
738{
739    // We do not update the periodic notification point.
740    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
741    mLoopCount = loopCount;
742    mLoopEnd = loopEnd;
743    mLoopStart = loopStart;
744    mLoopCountNotified = loopCount;
745    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
746
747    // Waking the AudioTrackThread is not needed as this cannot be called when active.
748}
749
750status_t AudioTrack::setMarkerPosition(uint32_t marker)
751{
752    // The only purpose of setting marker position is to get a callback
753    if (mCbf == NULL || isOffloadedOrDirect()) {
754        return INVALID_OPERATION;
755    }
756
757    AutoMutex lock(mLock);
758    mMarkerPosition = marker;
759    mMarkerReached = false;
760
761    sp<AudioTrackThread> t = mAudioTrackThread;
762    if (t != 0) {
763        t->wake();
764    }
765    return NO_ERROR;
766}
767
768status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
769{
770    if (isOffloadedOrDirect()) {
771        return INVALID_OPERATION;
772    }
773    if (marker == NULL) {
774        return BAD_VALUE;
775    }
776
777    AutoMutex lock(mLock);
778    *marker = mMarkerPosition;
779
780    return NO_ERROR;
781}
782
783status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
784{
785    // The only purpose of setting position update period is to get a callback
786    if (mCbf == NULL || isOffloadedOrDirect()) {
787        return INVALID_OPERATION;
788    }
789
790    AutoMutex lock(mLock);
791    mNewPosition = updateAndGetPosition_l() + updatePeriod;
792    mUpdatePeriod = updatePeriod;
793
794    sp<AudioTrackThread> t = mAudioTrackThread;
795    if (t != 0) {
796        t->wake();
797    }
798    return NO_ERROR;
799}
800
801status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
802{
803    if (isOffloadedOrDirect()) {
804        return INVALID_OPERATION;
805    }
806    if (updatePeriod == NULL) {
807        return BAD_VALUE;
808    }
809
810    AutoMutex lock(mLock);
811    *updatePeriod = mUpdatePeriod;
812
813    return NO_ERROR;
814}
815
816status_t AudioTrack::setPosition(uint32_t position)
817{
818    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
819        return INVALID_OPERATION;
820    }
821    if (position > mFrameCount) {
822        return BAD_VALUE;
823    }
824
825    AutoMutex lock(mLock);
826    // Currently we require that the player is inactive before setting parameters such as position
827    // or loop points.  Otherwise, there could be a race condition: the application could read the
828    // current position, compute a new position or loop parameters, and then set that position or
829    // loop parameters, but it would do the "wrong" thing since the position has continued to advance
830    // in the meantime.  If we ever provide a sequencer in server, we could allow a way for the app
831    // to specify how it wants to handle such scenarios.
832    if (mState == STATE_ACTIVE) {
833        return INVALID_OPERATION;
834    }
835    // After setting the position, use full update period before notification.
836    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
837    mStaticProxy->setBufferPosition(position);
838
839    // Waking the AudioTrackThread is not needed as this cannot be called when active.
840    return NO_ERROR;
841}
842
843status_t AudioTrack::getPosition(uint32_t *position)
844{
845    if (position == NULL) {
846        return BAD_VALUE;
847    }
848
849    AutoMutex lock(mLock);
850    if (isOffloadedOrDirect_l()) {
851        uint32_t dspFrames = 0;
852
853        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
854            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
855            *position = mPausedPosition;
856            return NO_ERROR;
857        }
858
859        if (mOutput != AUDIO_IO_HANDLE_NONE) {
860            uint32_t halFrames;
861            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
862        }
863        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
864        // due to hardware latency. We leave this behavior for now.
865        *position = dspFrames;
866    } else {
867        if (mCblk->mFlags & CBLK_INVALID) {
868            restoreTrack_l("getPosition");
869        }
870
871        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
872        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
873                0 : updateAndGetPosition_l();
874    }
875    return NO_ERROR;
876}
877
878status_t AudioTrack::getBufferPosition(uint32_t *position)
879{
880    if (mSharedBuffer == 0 || mIsTimed) {
881        return INVALID_OPERATION;
882    }
883    if (position == NULL) {
884        return BAD_VALUE;
885    }
886
887    AutoMutex lock(mLock);
888    *position = mStaticProxy->getBufferPosition();
889    return NO_ERROR;
890}
891
892status_t AudioTrack::reload()
893{
894    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
895        return INVALID_OPERATION;
896    }
897
898    AutoMutex lock(mLock);
899    // See setPosition() regarding setting parameters such as loop points or position while active
900    if (mState == STATE_ACTIVE) {
901        return INVALID_OPERATION;
902    }
903    mNewPosition = mUpdatePeriod;
904    (void) updateAndGetPosition_l();
905    mPosition = 0;
906#if 0
907    // The documentation is not clear on the behavior of reload() and the restoration
908    // of loop count. Historically we have not restored loop count, start, end,
909    // but it makes sense if one desires to repeat playing a particular sound.
910    if (mLoopCount != 0) {
911        mLoopCountNotified = mLoopCount;
912        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
913    }
914#endif
915    mStaticProxy->setBufferPosition(0);
916    return NO_ERROR;
917}
918
919audio_io_handle_t AudioTrack::getOutput() const
920{
921    AutoMutex lock(mLock);
922    return mOutput;
923}
924
925status_t AudioTrack::attachAuxEffect(int effectId)
926{
927    AutoMutex lock(mLock);
928    status_t status = mAudioTrack->attachAuxEffect(effectId);
929    if (status == NO_ERROR) {
930        mAuxEffectId = effectId;
931    }
932    return status;
933}
934
935audio_stream_type_t AudioTrack::streamType() const
936{
937    if (mStreamType == AUDIO_STREAM_DEFAULT) {
938        return audio_attributes_to_stream_type(&mAttributes);
939    }
940    return mStreamType;
941}
942
943// -------------------------------------------------------------------------
944
945// must be called with mLock held
946status_t AudioTrack::createTrack_l()
947{
948    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
949    if (audioFlinger == 0) {
950        ALOGE("Could not get audioflinger");
951        return NO_INIT;
952    }
953
954    audio_io_handle_t output;
955    audio_stream_type_t streamType = mStreamType;
956    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
957    status_t status = AudioSystem::getOutputForAttr(attr, &output,
958                                                    (audio_session_t)mSessionId, &streamType,
959                                                    mSampleRate, mFormat, mChannelMask,
960                                                    mFlags, mOffloadInfo);
961
962
963    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
964        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
965              " channel mask %#x, flags %#x",
966              streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
967        return BAD_VALUE;
968    }
969    {
970    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
971    // we must release it ourselves if anything goes wrong.
972
973    // Not all of these values are needed under all conditions, but it is easier to get them all
974
975    uint32_t afLatency;
976    status = AudioSystem::getLatency(output, &afLatency);
977    if (status != NO_ERROR) {
978        ALOGE("getLatency(%d) failed status %d", output, status);
979        goto release;
980    }
981
982    size_t afFrameCount;
983    status = AudioSystem::getFrameCount(output, &afFrameCount);
984    if (status != NO_ERROR) {
985        ALOGE("getFrameCount(output=%d) status %d", output, status);
986        goto release;
987    }
988
989    uint32_t afSampleRate;
990    status = AudioSystem::getSamplingRate(output, &afSampleRate);
991    if (status != NO_ERROR) {
992        ALOGE("getSamplingRate(output=%d) status %d", output, status);
993        goto release;
994    }
995    if (mSampleRate == 0) {
996        mSampleRate = afSampleRate;
997    }
998    // Client decides whether the track is TIMED (see below), but can only express a preference
999    // for FAST.  Server will perform additional tests.
1000    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
1001            // either of these use cases:
1002            // use case 1: shared buffer
1003            (mSharedBuffer != 0) ||
1004            // use case 2: callback transfer mode
1005            (mTransfer == TRANSFER_CALLBACK)) &&
1006            // matching sample rate
1007            (mSampleRate == afSampleRate))) {
1008        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
1009        // once denied, do not request again if IAudioTrack is re-created
1010        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1011    }
1012    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
1013
1014    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
1015    //  n = 1   fast track with single buffering; nBuffering is ignored
1016    //  n = 2   fast track with double buffering
1017    //  n = 2   normal track (including those with sample rate conversion)
1018    //  n >= 3  very high latency or very small notification interval (unused).
1019    const uint32_t nBuffering = 2;
1020
1021    mNotificationFramesAct = mNotificationFramesReq;
1022
1023    size_t frameCount = mReqFrameCount;
1024    if (!audio_is_linear_pcm(mFormat)) {
1025
1026        if (mSharedBuffer != 0) {
1027            // Same comment as below about ignoring frameCount parameter for set()
1028            frameCount = mSharedBuffer->size();
1029        } else if (frameCount == 0) {
1030            frameCount = afFrameCount;
1031        }
1032        if (mNotificationFramesAct != frameCount) {
1033            mNotificationFramesAct = frameCount;
1034        }
1035    } else if (mSharedBuffer != 0) {
1036        // FIXME: Ensure client side memory buffers need
1037        // not have additional alignment beyond sample
1038        // (e.g. 16 bit stereo accessed as 32 bit frame).
1039        size_t alignment = audio_bytes_per_sample(mFormat);
1040        if (alignment & 1) {
1041            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
1042            alignment = 1;
1043        }
1044        if (mChannelCount > 1) {
1045            // More than 2 channels does not require stronger alignment than stereo
1046            alignment <<= 1;
1047        }
1048        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1049            ALOGE("Invalid buffer alignment: address %p, channel count %u",
1050                    mSharedBuffer->pointer(), mChannelCount);
1051            status = BAD_VALUE;
1052            goto release;
1053        }
1054
1055        // When initializing a shared buffer AudioTrack via constructors,
1056        // there's no frameCount parameter.
1057        // But when initializing a shared buffer AudioTrack via set(),
1058        // there _is_ a frameCount parameter.  We silently ignore it.
1059        frameCount = mSharedBuffer->size() / mFrameSize;
1060    } else {
1061        // For fast and normal streaming tracks,
1062        // the frame count calculations and checks are done by server
1063    }
1064
1065    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1066    if (mIsTimed) {
1067        trackFlags |= IAudioFlinger::TRACK_TIMED;
1068    }
1069
1070    pid_t tid = -1;
1071    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1072        trackFlags |= IAudioFlinger::TRACK_FAST;
1073        if (mAudioTrackThread != 0) {
1074            tid = mAudioTrackThread->getTid();
1075        }
1076    }
1077
1078    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1079        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1080    }
1081
1082    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1083        trackFlags |= IAudioFlinger::TRACK_DIRECT;
1084    }
1085
1086    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1087                                // but we will still need the original value also
1088    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1089                                                      mSampleRate,
1090                                                      mFormat,
1091                                                      mChannelMask,
1092                                                      &temp,
1093                                                      &trackFlags,
1094                                                      mSharedBuffer,
1095                                                      output,
1096                                                      tid,
1097                                                      &mSessionId,
1098                                                      mClientUid,
1099                                                      &status);
1100
1101    if (status != NO_ERROR) {
1102        ALOGE("AudioFlinger could not create track, status: %d", status);
1103        goto release;
1104    }
1105    ALOG_ASSERT(track != 0);
1106
1107    // AudioFlinger now owns the reference to the I/O handle,
1108    // so we are no longer responsible for releasing it.
1109
1110    sp<IMemory> iMem = track->getCblk();
1111    if (iMem == 0) {
1112        ALOGE("Could not get control block");
1113        return NO_INIT;
1114    }
1115    void *iMemPointer = iMem->pointer();
1116    if (iMemPointer == NULL) {
1117        ALOGE("Could not get control block pointer");
1118        return NO_INIT;
1119    }
1120    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1121    if (mAudioTrack != 0) {
1122        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1123        mDeathNotifier.clear();
1124    }
1125    mAudioTrack = track;
1126    mCblkMemory = iMem;
1127    IPCThreadState::self()->flushCommands();
1128
1129    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1130    mCblk = cblk;
1131    // note that temp is the (possibly revised) value of frameCount
1132    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1133        // In the current design, the AudioTrack client checks and ensures frame count validity
1134        // before passing it to AudioFlinger, so AudioFlinger should not return a different value
1135        // except for fast tracks, which use a special method of assigning frame count.
1136        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1137    }
1138    frameCount = temp;
1139
1140    mAwaitBoost = false;
1141    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1142        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1143            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1144            mAwaitBoost = true;
1145        } else {
1146            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1147            // once denied, do not request again if IAudioTrack is re-created
1148            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1149        }
1150    }
1151    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1152        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1153            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1154        } else {
1155            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1156            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1157            // FIXME This is a warning, not an error, so don't return error status
1158            //return NO_INIT;
1159        }
1160    }
1161    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1162        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
1163            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
1164        } else {
1165            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
1166            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
1167            // FIXME This is a warning, not an error, so don't return error status
1168            //return NO_INIT;
1169        }
1170    }
1171    // Make sure that the application is notified with sufficient margin before underrun
1172    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1173        // Theoretically double-buffering is not required for fast tracks,
1174        // due to tighter scheduling.  But in practice, to accommodate kernels with
1175        // scheduling jitter, and apps with computation jitter, we use double-buffering
1176        // for fast tracks just like normal streaming tracks.
1177        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
1178            mNotificationFramesAct = frameCount / nBuffering;
1179        }
1180    }
1181
1182    // We retain a copy of the I/O handle, but don't own the reference
1183    mOutput = output;
1184    mRefreshRemaining = true;
1185
1186    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1187    // is the value of pointer() for the shared buffer, otherwise buffers points
1188    // immediately after the control block.  This address is for the mapping within client
1189    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1190    void* buffers;
1191    if (mSharedBuffer == 0) {
1192        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1193    } else {
1194        buffers = mSharedBuffer->pointer();
1195    }
1196
1197    mAudioTrack->attachAuxEffect(mAuxEffectId);
1198    // FIXME don't believe this lie
1199    mLatency = afLatency + (1000*frameCount) / mSampleRate;
1200
1201    mFrameCount = frameCount;
1202    // If IAudioTrack is re-created, don't let the requested frameCount
1203    // decrease.  This can confuse clients that cache frameCount().
1204    if (frameCount > mReqFrameCount) {
1205        mReqFrameCount = frameCount;
1206    }
1207
1208    // update proxy
1209    if (mSharedBuffer == 0) {
1210        mStaticProxy.clear();
1211        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1212    } else {
1213        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1214        mProxy = mStaticProxy;
1215    }
1216
1217    mProxy->setVolumeLR(gain_minifloat_pack(
1218            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1219            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1220
1221    mProxy->setSendLevel(mSendLevel);
1222    mProxy->setSampleRate(mSampleRate);
1223    mProxy->setMinimum(mNotificationFramesAct);
1224
1225    mDeathNotifier = new DeathNotifier(this);
1226    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1227
1228    return NO_ERROR;
1229    }
1230
1231release:
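    // Undo the earlier getOutputForAttr(): the output was never handed off to a created track,
    // so release the reference here before returning the error.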
1232    AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
1233    if (status == NO_ERROR) {
1234        status = NO_INIT;
1235    }
1236    return status;
1237}
1238
1239status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1240{
1241    if (audioBuffer == NULL) {
1242        return BAD_VALUE;
1243    }
1244    if (mTransfer != TRANSFER_OBTAIN) {
1245        audioBuffer->frameCount = 0;
1246        audioBuffer->size = 0;
1247        audioBuffer->raw = NULL;
1248        return INVALID_OPERATION;
1249    }
1250
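    // Translate the legacy waitCount convention into a timespec for the proxy:
    // -1 blocks indefinitely, 0 is non-blocking, and a positive count waits up to
    // waitCount * WAIT_PERIOD_MS milliseconds.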
1251    const struct timespec *requested;
1252    struct timespec timeout;
1253    if (waitCount == -1) {
1254        requested = &ClientProxy::kForever;
1255    } else if (waitCount == 0) {
1256        requested = &ClientProxy::kNonBlocking;
1257    } else if (waitCount > 0) {
1258        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1259        timeout.tv_sec = ms / 1000;
1260        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1261        requested = &timeout;
1262    } else {
1263        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1264        requested = NULL;
1265    }
1266    return obtainBuffer(audioBuffer, requested);
1267}
1268
1269status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1270        struct timespec *elapsed, size_t *nonContig)
1271{
1272    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1273    uint32_t oldSequence = 0;
1274    uint32_t newSequence;
1275
1276    Proxy::Buffer buffer;
1277    status_t status = NO_ERROR;
1278
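    // If the server dies while we are waiting, retry a bounded number of times; each retry
    // attempts to re-create the track through restoreTrack_l() below.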
1279    static const int32_t kMaxTries = 5;
1280    int32_t tryCounter = kMaxTries;
1281
1282    do {
1283        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1284        // keep them from going away if another thread re-creates the track during obtainBuffer()
1285        sp<AudioTrackClientProxy> proxy;
1286        sp<IMemory> iMem;
1287
1288        {   // start of lock scope
1289            AutoMutex lock(mLock);
1290
1291            newSequence = mSequence;
1292            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1293            if (status == DEAD_OBJECT) {
1294                // re-create track, unless someone else has already done so
1295                if (newSequence == oldSequence) {
1296                    status = restoreTrack_l("obtainBuffer");
1297                    if (status != NO_ERROR) {
1298                        buffer.mFrameCount = 0;
1299                        buffer.mRaw = NULL;
1300                        buffer.mNonContig = 0;
1301                        break;
1302                    }
1303                }
1304            }
1305            oldSequence = newSequence;
1306
1307            // Keep the extra references
1308            proxy = mProxy;
1309            iMem = mCblkMemory;
1310
1311            if (mState == STATE_STOPPING) {
1312                status = -EINTR;
1313                buffer.mFrameCount = 0;
1314                buffer.mRaw = NULL;
1315                buffer.mNonContig = 0;
1316                break;
1317            }
1318
1319            // Non-blocking if track is stopped or paused
1320            if (mState != STATE_ACTIVE) {
1321                requested = &ClientProxy::kNonBlocking;
1322            }
1323
1324        }   // end of lock scope
1325
1326        buffer.mFrameCount = audioBuffer->frameCount;
1327        // FIXME starts the requested timeout and elapsed over from scratch
1328        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1329
1330    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1331
1332    audioBuffer->frameCount = buffer.mFrameCount;
1333    audioBuffer->size = buffer.mFrameCount * mFrameSize;
1334    audioBuffer->raw = buffer.mRaw;
1335    if (nonContig != NULL) {
1336        *nonContig = buffer.mNonContig;
1337    }
1338    return status;
1339}
1340
1341void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1342{
1343    // FIXME add error checking on mode, by adding an internal version
1344    if (mTransfer == TRANSFER_SHARED) {
1345        return;
1346    }
1347
1348    size_t stepCount = audioBuffer->size / mFrameSize;
1349    if (stepCount == 0) {
1350        return;
1351    }
1352
1353    Proxy::Buffer buffer;
1354    buffer.mFrameCount = stepCount;
1355    buffer.mRaw = audioBuffer->raw;
1356
1357    AutoMutex lock(mLock);
1358    mReleased += stepCount;
1359    mInUnderrun = false;
1360    mProxy->releaseBuffer(&buffer);
1361
1362    // restart track if it was disabled by audioflinger due to previous underrun
1363    if (mState == STATE_ACTIVE) {
1364        audio_track_cblk_t* cblk = mCblk;
1365        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1366            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1367            // FIXME ignoring status
1368            mAudioTrack->start();
1369        }
1370    }
1371}
1372
1373// -------------------------------------------------------------------------
1374
1375ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1376{
1377    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1378        return INVALID_OPERATION;
1379    }
1380
1381    if (isDirect()) {
1382        AutoMutex lock(mLock);
1383        int32_t flags = android_atomic_and(
1384                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1385                            &mCblk->mFlags);
1386        if (flags & CBLK_INVALID) {
1387            return DEAD_OBJECT;
1388        }
1389    }
1390
1391    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1392        // Sanity check: the user is most likely passing an error code, and it would
1393        // make the return value ambiguous (actualSize vs error).
1394        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1395        return BAD_VALUE;
1396    }
1397
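    // Copy the caller's data into successive server-side buffers.  In blocking mode
    // obtainBuffer() waits for space; otherwise we return however many bytes were written
    // before running out of room.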
1398    size_t written = 0;
1399    Buffer audioBuffer;
1400
1401    while (userSize >= mFrameSize) {
1402        audioBuffer.frameCount = userSize / mFrameSize;
1403
1404        status_t err = obtainBuffer(&audioBuffer,
1405                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1406        if (err < 0) {
1407            if (written > 0) {
1408                break;
1409            }
1410            return ssize_t(err);
1411        }
1412
1413        size_t toWrite;
1414        toWrite = audioBuffer.size;
1415        memcpy(audioBuffer.i8, buffer, toWrite);
1416        buffer = ((const char *) buffer) + toWrite;
1417        userSize -= toWrite;
1418        written += toWrite;
1419
1420        releaseBuffer(&audioBuffer);
1421    }
1422
1423    return written;
1424}
1425
1426// -------------------------------------------------------------------------
1427
1428TimedAudioTrack::TimedAudioTrack() {
1429    mIsTimed = true;
1430}
1431
1432status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1433{
1434    AutoMutex lock(mLock);
1435    status_t result = UNKNOWN_ERROR;
1436
1437#if 1
1438    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1439    // while we are accessing the cblk
1440    sp<IAudioTrack> audioTrack = mAudioTrack;
1441    sp<IMemory> iMem = mCblkMemory;
1442#endif
1443
1444    // If the track is not invalid already, try to allocate a buffer.  If the alloc
1445    // fails indicating that the server is dead, flag the track as invalid so
1446    // we can attempt to restore it in just a bit.
1447    audio_track_cblk_t* cblk = mCblk;
1448    if (!(cblk->mFlags & CBLK_INVALID)) {
1449        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1450        if (result == DEAD_OBJECT) {
1451            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1452        }
1453    }
1454
1455    // If the track is invalid at this point, attempt to restore it and try the
1456    // allocation one more time.
1457    if (cblk->mFlags & CBLK_INVALID) {
1458        result = restoreTrack_l("allocateTimedBuffer");
1459
1460        if (result == NO_ERROR) {
1461            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1462        }
1463    }
1464
1465    return result;
1466}
1467
1468status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1469                                           int64_t pts)
1470{
1471    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1472    {
1473        AutoMutex lock(mLock);
1474        audio_track_cblk_t* cblk = mCblk;
1475        // restart track if it was disabled by audioflinger due to previous underrun
1476        if (buffer->size() != 0 && status == NO_ERROR &&
1477                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1478            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1479            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1480            // FIXME ignoring status
1481            mAudioTrack->start();
1482        }
1483    }
1484    return status;
1485}
1486
1487status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1488                                                TargetTimeline target)
1489{
1490    return mAudioTrack->setMediaTimeTransform(xform, target);
1491}
1492
1493// -------------------------------------------------------------------------
1494
1495nsecs_t AudioTrack::processAudioBuffer()
1496{
1497    // Currently the AudioTrack thread is not created if there are no callbacks.
1498    // Would it ever make sense to run the thread, even without callbacks?
1499    // If so, then replace this by checks at each use for mCbf != NULL.
1500    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1501
1502    mLock.lock();
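    // For a FAST track the server is expected to promote this callback thread to an elevated
    // scheduling policy (SCHED_FIFO/SCHED_RR); poll with exponential backoff until that happens,
    // or give up after a few tries and log an error.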
1503    if (mAwaitBoost) {
1504        mAwaitBoost = false;
1505        mLock.unlock();
1506        static const int32_t kMaxTries = 5;
1507        int32_t tryCounter = kMaxTries;
1508        uint32_t pollUs = 10000;
1509        do {
1510            int policy = sched_getscheduler(0);
1511            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1512                break;
1513            }
1514            usleep(pollUs);
1515            pollUs <<= 1;
1516        } while (tryCounter-- > 0);
1517        if (tryCounter < 0) {
1518            ALOGE("did not receive expected priority boost on time");
1519        }
1520        // Run again immediately
1521        return 0;
1522    }
1523
1524    // Can only reference mCblk while locked
1525    int32_t flags = android_atomic_and(
1526        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1527
1528    // Check for track invalidation
1529    if (flags & CBLK_INVALID) {
1530        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1531        // AudioSystem cache. We should not exit here but after calling the callback so
1532        // that the upper layers can recreate the track
1533        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1534            status_t status = restoreTrack_l("processAudioBuffer");
1535            // after restoration, continue below to make sure that the loop and buffer events
1536            // are notified because they have been cleared from mCblk->mFlags above.
1537        }
1538    }
1539
1540    bool waitStreamEnd = mState == STATE_STOPPING;
1541    bool active = mState == STATE_ACTIVE;
1542
1543    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1544    bool newUnderrun = false;
1545    if (flags & CBLK_UNDERRUN) {
1546#if 0
1547        // Currently in shared buffer mode, when the server reaches the end of buffer,
1548        // the track stays active in continuous underrun state.  It's up to the application
1549        // to pause or stop the track, or to set the position to a new offset within the buffer.
1550        // This was some experimental code to auto-pause on underrun.  Keeping it here
1551        // in "#if 0" so we can revisit this if we add a real sequencer for shared memory content.
1552        if (mTransfer == TRANSFER_SHARED) {
1553            mState = STATE_PAUSED;
1554            active = false;
1555        }
1556#endif
1557        if (!mInUnderrun) {
1558            mInUnderrun = true;
1559            newUnderrun = true;
1560        }
1561    }
1562
1563    // Get current position of server
1564    size_t position = updateAndGetPosition_l();
1565
1566    // Manage marker callback
1567    bool markerReached = false;
1568    size_t markerPosition = mMarkerPosition;
1569    // FIXME fails for wraparound, need 64 bits
1570    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1571        mMarkerReached = markerReached = true;
1572    }
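    // e.g. with a marker set at 44100 on a 44.1 kHz track, EVENT_MARKER is delivered the
    // first time the server position reaches one second of frames; mMarkerReached
    // prevents repeat deliveries.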
1573
1574    // Determine number of new position callback(s) that will be needed, while locked
1575    size_t newPosCount = 0;
1576    size_t newPosition = mNewPosition;
1577    size_t updatePeriod = mUpdatePeriod;
1578    // FIXME fails for wraparound, need 64 bits
1579    if (updatePeriod > 0 && position >= newPosition) {
1580        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1581        mNewPosition += updatePeriod * newPosCount;
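        // e.g. with updatePeriod = 1000 and newPosition = 5000, a position of 7200 gives
        // newPosCount = ((7200 - 5000) / 1000) + 1 = 3 (EVENT_NEW_POS at 5000, 6000 and 7000)
        // and advances mNewPosition to 8000.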
1582    }
1583
1584    // Cache other fields that will be needed soon
1585    uint32_t sampleRate = mSampleRate;
1586    uint32_t notificationFrames = mNotificationFramesAct;
1587    if (mRefreshRemaining) {
1588        mRefreshRemaining = false;
1589        mRemainingFrames = notificationFrames;
1590        mRetryOnPartialBuffer = false;
1591    }
1592    size_t misalignment = mProxy->getMisalignment();
1593    uint32_t sequence = mSequence;
1594    sp<AudioTrackClientProxy> proxy = mProxy;
1595
1596    // Determine the number of new loop callback(s) that will be needed, while locked.
1597    int loopCountNotifications = 0;
1598    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1599
1600    if (mLoopCount > 0) {
1601        int loopCount;
1602        size_t bufferPosition;
1603        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1604        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1605        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1606        mLoopCountNotified = loopCount; // discard any excess notifications
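        // e.g. if the last notification was sent at loopCount 7 and the server-side loopCount
        // has since dropped to 3, four EVENT_LOOP_END callbacks are owed, clamped to
        // kMaxLoopCountNotifications per pass.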
1607    } else if (mLoopCount < 0) {
1608        // FIXME: The notification count and position are not accurate with infinite looping,
1609        // since the loopCount reported by the server side will always be -1 (we could decrement it).
1610        size_t bufferPosition = mStaticProxy->getBufferPosition();
1611        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1612        loopPeriod = mLoopEnd - bufferPosition;
1613    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1614        size_t bufferPosition = mStaticProxy->getBufferPosition();
1615        loopPeriod = mFrameCount - bufferPosition;
1616    }
1617
1618    // These fields don't need to be cached, because they are assigned only by set():
1619    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1620    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1621
1622    mLock.unlock();
1623
1624    if (waitStreamEnd) {
1625        struct timespec timeout;
1626        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1627        timeout.tv_nsec = 0;
1628
1629        status_t status = proxy->waitStreamEndDone(&timeout);
1630        switch (status) {
1631        case NO_ERROR:
1632        case DEAD_OBJECT:
1633        case TIMED_OUT:
1634            mCbf(EVENT_STREAM_END, mUserData, NULL);
1635            {
1636                AutoMutex lock(mLock);
1637                // The previously assigned value of waitStreamEnd is no longer valid,
1638                // since the mutex has been unlocked and either the callback handler
1639                // or another thread could have re-started the AudioTrack during that time.
1640                waitStreamEnd = mState == STATE_STOPPING;
1641                if (waitStreamEnd) {
1642                    mState = STATE_STOPPED;
1643                    mReleased = 0;
1644                }
1645            }
1646            if (waitStreamEnd && status != DEAD_OBJECT) {
1647                return NS_INACTIVE;
1648            }
1649            break;
1650        }
1651        return 0;
1652    }
1653
1654    // perform callbacks while unlocked
1655    if (newUnderrun) {
1656        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1657    }
1658    while (loopCountNotifications > 0) {
1659        mCbf(EVENT_LOOP_END, mUserData, NULL);
1660        --loopCountNotifications;
1661    }
1662    if (flags & CBLK_BUFFER_END) {
1663        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1664    }
1665    if (markerReached) {
1666        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1667    }
1668    while (newPosCount > 0) {
1669        size_t temp = newPosition;
1670        mCbf(EVENT_NEW_POS, mUserData, &temp);
1671        newPosition += updatePeriod;
1672        newPosCount--;
1673    }
1674
1675    if (mObservedSequence != sequence) {
1676        mObservedSequence = sequence;
1677        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1678        // for offloaded tracks, just wait for the upper layers to recreate the track
1679        if (isOffloadedOrDirect()) {
1680            return NS_INACTIVE;
1681        }
1682    }
1683
1684    // if inactive, then don't run me again until re-started
1685    if (!active) {
1686        return NS_INACTIVE;
1687    }
1688
1689    // Compute the estimated time until the next timed event (position, markers, loops)
1690    // FIXME only for non-compressed audio
1691    uint32_t minFrames = ~0;
1692    if (!markerReached && position < markerPosition) {
1693        minFrames = markerPosition - position;
1694    }
1695    if (loopPeriod > 0 && loopPeriod < minFrames) {
1696        // loopPeriod is already adjusted for actual position.
1697        minFrames = loopPeriod;
1698    }
1699    if (updatePeriod > 0) {
1700        minFrames = min(minFrames, uint32_t(newPosition - position));
1701    }
1702
1703    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
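    // e.g. if kPoll were set to 2 with notificationFrames = 512, the callback thread would
    // wake at least every 1024 frames, even if the server stopped posting events.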
1704    static const uint32_t kPoll = 0;
1705    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1706        minFrames = kPoll * notificationFrames;
1707    }
1708
1709    // Convert frame units to time units
1710    nsecs_t ns = NS_WHENEVER;
1711    if (minFrames != (uint32_t) ~0) {
1712        // This "fudge factor" avoids soaking the CPU, and compensates for late progress by the server
1713        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1714        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
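        // e.g. minFrames = 441 at 44100 Hz is 10 ms of audio, so ns = 10 ms + 10 ms fudge = 20 ms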
1715    }
1716
1717    // If not supplying data by EVENT_MORE_DATA, then we're done
1718    if (mTransfer != TRANSFER_CALLBACK) {
1719        return ns;
1720    }
1721
1722    struct timespec timeout;
1723    const struct timespec *requested = &ClientProxy::kForever;
1724    if (ns != NS_WHENEVER) {
1725        timeout.tv_sec = ns / 1000000000LL;
1726        timeout.tv_nsec = ns % 1000000000LL;
1727        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1728        requested = &timeout;
1729    }
1730
1731    while (mRemainingFrames > 0) {
1732
1733        Buffer audioBuffer;
1734        audioBuffer.frameCount = mRemainingFrames;
1735        size_t nonContig;
1736        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1737        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1738                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1739        requested = &ClientProxy::kNonBlocking;
1740        size_t avail = audioBuffer.frameCount + nonContig;
1741        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1742                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1743        if (err != NO_ERROR) {
1744            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1745                    (isOffloaded() && (err == DEAD_OBJECT))) {
1746                return 0;
1747            }
1748            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1749            return NS_NEVER;
1750        }
1751
1752        if (mRetryOnPartialBuffer && !isOffloaded()) {
1753            mRetryOnPartialBuffer = false;
1754            if (avail < mRemainingFrames) {
1755                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
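                // The 1100000000LL (1.1 s in ns) adds a 10% margin over real time;
                // e.g. being 480 frames short at 48 kHz yields an 11 ms wait rather than 10 ms.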
1756                if (ns < 0 || myns < ns) {
1757                    ns = myns;
1758                }
1759                return ns;
1760            }
1761        }
1762
1763        size_t reqSize = audioBuffer.size;
1764        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1765        size_t writtenSize = audioBuffer.size;
1766
1767        // Sanity check on returned size
1768        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1769            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1770                    reqSize, ssize_t(writtenSize));
1771            return NS_NEVER;
1772        }
1773
1774        if (writtenSize == 0) {
1775            // The callback is done filling buffers.
1776            // Keep this thread going to handle timed events and to keep trying to get
1777            // more data at intervals of WAIT_PERIOD_MS, but don't busy-loop and hog the
1778            // CPU, so wait before the next attempt.
1779            return WAIT_PERIOD_MS * 1000000LL;
1780        }
1781
1782        size_t releasedFrames = audioBuffer.size / mFrameSize;
1783        audioBuffer.frameCount = releasedFrames;
1784        mRemainingFrames -= releasedFrames;
1785        if (misalignment >= releasedFrames) {
1786            misalignment -= releasedFrames;
1787        } else {
1788            misalignment = 0;
1789        }
1790
1791        releaseBuffer(&audioBuffer);
1792
1793        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1794        // if the callback does not accept the full chunk
1795        if (writtenSize < reqSize) {
1796            continue;
1797        }
1798
1799        // There could be enough non-contiguous frames available to satisfy the remaining request
1800        if (mRemainingFrames <= nonContig) {
1801            continue;
1802        }
1803
1804#if 0
1805        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1806        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1807        // that total to a sum == notificationFrames.
1808        if (0 < misalignment && misalignment <= mRemainingFrames) {
1809            mRemainingFrames = misalignment;
1810            return (mRemainingFrames * 1100000000LL) / sampleRate;
1811        }
1812#endif
1813
1814    }
1815    mRemainingFrames = notificationFrames;
1816    mRetryOnPartialBuffer = true;
1817
1818    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1819    return 0;
1820}
1821
1822status_t AudioTrack::restoreTrack_l(const char *from)
1823{
1824    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1825          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
1826    ++mSequence;
1827    status_t result;
1828
1829    // refresh the audio configuration cache in this process to make sure we get new
1830    // output parameters and a new IAudioFlinger in createTrack_l()
1831    AudioSystem::clearAudioConfigCache();
1832
1833    if (isOffloadedOrDirect_l()) {
1834        // FIXME re-creation of offloaded tracks is not yet implemented
1835        return DEAD_OBJECT;
1836    }
1837
1838    // save the old static buffer position
1839    size_t bufferPosition = 0;
1840    int loopCount = 0;
1841    if (mStaticProxy != 0) {
1842        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1843    }
1844
1845    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
1846    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1847    // It will also delete the strong references on previous IAudioTrack and IMemory.
1848    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
1849    result = createTrack_l();
1850
1851    // Take the frames that will be lost by track re-creation into account in the saved position.
1852    // For streaming tracks, this is the amount we obtained from the user/client
1853    // (not the number actually consumed at the server - those are already lost).
1854    (void) updateAndGetPosition_l();
1855    if (mStaticProxy != 0) {
1856        mPosition = mReleased;
1857    }
1858
1859    if (result == NO_ERROR) {
1860        // Continue playback from last known position and restore loop.
1861        if (mStaticProxy != 0) {
1862            if (loopCount != 0) {
1863                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
1864                        mLoopStart, mLoopEnd, loopCount);
1865            } else {
1866                mStaticProxy->setBufferPosition(bufferPosition);
1867                if (bufferPosition == mFrameCount) {
1868                    ALOGD("restoring track at end of static buffer");
1869                }
1870            }
1871        }
1872        if (mState == STATE_ACTIVE) {
1873            result = mAudioTrack->start();
1874        }
1875    }
1876    if (result != NO_ERROR) {
1877        ALOGW("restoreTrack_l() failed status %d", result);
1878        mState = STATE_STOPPED;
1879        mReleased = 0;
1880    }
1881
1882    return result;
1883}
1884
1885uint32_t AudioTrack::updateAndGetPosition_l()
1886{
1887    // This is the sole place to read server consumed frames
1888    uint32_t newServer = mProxy->getPosition();
1889    int32_t delta = newServer - mServer;
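    // The unsigned subtraction above is wraparound-safe: e.g. mServer = 0xFFFFFF00 and
    // newServer = 0x00000100 give delta = 0x200, i.e. 512 frames of forward progress.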
1890    mServer = newServer;
1891    // TODO There is controversy about whether there can be "negative jitter" in server position.
1892    //      This should be investigated further, and if possible, it should be addressed.
1893    //      A more definite failure mode is infrequent polling by the client.
1894    //      One could call (void)getPosition_l() in releaseBuffer(),
1895    //      so that mReleased and mPosition stay in lock-step as closely as possible.
1896    //      That should ensure delta never goes negative for infrequent polling
1897    //      unless the server has more than 2^31 frames in its buffer,
1898    //      in which case the use of uint32_t for these counters has bigger issues.
1899    if (delta < 0) {
1900        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
1901        delta = 0;
1902    }
1903    return mPosition += (uint32_t) delta;
1904}
1905
1906status_t AudioTrack::setParameters(const String8& keyValuePairs)
1907{
1908    AutoMutex lock(mLock);
1909    return mAudioTrack->setParameters(keyValuePairs);
1910}
1911
1912status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1913{
1914    AutoMutex lock(mLock);
1915    // FIXME not implemented for fast tracks; should use proxy and SSQ
1916    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1917        return INVALID_OPERATION;
1918    }
1919
1920    switch (mState) {
1921    case STATE_ACTIVE:
1922    case STATE_PAUSED:
1923        break; // handle below
1924    case STATE_FLUSHED:
1925    case STATE_STOPPED:
1926        return WOULD_BLOCK;
1927    case STATE_STOPPING:
1928    case STATE_PAUSED_STOPPING:
1929        if (!isOffloaded_l()) {
1930            return INVALID_OPERATION;
1931        }
1932        break; // offloaded tracks handled below
1933    default:
1934        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
1935        break;
1936    }
1937
1938    if (mCblk->mFlags & CBLK_INVALID) {
1939        restoreTrack_l("getTimestamp");
1940    }
1941
1942    // The presented frame count must always lag behind the consumed frame count.
1943    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
1944    status_t status = mAudioTrack->getTimestamp(timestamp);
1945    if (status != NO_ERROR) {
1946        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
1947        return status;
1948    }
1949    if (isOffloadedOrDirect_l()) {
1950        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
1951            // use cached paused position in case another offloaded track is running.
1952            timestamp.mPosition = mPausedPosition;
1953            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
1954            return NO_ERROR;
1955        }
1956
1957        // Check whether a pending flush or stop has completed, as those commands may
1958        // be asynchronous or may return before they have fully taken effect.
1959        if (mStartUs != 0 && mSampleRate != 0) {
1960            static const int kTimeJitterUs = 100000; // 100 ms
1961            static const int k1SecUs = 1000000;
1962
1963            const int64_t timeNow = getNowUs();
1964
1965            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
1966                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
1967                if (timestampTimeUs < mStartUs) {
1968                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
1969                }
1970                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
1971                const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
1972
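                // e.g. at 48 kHz, a reported position of 96000 frames represents 2,000,000 us
                // of audio; if only 500,000 us have elapsed since start(), that is well over
                // deltaTimeUs + kTimeJitterUs, so the timestamp must belong to a previous
                // track that was not completely flushed or stopped.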
1973                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
1974                    // Verify that the counter can't count faster than the sample rate
1975                    // since the start time.  If greater, then that means we have failed
1976                    // to completely flush or stop the previous playing track.
1977                    ALOGW("incomplete flush or stop:"
1978                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
1979                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
1980                            timestamp.mPosition);
1981                    return WOULD_BLOCK;
1982                }
1983            }
1984            mStartUs = 0; // no need to check again; the start timestamp has expired or is no longer needed.
1985        }
1986    } else {
1987        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
1988        (void) updateAndGetPosition_l();
1989        // Server consumed (mServer) and presented both use the same server time base,
1990        // and server consumed is always >= presented.
1991        // The delta between these represents the number of frames in the buffer pipeline.
1992        // If this delta is greater than the client position, it means that the presented
1993        // position is still stuck at the starting line (figuratively speaking),
1994        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
1995        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
1996            return INVALID_OPERATION;
1997        }
1998        // Convert timestamp position from server time base to client time base.
1999        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2000        // But if we change it to 64-bit then this could fail.
2001        // If (mPosition - mServer) can be negative, then we should use:
2002        //   (int32_t)(mPosition - mServer)
2003        timestamp.mPosition += mPosition - mServer;
2004        // Immediately after a call to getPosition_l(), mPosition and
2005        // mServer both represent the same frame position.  mPosition is
2006        // in client's point of view, and mServer is in server's point of
2007        // view.  So the difference between them is the "fudge factor"
2008        // between client and server views due to stop() and/or new
2009        // IAudioTrack.  And timestamp.mPosition is initially in server's
2010        // point of view, so we need to apply the same fudge factor to it.
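        // e.g. if the client view is mPosition = 10000 while the server view is mServer = 9000
        // (a 1000-frame fudge factor from stop() and/or a new IAudioTrack), a presented
        // position of 8500 in the server time base is reported to the client as
        // 8500 + (10000 - 9000) = 9500.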
2011    }
2012    return status;
2013}
2014
2015String8 AudioTrack::getParameters(const String8& keys)
2016{
2017    audio_io_handle_t output = getOutput();
2018    if (output != AUDIO_IO_HANDLE_NONE) {
2019        return AudioSystem::getParameters(output, keys);
2020    } else {
2021        return String8::empty();
2022    }
2023}
2024
2025bool AudioTrack::isOffloaded() const
2026{
2027    AutoMutex lock(mLock);
2028    return isOffloaded_l();
2029}
2030
2031bool AudioTrack::isDirect() const
2032{
2033    AutoMutex lock(mLock);
2034    return isDirect_l();
2035}
2036
2037bool AudioTrack::isOffloadedOrDirect() const
2038{
2039    AutoMutex lock(mLock);
2040    return isOffloadedOrDirect_l();
2041}
2042
2043
2044status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2045{
2046
2047    const size_t SIZE = 256;
2048    char buffer[SIZE];
2049    String8 result;
2050
2051    result.append(" AudioTrack::dump\n");
2052    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2053            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2054    result.append(buffer);
2055    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2056            mChannelCount, mFrameCount);
2057    result.append(buffer);
2058    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
2059    result.append(buffer);
2060    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2061    result.append(buffer);
2062    ::write(fd, result.string(), result.size());
2063    return NO_ERROR;
2064}
2065
2066uint32_t AudioTrack::getUnderrunFrames() const
2067{
2068    AutoMutex lock(mLock);
2069    return mProxy->getUnderrunFrames();
2070}
2071
2072// =========================================================================
2073
2074void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2075{
2076    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2077    if (audioTrack != 0) {
2078        AutoMutex lock(audioTrack->mLock);
2079        audioTrack->mProxy->binderDied();
2080    }
2081}
2082
2083// =========================================================================
2084
2085AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2086    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2087      mIgnoreNextPausedInt(false)
2088{
2089}
2090
2091AudioTrack::AudioTrackThread::~AudioTrackThread()
2092{
2093}
2094
2095bool AudioTrack::AudioTrackThread::threadLoop()
2096{
2097    {
2098        AutoMutex _l(mMyLock);
2099        if (mPaused) {
2100            mMyCond.wait(mMyLock);
2101            // caller will check for exitPending()
2102            return true;
2103        }
2104        if (mIgnoreNextPausedInt) {
2105            mIgnoreNextPausedInt = false;
2106            mPausedInt = false;
2107        }
2108        if (mPausedInt) {
2109            if (mPausedNs > 0) {
2110                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2111            } else {
2112                mMyCond.wait(mMyLock);
2113            }
2114            mPausedInt = false;
2115            return true;
2116        }
2117    }
2118    if (exitPending()) {
2119        return false;
2120    }
2121    nsecs_t ns = mReceiver.processAudioBuffer();
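    // processAudioBuffer() returns one of:
    //   0            : run again immediately
    //   NS_INACTIVE  : pause internally until resumed
    //   NS_NEVER     : exit the thread
    //   NS_WHENEVER  : sleep indefinitely, until wake() or resume()
    //   ns > 0       : sleep for up to ns nanoseconds (wake() or resume() can end it early)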
2122    switch (ns) {
2123    case 0:
2124        return true;
2125    case NS_INACTIVE:
2126        pauseInternal();
2127        return true;
2128    case NS_NEVER:
2129        return false;
2130    case NS_WHENEVER:
2131        // Event driven: call wake() when callback notification conditions change.
2132        ns = INT64_MAX;
2133        // fall through
2134    default:
2135        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2136        pauseInternal(ns);
2137        return true;
2138    }
2139}
2140
2141void AudioTrack::AudioTrackThread::requestExit()
2142{
2143    // must be in this order to avoid a race condition
2144    Thread::requestExit();
2145    resume();
2146}
2147
2148void AudioTrack::AudioTrackThread::pause()
2149{
2150    AutoMutex _l(mMyLock);
2151    mPaused = true;
2152}
2153
2154void AudioTrack::AudioTrackThread::resume()
2155{
2156    AutoMutex _l(mMyLock);
2157    mIgnoreNextPausedInt = true;
2158    if (mPaused || mPausedInt) {
2159        mPaused = false;
2160        mPausedInt = false;
2161        mMyCond.signal();
2162    }
2163}
2164
2165void AudioTrack::AudioTrackThread::wake()
2166{
2167    AutoMutex _l(mMyLock);
2168    if (!mPaused && mPausedInt && mPausedNs > 0) {
2169        // audio track is active and internally paused with timeout.
2170        mIgnoreNextPausedInt = true;
2171        mPausedInt = false;
2172        mMyCond.signal();
2173    }
2174}
2175
2176void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2177{
2178    AutoMutex _l(mMyLock);
2179    mPausedInt = true;
2180    mPausedNs = ns;
2181}
2182
2183}; // namespace android
2184