AudioTrack.cpp revision 3bcffa136909c1fb6e88ee4efd12ccac18360a85
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <math.h>
#include <sys/resource.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120


namespace android {
// ---------------------------------------------------------------------------

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME merge with similar code in createTrack_l(), except we're missing
    //       some information here that is available in createTrack_l():
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }

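    // Worked example with hypothetical values: afLatency = 75 ms, afFrameCount = 960 and
    // afSampleRate = 48000 Hz give 20 ms per mix buffer, so minBufCount = 3; for a client
    // sampleRate of 44100 the formula below yields 960 * 3 * 44100 / 48000 = 2646 frames.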
    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
            afFrameCount * minBufCount * sampleRate / afSampleRate;
    // The formula above should always produce a non-zero value, but return an error
    // in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
    return NO_ERROR;
}
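
// Minimal usage sketch (hypothetical client code; the stream type, rates and callback below are
// arbitrary example values, not taken from this file):
//
//     size_t minFrames = 0;
//     if (AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 44100) == NO_ERROR) {
//         sp<AudioTrack> track = new AudioTrack(
//                 AUDIO_STREAM_MUSIC, 44100, AUDIO_FORMAT_PCM_16_BIT,
//                 AUDIO_CHANNEL_OUT_STEREO, minFrames, AUDIO_OUTPUT_FLAG_NONE,
//                 exampleCallback /* hypothetical callback_t */, NULL /* user */);
//         if (track->initCheck() == NO_ERROR) {
//             track->start();
//             // ... supply data from the callback, then stop() when done
//         }
//     }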

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid);
}

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
                IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
        ALOGE("Invalid stream type %d", streamType);
        return BAD_VALUE;
    }
    mStreamType = streamType;

    status_t status;
    if (sampleRate == 0) {
        status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType);
        if (status != NO_ERROR) {
            ALOGE("Could not get output sample rate for stream type %d; status %d",
                    streamType, status);
            return status;
        }
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // AudioFlinger does not currently support 8-bit data in shared memory
    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
        ALOGE("8-bit data in shared memory is not supported");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    // only allow deep buffering for music stream type
    if (streamType != AUDIO_STREAM_MUSIC) {
        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_is_linear_pcm(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
        mFrameSizeAF = mFrameSize;
    } else {
        ALOG_ASSERT(audio_is_linear_pcm(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        mFrameSizeAF = channelCount * audio_bytes_per_sample(
                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    mSessionId = sessionId;
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status = createTrack_l(0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopPeriod = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;

    return NO_ERROR;
}

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (previousState == STATE_STOPPING) {
            mProxy->interrupt();
        } else {
            t->resume();
        }
    } else {
        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
        get_sched_policy(0, &mPreviousSchedulingGroup);
        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    }

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
    }

    mProxy->interrupt();
    mAudioTrack->stop();
    // the playback head position will reset to 0, so if a marker is set, we need
    // to activate it again
    mMarkerReached = false;
#if 0
    // Force flush if a shared buffer is used otherwise audioflinger
    // will not stop before end of buffer is reached.
    // It may be needed to make sure that we stop playback, likely in case looping is on.
    if (mSharedBuffer != 0) {
        flush_l();
    }
#endif

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames;
            // OffloadThread sends HAL pause in its threadLoop, so the time saved
            // here can be slightly off
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    if (mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    uint32_t afSamplingRate;
    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
        return NO_INIT;
    }
    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
    if (rate == 0 || rate > afSamplingRate*2 ) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSampleRate = rate;
    mProxy->setSampleRate(rate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    if (mIsTimed) {
        return 0;
    }

    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // FIXME If setting a loop also sets position to start of loop, then
    //       this is correct.  Otherwise it should be removed.
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *marker = mMarkerPosition;

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = mProxy->getPosition() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME Check whether loops and setting position are incompatible in old code.
    // If we use setLoop for both purposes we lose the capability to set the position while looping.
    mStaticProxy->setLoop(position, mFrameCount, 0);

    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position) const
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    if (isOffloaded_l()) {
        uint32_t dspFrames = 0;

        if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
        }
        *position = dspFrames;
    } else {
        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
                mProxy->getPosition();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0 || mIsTimed) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME The new code cannot reload while keeping a loop specified.
    // Need to check how the old code handled this, and whether it's a significant change.
    mStaticProxy->setLoop(0, mFrameCount, 0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

// -------------------------------------------------------------------------

// must be called with mLock held
status_t AudioTrack::createTrack_l(size_t epoch)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat,
            mChannelMask, mFlags, mOffloadInfo);
    if (output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for stream type %d, sample rate %u, format %#x, "
              "channel mask %#x, flags %#x",
              mStreamType, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all

    uint32_t afLatency;
    status = AudioSystem::getLatency(output, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, mStreamType, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, mStreamType, status);
        goto release;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, mStreamType, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, mStreamType, status);
        goto release;
    }

    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK)) &&
            // matching sample rate
            (mSampleRate == afSampleRate))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, no sample rate conversion
    //  n = 3   normal track, with sample rate conversion
    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
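    // For example (hypothetical rates): a 44100 Hz client on a 48000 Hz mixer output needs
    // sample rate conversion, so nBuffering = 3; matching rates would use double buffering.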

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_is_linear_pcm(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {

        // Ensure that buffer alignment matches channel count
        // 8-bit data in shared memory is not currently supported by AudioFlinger
        size_t alignment = audio_bytes_per_sample(
                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
        if (alignment & 1) {
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
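        // For example (hypothetical case): 16-bit stereo data gives 2 bytes per sample,
        // doubled to a required 4-byte alignment of the shared buffer address.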
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSizeAF;

    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {

        // FIXME move these calculations and associated checks to server

        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                afFrameCount, minBufCount, afSampleRate, afLatency);
        if (minBufCount <= nBuffering) {
            minBufCount = nBuffering;
        }

        size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                ", afLatency=%d",
                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);

        if (frameCount == 0) {
            frameCount = minFrameCount;
        } else if (frameCount < minFrameCount) {
            // not ALOGW because it happens all the time when playing key clicks over A2DP
            ALOGV("Minimum buffer size corrected from %d to %d",
                     frameCount, minFrameCount);
            frameCount = minFrameCount;
        }
        // Make sure that application is notified with sufficient margin before underrun
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
            mNotificationFramesAct = frameCount/nBuffering;
        }

    } else {
        // For fast tracks, the frame count calculations and checks are done by server
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
                                                      mSampleRate,
                                                      // AudioFlinger only sees 16-bit PCM
                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &trackFlags,
                                                      mSharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
            mAwaitBoost = true;
            if (mSharedBuffer == 0) {
                // Theoretically double-buffering is not required for fast tracks,
                // due to tighter scheduling.  But in practice, to accommodate kernels with
                // scheduling jitter, and apps with computation jitter, we use double-buffering.
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        } else {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
            // once denied, do not request again if IAudioTrack is re-created
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
            if (mSharedBuffer == 0) {
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
    } else {
        buffers = mSharedBuffer->pointer();
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME don't believe this lie
    mLatency = afLatency + (1000*frameCount) / mSampleRate;

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
        mProxy = mStaticProxy;
    }
    mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
    mProxy->setSendLevel(mSendLevel);
    mProxy->setSampleRate(mSampleRate);
    mProxy->setEpoch(epoch);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
    }

release:
    AudioSystem::releaseOutput(output);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    if (audioBuffer == NULL) {
        return BAD_VALUE;
    }
    if (mTransfer != TRANSFER_OBTAIN) {
        audioBuffer->frameCount = 0;
        audioBuffer->size = 0;
        audioBuffer->raw = NULL;
        return INVALID_OPERATION;
    }

    const struct timespec *requested;
    struct timespec timeout;
    if (waitCount == -1) {
        requested = &ClientProxy::kForever;
    } else if (waitCount == 0) {
        requested = &ClientProxy::kNonBlocking;
    } else if (waitCount > 0) {
        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
        timeout.tv_sec = ms / 1000;
        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
        requested = &timeout;
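        // For example, a hypothetical waitCount of 100 maps to 100 * WAIT_PERIOD_MS = 1000 ms,
        // i.e. tv_sec = 1 and tv_nsec = 0.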
    } else {
        ALOGE("%s invalid waitCount %d", __func__, waitCount);
        requested = NULL;
    }
    return obtainBuffer(audioBuffer, requested);
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);

    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));

    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}

void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    if (mTransfer == TRANSFER_SHARED) {
        return;
    }

    size_t stepCount = audioBuffer->size / mFrameSizeAF;
    if (stepCount == 0) {
        return;
    }

    Proxy::Buffer buffer;
    buffer.mFrameCount = stepCount;
    buffer.mRaw = audioBuffer->raw;

    AutoMutex lock(mLock);
    mInUnderrun = false;
    mProxy->releaseBuffer(&buffer);

    // restart track if it was disabled by audioflinger due to previous underrun
    if (mState == STATE_ACTIVE) {
        audio_track_cblk_t* cblk = mCblk;
        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
}

// -------------------------------------------------------------------------

ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
        return INVALID_OPERATION;
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer,
                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t toWrite;
        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            // Divide capacity by 2 to take expansion into account
            toWrite = audioBuffer.size >> 1;
            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
        } else {
            toWrite = audioBuffer.size;
            memcpy(audioBuffer.i8, buffer, toWrite);
        }
        buffer = ((const char *) buffer) + toWrite;
        userSize -= toWrite;
        written += toWrite;

        releaseBuffer(&audioBuffer);
    }

    return written;
}

// -------------------------------------------------------------------------

TimedAudioTrack::TimedAudioTrack() {
    mIsTimed = true;
}

status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
{
    AutoMutex lock(mLock);
    status_t result = UNKNOWN_ERROR;

#if 1
    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
    // while we are accessing the cblk
    sp<IAudioTrack> audioTrack = mAudioTrack;
    sp<IMemory> iMem = mCblkMemory;
#endif

    // If the track is not invalid already, try to allocate a buffer.  If the allocation
    // fails, indicating that the server is dead, flag the track as invalid so
    // we can attempt to restore it in just a bit.
    audio_track_cblk_t* cblk = mCblk;
    if (!(cblk->mFlags & CBLK_INVALID)) {
        result = mAudioTrack->allocateTimedBuffer(size, buffer);
        if (result == DEAD_OBJECT) {
            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
        }
    }

    // If the track is invalid at this point, attempt to restore it and try the
    // allocation one more time.
    if (cblk->mFlags & CBLK_INVALID) {
        result = restoreTrack_l("allocateTimedBuffer");

        if (result == NO_ERROR) {
            result = mAudioTrack->allocateTimedBuffer(size, buffer);
        }
    }

    return result;
}

status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
                                           int64_t pts)
{
    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
    {
        AutoMutex lock(mLock);
        audio_track_cblk_t* cblk = mCblk;
        // restart track if it was disabled by audioflinger due to previous underrun
        if (buffer->size() != 0 && status == NO_ERROR &&
                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
    return status;
}

status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
                                                TargetTimeline target)
{
    return mAudioTrack->setMediaTimeTransform(xform, target);
}

// -------------------------------------------------------------------------

nsecs_t AudioTrack::processAudioBuffer()
{
    // Currently the AudioTrack thread is not created if there are no callbacks.
    // Would it ever make sense to run the thread, even without callbacks?
    // If so, then replace this by checks at each use for mCbf != NULL.
    LOG_ALWAYS_FATAL_IF(mCblk == NULL);

    mLock.lock();
    if (mAwaitBoost) {
        mAwaitBoost = false;
        mLock.unlock();
        static const int32_t kMaxTries = 5;
        int32_t tryCounter = kMaxTries;
        uint32_t pollUs = 10000;
        do {
            int policy = sched_getscheduler(0);
            if (policy == SCHED_FIFO || policy == SCHED_RR) {
                break;
            }
            usleep(pollUs);
            pollUs <<= 1;
        } while (tryCounter-- > 0);
        if (tryCounter < 0) {
            ALOGE("did not receive expected priority boost on time");
        }
        // Run again immediately
        return 0;
    }

    // Can only reference mCblk while locked
    int32_t flags = android_atomic_and(
        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);

    // Check for track invalidation
    if (flags & CBLK_INVALID) {
        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
        // AudioSystem cache. We should not exit here but after calling the callback so
        // that the upper layers can recreate the track
        if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
            status_t status = restoreTrack_l("processAudioBuffer");
            mLock.unlock();
            // Run again immediately, but with a new IAudioTrack
            return 0;
        }
    }

    bool waitStreamEnd = mState == STATE_STOPPING;
    bool active = mState == STATE_ACTIVE;

    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
    bool newUnderrun = false;
    if (flags & CBLK_UNDERRUN) {
#if 0
        // Currently in shared buffer mode, when the server reaches the end of buffer,
        // the track stays active in continuous underrun state.  It's up to the application
        // to pause or stop the track, or set the position to a new offset within buffer.
        // This was some experimental code to auto-pause on underrun.   Keeping it here
        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
        if (mTransfer == TRANSFER_SHARED) {
            mState = STATE_PAUSED;
            active = false;
        }
#endif
        if (!mInUnderrun) {
            mInUnderrun = true;
            newUnderrun = true;
        }
    }

    // Get current position of server
    size_t position = mProxy->getPosition();

    // Manage marker callback
    bool markerReached = false;
    size_t markerPosition = mMarkerPosition;
    // FIXME fails for wraparound, need 64 bits
    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
        mMarkerReached = markerReached = true;
    }

    // Determine number of new position callback(s) that will be needed, while locked
    size_t newPosCount = 0;
    size_t newPosition = mNewPosition;
    size_t updatePeriod = mUpdatePeriod;
    // FIXME fails for wraparound, need 64 bits
    if (updatePeriod > 0 && position >= newPosition) {
        newPosCount = ((position - newPosition) / updatePeriod) + 1;
        mNewPosition += updatePeriod * newPosCount;
    }

    // Cache other fields that will be needed soon
    uint32_t loopPeriod = mLoopPeriod;
    uint32_t sampleRate = mSampleRate;
    uint32_t notificationFrames = mNotificationFramesAct;
    if (mRefreshRemaining) {
        mRefreshRemaining = false;
        mRemainingFrames = notificationFrames;
        mRetryOnPartialBuffer = false;
    }
    size_t misalignment = mProxy->getMisalignment();
    uint32_t sequence = mSequence;
    sp<AudioTrackClientProxy> proxy = mProxy;

    // These fields don't need to be cached, because they are assigned only by set():
    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
    // mFlags is also assigned by createTrack_l(), but not the bit we care about.

    mLock.unlock();

1526    if (waitStreamEnd) {
1527        struct timespec timeout;
1528        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1529        timeout.tv_nsec = 0;
1530
1531        status_t status = proxy->waitStreamEndDone(&timeout);
1532        switch (status) {
1533        case NO_ERROR:
1534        case DEAD_OBJECT:
1535        case TIMED_OUT:
1536            mCbf(EVENT_STREAM_END, mUserData, NULL);
1537            {
1538                AutoMutex lock(mLock);
1539                // The previously assigned value of waitStreamEnd is no longer valid,
1540                // since the mutex has been unlocked and either the callback handler
1541                // or another thread could have re-started the AudioTrack during that time.
1542                waitStreamEnd = mState == STATE_STOPPING;
1543                if (waitStreamEnd) {
1544                    mState = STATE_STOPPED;
1545                }
1546            }
1547            if (waitStreamEnd && status != DEAD_OBJECT) {
1548                return NS_INACTIVE;
1549            }
1550            break;
1551        }
1552        return 0;
1553    }
1554
1555    // perform callbacks while unlocked
1556    if (newUnderrun) {
1557        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1558    }
1559    // FIXME we will miss loops if the loop cycle was signaled several times since the last call
1560    //       to processAudioBuffer()
1561    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1562        mCbf(EVENT_LOOP_END, mUserData, NULL);
1563    }
1564    if (flags & CBLK_BUFFER_END) {
1565        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1566    }
1567    if (markerReached) {
1568        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1569    }
1570    while (newPosCount > 0) {
1571        size_t temp = newPosition;
1572        mCbf(EVENT_NEW_POS, mUserData, &temp);
1573        newPosition += updatePeriod;
1574        newPosCount--;
1575    }
1576
1577    if (mObservedSequence != sequence) {
1578        mObservedSequence = sequence;
1579        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1580        // for offloaded tracks, just wait for the upper layers to recreate the track
1581        if (isOffloaded()) {
1582            return NS_INACTIVE;
1583        }
1584    }
1585
1586    // if inactive, then don't run me again until re-started
1587    if (!active) {
1588        return NS_INACTIVE;
1589    }
1590
1591    // Compute the estimated time until the next timed event (position, markers, loops)
1592    // FIXME only for non-compressed audio
1593    uint32_t minFrames = ~0;
1594    if (!markerReached && position < markerPosition) {
1595        minFrames = markerPosition - position;
1596    }
1597    if (loopPeriod > 0 && loopPeriod < minFrames) {
1598        minFrames = loopPeriod;
1599    }
1600    if (updatePeriod > 0 && updatePeriod < minFrames) {
1601        minFrames = updatePeriod;
1602    }
1603
1604    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1605    static const uint32_t kPoll = 0;
1606    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1607        minFrames = kPoll * notificationFrames;
1608    }
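    // For example (illustrative values), with kPoll == 2 and notificationFrames == 512 the thread
    // would wake at least every 1024 frames, allowing it to recover even if the server stalls.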
1609
1610    // Convert frame units to time units
1611    nsecs_t ns = NS_WHENEVER;
1612    if (minFrames != (uint32_t) ~0) {
1613        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1614        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1615        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1616    }
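    // For example (illustrative values): minFrames == 480 at sampleRate == 48000 yields
    // 480 * 1e9 / 48000 == 10 ms, plus the 10 ms fudge, so the thread sleeps roughly 20 ms.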
1617
1618    // If not supplying data by EVENT_MORE_DATA, then we're done
1619    if (mTransfer != TRANSFER_CALLBACK) {
1620        return ns;
1621    }
1622
1623    struct timespec timeout;
1624    const struct timespec *requested = &ClientProxy::kForever;
1625    if (ns != NS_WHENEVER) {
1626        timeout.tv_sec = ns / 1000000000LL;
1627        timeout.tv_nsec = ns % 1000000000LL;
1628        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1629        requested = &timeout;
1630    }
1631
1632    while (mRemainingFrames > 0) {
1633
1634        Buffer audioBuffer;
1635        audioBuffer.frameCount = mRemainingFrames;
1636        size_t nonContig;
1637        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1638        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1639                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1640        requested = &ClientProxy::kNonBlocking;
1641        size_t avail = audioBuffer.frameCount + nonContig;
1642        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1643                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1644        if (err != NO_ERROR) {
1645            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1646                    (isOffloaded() && (err == DEAD_OBJECT))) {
1647                return 0;
1648            }
1649            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1650            return NS_NEVER;
1651        }
1652
1653        if (mRetryOnPartialBuffer && !isOffloaded()) {
1654            mRetryOnPartialBuffer = false;
1655            if (avail < mRemainingFrames) {
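                // Note: the 1100000000LL constant is nanoseconds-per-second plus what appears to
                // be a ~10% margin, so the wake-up errs on the late side; this reading of the
                // constant is an interpretation, not stated in the source.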
1656                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1657                if (ns < 0 || myns < ns) {
1658                    ns = myns;
1659                }
1660                return ns;
1661            }
1662        }
1663
1664        // Divide the buffer size by 2 to account for the in-place expansion from
1665        // 8-bit to 16-bit samples: the callback must fill only half of the
1666        // destination buffer.
1667        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1668            audioBuffer.size >>= 1;
1669        }
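        // Illustrative example: for a 4096-byte region returned by obtainBuffer(), the callback
        // is asked for at most 2048 bytes of 8-bit samples, which are expanded in place to
        // 4096 bytes of 16-bit samples below, before releaseBuffer().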
1670
1671        size_t reqSize = audioBuffer.size;
1672        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1673        size_t writtenSize = audioBuffer.size;
1674
1675        // Sanity check on returned size
1676        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1677            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %d bytes",
1678                    reqSize, (int) writtenSize);
1679            return NS_NEVER;
1680        }
1681
1682        if (writtenSize == 0) {
1683            // The callback is done filling buffers.
1684            // Keep this thread running so it can handle timed events and retry
1685            // getting more data at intervals of WAIT_PERIOD_MS,
1686            // but don't just spin and burn CPU; wait before the next attempt.
1687            return WAIT_PERIOD_MS * 1000000LL;
1688        }
1689
1690        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1691            // 8 to 16 bit conversion, note that source and destination are the same address
1692            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1693            audioBuffer.size <<= 1;
1694        }
1695
1696        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1697        audioBuffer.frameCount = releasedFrames;
1698        mRemainingFrames -= releasedFrames;
1699        if (misalignment >= releasedFrames) {
1700            misalignment -= releasedFrames;
1701        } else {
1702            misalignment = 0;
1703        }
1704
1705        releaseBuffer(&audioBuffer);
1706
1707        // FIXME here is where we would repeat EVENT_MORE_DATA on the same, advanced buffer
1708        // if the callback does not accept the full chunk
1709        if (writtenSize < reqSize) {
1710            continue;
1711        }
1712
1713        // There could be enough non-contiguous frames available to satisfy the remaining request
1714        if (mRemainingFrames <= nonContig) {
1715            continue;
1716        }
1717
1718#if 0
1719        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1720        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1721        // that total to a sum == notificationFrames.
1722        if (0 < misalignment && misalignment <= mRemainingFrames) {
1723            mRemainingFrames = misalignment;
1724            return (mRemainingFrames * 1100000000LL) / sampleRate;
1725        }
1726#endif
1727
1728    }
1729    mRemainingFrames = notificationFrames;
1730    mRetryOnPartialBuffer = true;
1731
1732    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1733    return 0;
1734}
1735
1736status_t AudioTrack::restoreTrack_l(const char *from)
1737{
1738    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1739          isOffloaded_l() ? "Offloaded" : "PCM", from);
1740    ++mSequence;
1741    status_t result;
1742
1743    // refresh the audio configuration cache in this process to make sure we get new
1744    // output parameters in createTrack_l()
1745    AudioSystem::clearAudioConfigCache();
1746
1747    if (isOffloaded_l()) {
1748        // FIXME re-creation of offloaded tracks is not yet implemented
1749        return DEAD_OBJECT;
1750    }
1751
1752    // If the new IAudioTrack is created, createTrack_l() will modify the
1753    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1754    // It will also drop the strong references to the previous IAudioTrack and IMemory.
1755
1756    // Take into account, in the saved position, the frames that will be lost by track recreation.
1757    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
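    // getPosition() counts frames already consumed by the server, and getFramesFilled() counts
    // frames written but not yet consumed; passing their sum as the /*epoch*/ below lets the
    // reported position continue from the last known value after recreation (see the
    // "continue playback from last known position" comment further down).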
1758    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1759    result = createTrack_l(position /*epoch*/);
1760
1761    if (result == NO_ERROR) {
1762        // continue playback from last known position, but
1763        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1764        if (mStaticProxy != NULL) {
1765            mLoopPeriod = 0;
1766            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1767        }
1768        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1769        //       track destruction have been played? This is critical for the SoundPool implementation.
1770        //       This must be broken, and needs to be tested/debugged.
1771#if 0
1772        // restore write index and set other indexes to reflect empty buffer status
1773        if (!strcmp(from, "start")) {
1774            // Make sure that a client relying on callback events indicating underrun or
1775            // the actual number of audio frames played (e.g. SoundPool) receives them.
1776            if (mSharedBuffer == 0) {
1777                // restart playback even if buffer is not completely filled.
1778                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1779            }
1780        }
1781#endif
1782        if (mState == STATE_ACTIVE) {
1783            result = mAudioTrack->start();
1784        }
1785    }
1786    if (result != NO_ERROR) {
1787        ALOGW("restoreTrack_l() failed status %d", result);
1788        mState = STATE_STOPPED;
1789    }
1790
1791    return result;
1792}
1793
1794status_t AudioTrack::setParameters(const String8& keyValuePairs)
1795{
1796    AutoMutex lock(mLock);
1797    return mAudioTrack->setParameters(keyValuePairs);
1798}
1799
1800status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1801{
1802    AutoMutex lock(mLock);
1803    // FIXME not implemented for fast tracks; should use proxy and SSQ
1804    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1805        return INVALID_OPERATION;
1806    }
1807    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1808        return INVALID_OPERATION;
1809    }
1810    status_t status = mAudioTrack->getTimestamp(timestamp);
1811    if (status == NO_ERROR) {
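        // The server reports a position relative to the current IAudioTrack; adding the proxy
        // epoch converts it to the client's cumulative frame count across track recreations.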
1812        timestamp.mPosition += mProxy->getEpoch();
1813    }
1814    return status;
1815}
1816
1817String8 AudioTrack::getParameters(const String8& keys)
1818{
1819    audio_io_handle_t output = getOutput();
1820    if (output != AUDIO_IO_HANDLE_NONE) {
1821        return AudioSystem::getParameters(output, keys);
1822    } else {
1823        return String8::empty();
1824    }
1825}
1826
1827bool AudioTrack::isOffloaded() const
1828{
1829    AutoMutex lock(mLock);
1830    return isOffloaded_l();
1831}
1832
1833status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
1834{
1835
1836    const size_t SIZE = 256;
1837    char buffer[SIZE];
1838    String8 result;
1839
1840    result.append(" AudioTrack::dump\n");
1841    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1842            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
1843    result.append(buffer);
1844    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
1845            mChannelCount, mFrameCount);
1846    result.append(buffer);
1847    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1848    result.append(buffer);
1849    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1850    result.append(buffer);
1851    ::write(fd, result.string(), result.size());
1852    return NO_ERROR;
1853}
1854
1855uint32_t AudioTrack::getUnderrunFrames() const
1856{
1857    AutoMutex lock(mLock);
1858    return mProxy->getUnderrunFrames();
1859}
1860
1861// =========================================================================
1862
1863void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
1864{
1865    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1866    if (audioTrack != 0) {
1867        AutoMutex lock(audioTrack->mLock);
1868        audioTrack->mProxy->binderDied();
1869    }
1870}
1871
1872// =========================================================================
1873
1874AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1875    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1876      mIgnoreNextPausedInt(false)
1877{
1878}
1879
1880AudioTrack::AudioTrackThread::~AudioTrackThread()
1881{
1882}
1883
1884bool AudioTrack::AudioTrackThread::threadLoop()
1885{
1886    {
1887        AutoMutex _l(mMyLock);
1888        if (mPaused) {
1889            mMyCond.wait(mMyLock);
1890            // caller will check for exitPending()
1891            return true;
1892        }
1893        if (mIgnoreNextPausedInt) {
1894            mIgnoreNextPausedInt = false;
1895            mPausedInt = false;
1896        }
1897        if (mPausedInt) {
1898            if (mPausedNs > 0) {
1899                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1900            } else {
1901                mMyCond.wait(mMyLock);
1902            }
1903            mPausedInt = false;
1904            return true;
1905        }
1906    }
1907    nsecs_t ns = mReceiver.processAudioBuffer();
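    // Interpret the processAudioBuffer() return value: 0 means run again immediately,
    // NS_INACTIVE means pause until resume(), NS_NEVER means exit the thread loop,
    // NS_WHENEVER means poll after a default interval, and any other (positive) value
    // is a sleep time in nanoseconds before the next run.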
1908    switch (ns) {
1909    case 0:
1910        return true;
1911    case NS_INACTIVE:
1912        pauseInternal();
1913        return true;
1914    case NS_NEVER:
1915        return false;
1916    case NS_WHENEVER:
1917        // FIXME increase poll interval, or make event-driven
1918        ns = 1000000000LL;
1919        // fall through
1920    default:
1921        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", (long long)ns);
1922        pauseInternal(ns);
1923        return true;
1924    }
1925}
1926
1927void AudioTrack::AudioTrackThread::requestExit()
1928{
1929    // must be in this order to avoid a race condition
1930    Thread::requestExit();
1931    resume();
1932}
1933
1934void AudioTrack::AudioTrackThread::pause()
1935{
1936    AutoMutex _l(mMyLock);
1937    mPaused = true;
1938}
1939
1940void AudioTrack::AudioTrackThread::resume()
1941{
1942    AutoMutex _l(mMyLock);
1943    mIgnoreNextPausedInt = true;
1944    if (mPaused || mPausedInt) {
1945        mPaused = false;
1946        mPausedInt = false;
1947        mMyCond.signal();
1948    }
1949}
1950
1951void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1952{
1953    AutoMutex _l(mMyLock);
1954    mPausedInt = true;
1955    mPausedNs = ns;
1956}
1957
1958}; // namespace android
1959