// AudioTrack.cpp revision b7f24b101d43139b4c747129bfbc4ecf5c468b86
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <math.h>
#include <sys/resource.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120


namespace android {
// ---------------------------------------------------------------------------

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME merge with similar code in createTrack_l(), except we're missing
    //       some information here that is available in createTrack_l():
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }

    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
            afFrameCount * minBufCount * sampleRate / afSampleRate;
    // The formula above should always produce a non-zero value, but return an error
    // in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
    return NO_ERROR;
}
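// A minimal client-side sketch (not from the original file) of how
// getMinFrameCount() is typically used to size a streaming buffer; the stream
// type and 44100 Hz rate below are assumptions chosen for illustration.
//
//   size_t minFrameCount = 0;
//   if (AudioTrack::getMinFrameCount(&minFrameCount,
//           AUDIO_STREAM_MUSIC, 44100) == NO_ERROR) {
//       // Worked example of the formula above, assuming afFrameCount = 1024,
//       // afSampleRate = 48000 and afLatency = 64 ms:
//       //   minBufCount = 64 / ((1000 * 1024) / 48000) = 64 / 21 = 3
//       //   *frameCount = 1024 * 3 * 44100 / 48000 = 2822 frames
//       size_t frameCount = 2 * minFrameCount;  // leave headroom for the app
//   }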

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid);
}

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
                IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
        ALOGE("Invalid stream type %d", streamType);
        return BAD_VALUE;
    }
    mStreamType = streamType;

    status_t status;
    if (sampleRate == 0) {
        status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType);
        if (status != NO_ERROR) {
            ALOGE("Could not get output sample rate for stream type %d; status %d",
                    streamType, status);
            return status;
        }
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // AudioFlinger does not currently support 8-bit data in shared memory
    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
        ALOGE("8-bit data in shared memory is not supported");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    // only allow deep buffering for music stream type
    if (streamType != AUDIO_STREAM_MUSIC) {
        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_is_linear_pcm(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
        mFrameSizeAF = mFrameSize;
    } else {
        ALOG_ASSERT(audio_is_linear_pcm(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        mFrameSizeAF = channelCount * audio_bytes_per_sample(
                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    mSessionId = sessionId;
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status = createTrack_l(0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopPeriod = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;

    return NO_ERROR;
}
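// Illustrative sketch (not from the original file): constructing a streaming
// track that set() will route to TRANSFER_CALLBACK, since a callback is given,
// no shared buffer is used, and threadCanCallJava defaults to false.  The
// format, rate and callback body are assumptions.
//
//   static void audioCallback(int event, void* user, void* info) {
//       if (event == AudioTrack::EVENT_MORE_DATA) {
//           AudioTrack::Buffer* b = static_cast<AudioTrack::Buffer*>(info);
//           memset(b->raw, 0, b->size);     // fill the request with silence
//       }
//   }
//
//   sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 48000,
//           AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
//           0 /*frameCount: use the computed minimum*/,
//           AUDIO_OUTPUT_FLAG_NONE, audioCallback, NULL /*user*/);
//   if (track->initCheck() == NO_ERROR) {
//       track->start();
//   }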

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (previousState == STATE_STOPPING) {
            mProxy->interrupt();
        } else {
            t->resume();
        }
    } else {
        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
        get_sched_policy(0, &mPreviousSchedulingGroup);
        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    }

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
    }

    mProxy->interrupt();
    mAudioTrack->stop();
    // the playback head position will reset to 0, so if a marker is set, we need
    // to activate it again
    mMarkerReached = false;
#if 0
    // Force flush if a shared buffer is used otherwise audioflinger
    // will not stop before end of buffer is reached.
    // It may be needed to make sure that we stop playback, likely in case looping is on.
    if (mSharedBuffer != 0) {
        flush_l();
    }
#endif

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames;
            // OffloadThread sends HAL pause in its threadLoop.. time saved
            // here can be slightly off
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    if (mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    uint32_t afSamplingRate;
    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
        return NO_INIT;
    }
    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
    if (rate == 0 || rate > afSamplingRate*2 ) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSampleRate = rate;
    mProxy->setSampleRate(rate);

    return NO_ERROR;
}
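// For example, with an output (af) sampling rate of 48000 Hz the check above
// accepts client rates from 1 to 96000 Hz; rate == 0 or anything higher is
// rejected with BAD_VALUE before the track rate is changed.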

uint32_t AudioTrack::getSampleRate() const
{
    if (mIsTimed) {
        return 0;
    }

    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // FIXME If setting a loop also sets position to start of loop, then
    //       this is correct.  Otherwise it should be removed.
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
}
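// Illustrative sketch (not from the original file): loop points only apply to
// static (shared buffer) tracks and can only be changed while the track is not
// active.  The sharedBuffer below is an assumed sp<IMemory> holding the PCM clip.
//
//   sp<AudioTrack> clip = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
//           AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_MONO,
//           sharedBuffer, AUDIO_OUTPUT_FLAG_NONE, NULL /*cbf*/);
//   clip->setLoop(0, clip->frameCount(), -1);   // loop the whole clip forever
//   clip->start();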

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *marker = mMarkerPosition;

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = mProxy->getPosition() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME Check whether loops and setting position are incompatible in old code.
    // If we use setLoop for both purposes we lose the capability to set the position while looping.
    mStaticProxy->setLoop(position, mFrameCount, 0);

    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position) const
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    if (isOffloaded_l()) {
        uint32_t dspFrames = 0;

        if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
        }
        *position = dspFrames;
    } else {
        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
                mProxy->getPosition();
    }
    return NO_ERROR;
}
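// Illustrative note (not from the original file): getPosition() reports the
// playback head in frames at the track's sample rate, so elapsed playback time
// can be derived as below; the track pointer and variable names are assumptions.
//
//   uint32_t frames = 0;
//   if (track->getPosition(&frames) == NO_ERROR) {
//       uint32_t playedMs =
//               (uint32_t)(((uint64_t)frames * 1000) / track->getSampleRate());
//   }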

status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0 || mIsTimed) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME The new code cannot reload while keeping a loop specified.
    // Need to check how the old code handled this, and whether it's a significant change.
    mStaticProxy->setLoop(0, mFrameCount, 0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

// -------------------------------------------------------------------------

// must be called with mLock held
status_t AudioTrack::createTrack_l(size_t epoch)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat,
            mChannelMask, mFlags, mOffloadInfo);
    if (output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for stream type %d, sample rate %u, format %#x, "
              "channel mask %#x, flags %#x",
              mStreamType, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all

    uint32_t afLatency;
    status = AudioSystem::getLatency(output, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }

    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK)) &&
            // matching sample rate
            (mSampleRate == afSampleRate))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, no sample rate conversion
    //  n = 3   normal track, with sample rate conversion
    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_is_linear_pcm(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {

        // Ensure that buffer alignment matches channel count
        // 8-bit data in shared memory is not currently supported by AudioFlinger
        size_t alignment = audio_bytes_per_sample(
                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
        if (alignment & 1) {
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSizeAF;

    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {

        // FIXME move these calculations and associated checks to server

        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                afFrameCount, minBufCount, afSampleRate, afLatency);
        if (minBufCount <= nBuffering) {
            minBufCount = nBuffering;
        }

        size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                ", afLatency=%d",
                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);

        if (frameCount == 0) {
            frameCount = minFrameCount;
        } else if (frameCount < minFrameCount) {
            // not ALOGW because it happens all the time when playing key clicks over A2DP
            ALOGV("Minimum buffer size corrected from %d to %d",
                     frameCount, minFrameCount);
            frameCount = minFrameCount;
        }
        // Make sure that application is notified with sufficient margin before underrun
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
            mNotificationFramesAct = frameCount/nBuffering;
        }

    } else {
        // For fast tracks, the frame count calculations and checks are done by server
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
                                                      mSampleRate,
                                                      // AudioFlinger only sees 16-bit PCM
                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &trackFlags,
                                                      mSharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;

    mCblkMemory = iMem;
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
            mAwaitBoost = true;
            if (mSharedBuffer == 0) {
                // Theoretically double-buffering is not required for fast tracks,
                // due to tighter scheduling.  But in practice, to accommodate kernels with
                // scheduling jitter, and apps with computation jitter, we use double-buffering.
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        } else {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
            // once denied, do not request again if IAudioTrack is re-created
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
            if (mSharedBuffer == 0) {
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
    } else {
        buffers = mSharedBuffer->pointer();
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME don't believe this lie
    mLatency = afLatency + (1000*frameCount) / mSampleRate;

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
        mProxy = mStaticProxy;
    }
    mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
    mProxy->setSendLevel(mSendLevel);
    mProxy->setSampleRate(mSampleRate);
    mProxy->setEpoch(epoch);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
    }

release:
    AudioSystem::releaseOutput(output);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}
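// Illustrative note (not from the original file): per the client-side check in
// createTrack_l() above, a request for AUDIO_OUTPUT_FLAG_FAST is only forwarded
// to the server when the track uses a shared buffer or TRANSFER_CALLBACK and
// its sample rate equals afSampleRate.  A sketch with assumed values, reusing
// audioCallback from the earlier sketch:
//
//   sp<AudioTrack> fast = new AudioTrack(AUDIO_STREAM_MUSIC,
//           48000 /*assumed to equal afSampleRate*/, AUDIO_FORMAT_PCM_16_BIT,
//           AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount: server decides*/,
//           AUDIO_OUTPUT_FLAG_FAST, audioCallback, NULL /*user*/);
//   // If the client check or the server denies the request, the FAST bit is
//   // simply cleared in mFlags and the track continues as a normal track.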

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    if (audioBuffer == NULL) {
        return BAD_VALUE;
    }
    if (mTransfer != TRANSFER_OBTAIN) {
        audioBuffer->frameCount = 0;
        audioBuffer->size = 0;
        audioBuffer->raw = NULL;
        return INVALID_OPERATION;
    }

    const struct timespec *requested;
    struct timespec timeout;
    if (waitCount == -1) {
        requested = &ClientProxy::kForever;
    } else if (waitCount == 0) {
        requested = &ClientProxy::kNonBlocking;
    } else if (waitCount > 0) {
        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
        timeout.tv_sec = ms / 1000;
        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
        requested = &timeout;
    } else {
        ALOGE("%s invalid waitCount %d", __func__, waitCount);
        requested = NULL;
    }
    return obtainBuffer(audioBuffer, requested);
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);

    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));

    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}

void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    if (mTransfer == TRANSFER_SHARED) {
        return;
    }

    size_t stepCount = audioBuffer->size / mFrameSizeAF;
    if (stepCount == 0) {
        return;
    }

    Proxy::Buffer buffer;
    buffer.mFrameCount = stepCount;
    buffer.mRaw = audioBuffer->raw;

    AutoMutex lock(mLock);
    mInUnderrun = false;
    mProxy->releaseBuffer(&buffer);

    // restart track if it was disabled by audioflinger due to previous underrun
    if (mState == STATE_ACTIVE) {
        audio_track_cblk_t* cblk = mCblk;
        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
}
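// Illustrative sketch (not from the original file): obtainBuffer()/releaseBuffer()
// form the TRANSFER_OBTAIN client API.  A single blocking fill, with an assumed
// track in TRANSFER_OBTAIN mode and an assumed PCM source buffer:
//
//   AudioTrack::Buffer buf;
//   buf.frameCount = framesToWrite;              // requested size, in frames
//   if (track->obtainBuffer(&buf, -1 /*waitCount: block*/) == NO_ERROR) {
//       memcpy(buf.raw, source, buf.size);       // buf.size is in bytes
//       track->releaseBuffer(&buf);              // make the frames playable
//   }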

// -------------------------------------------------------------------------

ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
        return INVALID_OPERATION;
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer,
                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t toWrite;
        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            // Divide capacity by 2 to take expansion into account
            toWrite = audioBuffer.size >> 1;
            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
        } else {
            toWrite = audioBuffer.size;
            memcpy(audioBuffer.i8, buffer, toWrite);
        }
        buffer = ((const char *) buffer) + toWrite;
        userSize -= toWrite;
        written += toWrite;

        releaseBuffer(&audioBuffer);
    }

    return written;
}
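// Illustrative sketch (not from the original file): in TRANSFER_SYNC mode (no
// callback, no shared buffer) the client pushes PCM with write().  The source
// buffer, its size and the parameter values are assumptions.
//
//   sp<AudioTrack> sink = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
//           AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
//           0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, NULL /*cbf*/);
//   sink->start();
//   ssize_t written = sink->write(pcmData, pcmBytes, true /*blocking*/);
//   // write() returns the number of bytes consumed from pcmData, or a negative
//   // status code; with blocking == true a short count normally indicates an
//   // error encountered after a partial write.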

// -------------------------------------------------------------------------

TimedAudioTrack::TimedAudioTrack() {
    mIsTimed = true;
}

status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
{
    AutoMutex lock(mLock);
    status_t result = UNKNOWN_ERROR;

#if 1
    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
    // while we are accessing the cblk
    sp<IAudioTrack> audioTrack = mAudioTrack;
    sp<IMemory> iMem = mCblkMemory;
#endif

    // If the track is not already invalid, try to allocate a buffer.  If the alloc
    // fails, indicating that the server is dead, flag the track as invalid so
    // we can attempt to restore it in just a bit.
    audio_track_cblk_t* cblk = mCblk;
    if (!(cblk->mFlags & CBLK_INVALID)) {
        result = mAudioTrack->allocateTimedBuffer(size, buffer);
        if (result == DEAD_OBJECT) {
            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
        }
    }

    // If the track is invalid at this point, attempt to restore it, and try the
    // allocation one more time.
1376    if (cblk->mFlags & CBLK_INVALID) {
1377        result = restoreTrack_l("allocateTimedBuffer");
1378
1379        if (result == NO_ERROR) {
1380            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1381        }
1382    }
1383
1384    return result;
1385}
1386
1387status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1388                                           int64_t pts)
1389{
1390    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1391    {
1392        AutoMutex lock(mLock);
1393        audio_track_cblk_t* cblk = mCblk;
1394        // restart track if it was disabled by audioflinger due to previous underrun
1395        if (buffer->size() != 0 && status == NO_ERROR &&
1396                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1397            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1398            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1399            // FIXME ignoring status
1400            mAudioTrack->start();
1401        }
1402    }
1403    return status;
1404}
1405
1406status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1407                                                TargetTimeline target)
1408{
1409    return mAudioTrack->setMediaTimeTransform(xform, target);
1410}
1411
1412// -------------------------------------------------------------------------
1413
1414nsecs_t AudioTrack::processAudioBuffer()
1415{
1416    // Currently the AudioTrack thread is not created if there are no callbacks.
1417    // Would it ever make sense to run the thread, even without callbacks?
1418    // If so, then replace this by checks at each use for mCbf != NULL.
1419    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1420
1421    mLock.lock();
1422    if (mAwaitBoost) {
1423        mAwaitBoost = false;
1424        mLock.unlock();
1425        static const int32_t kMaxTries = 5;
1426        int32_t tryCounter = kMaxTries;
1427        uint32_t pollUs = 10000;
1428        do {
1429            int policy = sched_getscheduler(0);
1430            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1431                break;
1432            }
1433            usleep(pollUs);
1434            pollUs <<= 1;
1435        } while (tryCounter-- > 0);
1436        if (tryCounter < 0) {
1437            ALOGE("did not receive expected priority boost on time");
1438        }
1439        // Run again immediately
1440        return 0;
1441    }
1442
1443    // Can only reference mCblk while locked
1444    int32_t flags = android_atomic_and(
1445        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1446
1447    // Check for track invalidation
1448    if (flags & CBLK_INVALID) {
1449        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1450        // AudioSystem cache. We should not exit here but after calling the callback so
1451        // that the upper layers can recreate the track
1452        if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
1453            status_t status = restoreTrack_l("processAudioBuffer");
1454            mLock.unlock();
1455            // Run again immediately, but with a new IAudioTrack
1456            return 0;
1457        }
1458    }
1459
1460    bool waitStreamEnd = mState == STATE_STOPPING;
1461    bool active = mState == STATE_ACTIVE;
1462
1463    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1464    bool newUnderrun = false;
1465    if (flags & CBLK_UNDERRUN) {
1466#if 0
1467        // Currently in shared buffer mode, when the server reaches the end of buffer,
1468        // the track stays active in continuous underrun state.  It's up to the application
1469        // to pause or stop the track, or set the position to a new offset within buffer.
1470        // This was some experimental code to auto-pause on underrun.   Keeping it here
1471        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1472        if (mTransfer == TRANSFER_SHARED) {
1473            mState = STATE_PAUSED;
1474            active = false;
1475        }
1476#endif
1477        if (!mInUnderrun) {
1478            mInUnderrun = true;
1479            newUnderrun = true;
1480        }
1481    }
1482
1483    // Get current position of server
1484    size_t position = mProxy->getPosition();
1485
1486    // Manage marker callback
1487    bool markerReached = false;
1488    size_t markerPosition = mMarkerPosition;
1489    // FIXME fails for wraparound, need 64 bits
1490    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1491        mMarkerReached = markerReached = true;
1492    }
1493
1494    // Determine number of new position callback(s) that will be needed, while locked
1495    size_t newPosCount = 0;
1496    size_t newPosition = mNewPosition;
1497    size_t updatePeriod = mUpdatePeriod;
1498    // FIXME fails for wraparound, need 64 bits
1499    if (updatePeriod > 0 && position >= newPosition) {
1500        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1501        mNewPosition += updatePeriod * newPosCount;
1502    }
1503
1504    // Cache other fields that will be needed soon
1505    uint32_t loopPeriod = mLoopPeriod;
1506    uint32_t sampleRate = mSampleRate;
1507    uint32_t notificationFrames = mNotificationFramesAct;
1508    if (mRefreshRemaining) {
1509        mRefreshRemaining = false;
1510        mRemainingFrames = notificationFrames;
1511        mRetryOnPartialBuffer = false;
1512    }
1513    size_t misalignment = mProxy->getMisalignment();
1514    uint32_t sequence = mSequence;
1515    sp<AudioTrackClientProxy> proxy = mProxy;
1516
1517    // These fields don't need to be cached, because they are assigned only by set():
1518    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1519    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1520
1521    mLock.unlock();
1522
1523    if (waitStreamEnd) {
1524        struct timespec timeout;
1525        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1526        timeout.tv_nsec = 0;
1527
1528        status_t status = proxy->waitStreamEndDone(&timeout);
1529        switch (status) {
1530        case NO_ERROR:
1531        case DEAD_OBJECT:
1532        case TIMED_OUT:
1533            mCbf(EVENT_STREAM_END, mUserData, NULL);
1534            {
1535                AutoMutex lock(mLock);
1536                // The previously assigned value of waitStreamEnd is no longer valid,
1537                // since the mutex has been unlocked and either the callback handler
1538                // or another thread could have re-started the AudioTrack during that time.
1539                waitStreamEnd = mState == STATE_STOPPING;
1540                if (waitStreamEnd) {
1541                    mState = STATE_STOPPED;
1542                }
1543            }
1544            if (waitStreamEnd && status != DEAD_OBJECT) {
1545                return NS_INACTIVE;
1546            }
1547            break;
1548        }
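        // Any other status falls out of the switch without a callback and reaches the
        // return 0 below, so processAudioBuffer() runs again immediately and retries the wait.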
1549        return 0;
1550    }
1551
1552    // perform callbacks while unlocked
1553    if (newUnderrun) {
1554        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1555    }
1556    // FIXME we will miss loops if loop cycle was signaled several times since last call
1557    //       to processAudioBuffer()
1558    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1559        mCbf(EVENT_LOOP_END, mUserData, NULL);
1560    }
1561    if (flags & CBLK_BUFFER_END) {
1562        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1563    }
1564    if (markerReached) {
1565        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1566    }
1567    while (newPosCount > 0) {
1568        size_t temp = newPosition;
1569        mCbf(EVENT_NEW_POS, mUserData, &temp);
1570        newPosition += updatePeriod;
1571        newPosCount--;
1572    }
1573
1574    if (mObservedSequence != sequence) {
1575        mObservedSequence = sequence;
1576        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1577        // for offloaded tracks, just wait for the upper layers to recreate the track
1578        if (isOffloaded()) {
1579            return NS_INACTIVE;
1580        }
1581    }
1582
1583    // if inactive, then don't run me again until re-started
1584    if (!active) {
1585        return NS_INACTIVE;
1586    }
1587
1588    // Compute the estimated time until the next timed event (position, markers, loops)
1589    // FIXME only for non-compressed audio
1590    uint32_t minFrames = ~0;
1591    if (!markerReached && position < markerPosition) {
1592        minFrames = markerPosition - position;
1593    }
1594    if (loopPeriod > 0 && loopPeriod < minFrames) {
1595        minFrames = loopPeriod;
1596    }
1597    if (updatePeriod > 0 && updatePeriod < minFrames) {
1598        minFrames = updatePeriod;
1599    }
1600
1601    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1602    static const uint32_t kPoll = 0;
1603    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1604        minFrames = kPoll * notificationFrames;
1605    }
1606
1607    // Convert frame units to time units
1608    nsecs_t ns = NS_WHENEVER;
1609    if (minFrames != (uint32_t) ~0) {
1610        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1611        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1612        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1613    }
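    // For example (illustrative values): minFrames == 480 at a 48000 Hz sample rate is 10 ms,
    // plus the 10 ms fudge factor, giving a 20 ms wait before the next pass.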
1614
1615    // If not supplying data by EVENT_MORE_DATA, then we're done
1616    if (mTransfer != TRANSFER_CALLBACK) {
1617        return ns;
1618    }
1619
1620    struct timespec timeout;
1621    const struct timespec *requested = &ClientProxy::kForever;
1622    if (ns != NS_WHENEVER) {
1623        timeout.tv_sec = ns / 1000000000LL;
1624        timeout.tv_nsec = ns % 1000000000LL;
1625        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1626        requested = &timeout;
1627    }
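    // With the 20 ms example above, timeout becomes {tv_sec = 0, tv_nsec = 20000000} and the
    // ALOGV line prints "timeout 0.020".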
1628
1629    while (mRemainingFrames > 0) {
1630
1631        Buffer audioBuffer;
1632        audioBuffer.frameCount = mRemainingFrames;
1633        size_t nonContig;
1634        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1635        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1636                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1637        requested = &ClientProxy::kNonBlocking;
1638        size_t avail = audioBuffer.frameCount + nonContig;
1639        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1640                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1641        if (err != NO_ERROR) {
1642            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1643                    (isOffloaded() && (err == DEAD_OBJECT))) {
1644                return 0;
1645            }
1646            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1647            return NS_NEVER;
1648        }
1649
1650        if (mRetryOnPartialBuffer && !isOffloaded()) {
1651            mRetryOnPartialBuffer = false;
1652            if (avail < mRemainingFrames) {
1653                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
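                // 1100000000LL above is 1.1e9 ns per second of audio: the computed wait is
                // padded by roughly 10%, presumably so that enough buffer space has actually
                // been freed by the time we run again.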
1654                if (ns < 0 || myns < ns) {
1655                    ns = myns;
1656                }
1657                return ns;
1658            }
1659        }
1660
1661        // Divide buffer size by 2 to take into account the expansion
1662        // due to 8 to 16 bit conversion: the callback must fill only half
1663        // of the destination buffer
1664        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1665            audioBuffer.size >>= 1;
1666        }
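        // Illustrative example: for a 4096-byte 16-bit destination buffer, the callback is
        // offered 2048 bytes to fill with 8-bit samples; the in-place 8-to-16-bit expansion
        // further below then doubles the size again.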
1667
1668        size_t reqSize = audioBuffer.size;
1669        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1670        size_t writtenSize = audioBuffer.size;
1671
1672        // Sanity check on returned size
1673        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1674            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1675                    reqSize, (int) writtenSize);
1676            return NS_NEVER;
1677        }
1678
1679        if (writtenSize == 0) {
1680            // The callback is done filling buffers
1681            // Keep this thread going to handle timed events and
1682            // still try to get more data in intervals of WAIT_PERIOD_MS
1683            // but don't just loop and block the CPU, so wait
1684            return WAIT_PERIOD_MS * 1000000LL;
1685        }
1686
1687        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1688            // 8 to 16 bit conversion, note that source and destination are the same address
1689            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1690            audioBuffer.size <<= 1;
1691        }
1692
1693        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1694        audioBuffer.frameCount = releasedFrames;
1695        mRemainingFrames -= releasedFrames;
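        // For example (illustrative values): 2048 bytes written as 16-bit stereo PCM
        // (mFrameSizeAF == 4 bytes per frame) releases 512 frames.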
1696        if (misalignment >= releasedFrames) {
1697            misalignment -= releasedFrames;
1698        } else {
1699            misalignment = 0;
1700        }
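        // Note: from here on the local misalignment copy is only consulted by the disabled
        // heuristic in the "#if 0" block below.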
1701
1702        releaseBuffer(&audioBuffer);
1703
1704        // FIXME this is where we would repeat EVENT_MORE_DATA on the same buffer, advanced
1705        // past the portion already consumed, if the callback did not accept the full chunk.
1706        if (writtenSize < reqSize) {
1707            continue;
1708        }
1709
1710        // There could be enough non-contiguous frames available to satisfy the remaining request
1711        if (mRemainingFrames <= nonContig) {
1712            continue;
1713        }
1714
1715#if 0
1716        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1717        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1718        // that total to a sum == notificationFrames.
1719        if (0 < misalignment && misalignment <= mRemainingFrames) {
1720            mRemainingFrames = misalignment;
1721            return (mRemainingFrames * 1100000000LL) / sampleRate;
1722        }
1723#endif
1724
1725    }
1726    mRemainingFrames = notificationFrames;
1727    mRetryOnPartialBuffer = true;
1728
1729    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1730    return 0;
1731}
1732
1733status_t AudioTrack::restoreTrack_l(const char *from)
1734{
1735    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1736          isOffloaded_l() ? "Offloaded" : "PCM", from);
1737    ++mSequence;
1738    status_t result;
1739
1740    // refresh the audio configuration cache in this process to make sure we get new
1741    // output parameters in createTrack_l()
1742    AudioSystem::clearAudioConfigCache();
1743
1744    if (isOffloaded_l()) {
1745        // FIXME re-creation of offloaded tracks is not yet implemented
1746        return DEAD_OBJECT;
1747    }
1748
1749    // if the new IAudioTrack is created, createTrack_l() will modify the
1750    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1751    // It will also drop the strong references to the previous IAudioTrack and IMemory.
1752
1753    // take the frames that will be lost by track recreation into account in saved position
1754    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1755    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
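    // Illustrative example: if the server position is 48000 frames and 2048 written but
    // not yet consumed frames remain in the old buffer, the new track is created with an
    // epoch of 50048; the epoch is added back when positions are reported (see getTimestamp()
    // below), so the position visible to the application does not jump backwards.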
1756    result = createTrack_l(position /*epoch*/);
1757
1758    if (result == NO_ERROR) {
1759        // continue playback from last known position, but
1760        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1761        if (mStaticProxy != NULL) {
1762            mLoopPeriod = 0;
1763            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1764        }
1765        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1766        //       track destruction have been played? This is critical for the SoundPool implementation.
1767        //       This must be broken, and needs to be tested/debugged.
1768#if 0
1769        // restore write index and set other indexes to reflect empty buffer status
1770        if (!strcmp(from, "start")) {
1771            // Make sure that a client relying on callback events indicating underrun or
1772            // the actual amount of audio frames played (e.g SoundPool) receives them.
1773            if (mSharedBuffer == 0) {
1774                // restart playback even if buffer is not completely filled.
1775                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1776            }
1777        }
1778#endif
1779        if (mState == STATE_ACTIVE) {
1780            result = mAudioTrack->start();
1781        }
1782    }
1783    if (result != NO_ERROR) {
1784        ALOGW("restoreTrack_l() failed status %d", result);
1785        mState = STATE_STOPPED;
1786    }
1787
1788    return result;
1789}
1790
1791status_t AudioTrack::setParameters(const String8& keyValuePairs)
1792{
1793    AutoMutex lock(mLock);
1794    return mAudioTrack->setParameters(keyValuePairs);
1795}
1796
1797status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1798{
1799    AutoMutex lock(mLock);
1800    // FIXME not implemented for fast tracks; should use proxy and SSQ
1801    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1802        return INVALID_OPERATION;
1803    }
1804    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1805        return INVALID_OPERATION;
1806    }
1807    status_t status = mAudioTrack->getTimestamp(timestamp);
1808    if (status == NO_ERROR) {
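        // The proxy epoch re-bases the server-side position onto the application's continuous
        // frame count, e.g. after restoreTrack_l() created a replacement IAudioTrack and
        // passed the saved position as the new epoch.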
1809        timestamp.mPosition += mProxy->getEpoch();
1810    }
1811    return status;
1812}
1813
1814String8 AudioTrack::getParameters(const String8& keys)
1815{
1816    audio_io_handle_t output = getOutput();
1817    if (output != AUDIO_IO_HANDLE_NONE) {
1818        return AudioSystem::getParameters(output, keys);
1819    } else {
1820        return String8::empty();
1821    }
1822}
1823
1824bool AudioTrack::isOffloaded() const
1825{
1826    AutoMutex lock(mLock);
1827    return isOffloaded_l();
1828}
1829
1830status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
1831{
1832
1833    const size_t SIZE = 256;
1834    char buffer[SIZE];
1835    String8 result;
1836
1837    result.append(" AudioTrack::dump\n");
1838    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1839            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
1840    result.append(buffer);
1841    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
1842            mChannelCount, mFrameCount);
1843    result.append(buffer);
1844    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1845    result.append(buffer);
1846    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1847    result.append(buffer);
1848    ::write(fd, result.string(), result.size());
1849    return NO_ERROR;
1850}
1851
1852uint32_t AudioTrack::getUnderrunFrames() const
1853{
1854    AutoMutex lock(mLock);
1855    return mProxy->getUnderrunFrames();
1856}
1857
1858// =========================================================================
1859
1860void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
1861{
1862    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1863    if (audioTrack != 0) {
1864        AutoMutex lock(audioTrack->mLock);
1865        audioTrack->mProxy->binderDied();
1866    }
1867}
1868
1869// =========================================================================
1870
1871AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1872    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1873      mIgnoreNextPausedInt(false)
1874{
1875}
1876
1877AudioTrack::AudioTrackThread::~AudioTrackThread()
1878{
1879}
1880
1881bool AudioTrack::AudioTrackThread::threadLoop()
1882{
1883    {
1884        AutoMutex _l(mMyLock);
1885        if (mPaused) {
1886            mMyCond.wait(mMyLock);
1887            // caller will check for exitPending()
1888            return true;
1889        }
1890        if (mIgnoreNextPausedInt) {
1891            mIgnoreNextPausedInt = false;
1892            mPausedInt = false;
1893        }
1894        if (mPausedInt) {
1895            if (mPausedNs > 0) {
1896                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1897            } else {
1898                mMyCond.wait(mMyLock);
1899            }
1900            mPausedInt = false;
1901            return true;
1902        }
1903    }
1904    nsecs_t ns = mReceiver.processAudioBuffer();
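    // Return value contract, as handled below: 0 means run again immediately; NS_INACTIVE
    // parks the thread until resume(); NS_NEVER exits the loop; NS_WHENEVER is treated as a
    // 1 s poll; any other (positive) value is a sleep duration in nanoseconds.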
1905    switch (ns) {
1906    case 0:
1907        return true;
1908    case NS_INACTIVE:
1909        pauseInternal();
1910        return true;
1911    case NS_NEVER:
1912        return false;
1913    case NS_WHENEVER:
1914        // FIXME increase poll interval, or make event-driven
1915        ns = 1000000000LL;
1916        // fall through
1917    default:
1918        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", (long long) ns);
1919        pauseInternal(ns);
1920        return true;
1921    }
1922}
1923
1924void AudioTrack::AudioTrackThread::requestExit()
1925{
1926    // must be in this order to avoid a race condition
1927    Thread::requestExit();
1928    resume();
1929}
1930
1931void AudioTrack::AudioTrackThread::pause()
1932{
1933    AutoMutex _l(mMyLock);
1934    mPaused = true;
1935}
1936
1937void AudioTrack::AudioTrackThread::resume()
1938{
1939    AutoMutex _l(mMyLock);
1940    mIgnoreNextPausedInt = true;
1941    if (mPaused || mPausedInt) {
1942        mPaused = false;
1943        mPausedInt = false;
1944        mMyCond.signal();
1945    }
1946}
1947
1948void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1949{
1950    AutoMutex _l(mMyLock);
1951    mPausedInt = true;
1952    mPausedNs = ns;
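    // The NS_INACTIVE case in threadLoop() calls pauseInternal() with its default argument
    // (0 in the class declaration), so mPausedNs stays 0 and the loop waits with no timeout
    // until resume() or requestExit() signals mMyCond.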
1953}
1954
1955} // namespace android
1956