// AudioTrack.cpp, revision e541269be94f3a1072932d51537905b120ef4733
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <sys/resource.h>
23#include <audio_utils/primitives.h>
24#include <binder/IPCThreadState.h>
25#include <media/AudioTrack.h>
26#include <utils/Log.h>
27#include <private/media/AudioTrackShared.h>
28#include <media/IAudioFlinger.h>
29
30#define WAIT_PERIOD_MS                  10
31#define WAIT_STREAM_END_TIMEOUT_SEC     120
32
33
34namespace android {
35// ---------------------------------------------------------------------------
36
37// static
38status_t AudioTrack::getMinFrameCount(
39        size_t* frameCount,
40        audio_stream_type_t streamType,
41        uint32_t sampleRate)
42{
43    if (frameCount == NULL) {
44        return BAD_VALUE;
45    }
46
47    // FIXME merge with similar code in createTrack_l(), except we're missing
48    //       some information here that is available in createTrack_l():
49    //          audio_io_handle_t output
50    //          audio_format_t format
51    //          audio_channel_mask_t channelMask
52    //          audio_output_flags_t flags
53    uint32_t afSampleRate;
54    status_t status;
55    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
56    if (status != NO_ERROR) {
57        ALOGE("Unable to query output sample rate for stream type %d; status %d",
58                streamType, status);
59        return status;
60    }
61    size_t afFrameCount;
62    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
63    if (status != NO_ERROR) {
64        ALOGE("Unable to query output frame count for stream type %d; status %d",
65                streamType, status);
66        return status;
67    }
68    uint32_t afLatency;
69    status = AudioSystem::getOutputLatency(&afLatency, streamType);
70    if (status != NO_ERROR) {
71        ALOGE("Unable to query output latency for stream type %d; status %d",
72                streamType, status);
73        return status;
74    }
75
76    // Ensure that buffer depth covers at least audio hardware latency
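    // (Illustrative note: afLatency is reported in milliseconds, and
    // (1000 * afFrameCount) / afSampleRate is the duration of one hardware buffer in
    // milliseconds, so the quotient below is the number of hardware buffers needed to
    // cover the reported latency.  For example, 160 ms of latency with 1024-frame
    // buffers at 44100 Hz (about 23 ms each) gives a minBufCount of 6.)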
77    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
78    if (minBufCount < 2) {
79        minBufCount = 2;
80    }
81
82    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
83            afFrameCount * minBufCount * sampleRate / afSampleRate;
84    // The formula above should always produce a non-zero value, but return an error
85    // in the unlikely event that it does not, as that's part of the API contract.
86    if (*frameCount == 0) {
87        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
88                streamType, sampleRate);
89        return BAD_VALUE;
90    }
91    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
92            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
93    return NO_ERROR;
94}
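
// Illustrative usage sketch (not part of the original file): a client would typically
// query the minimum frame count before constructing a track, for example:
//
//     size_t minFrames = 0;
//     if (AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 48000) == NO_ERROR) {
//         // request at least minFrames (often a small multiple of it) when creating the track
//     }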
95
96// ---------------------------------------------------------------------------
97
98AudioTrack::AudioTrack()
99    : mStatus(NO_INIT),
100      mIsTimed(false),
101      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
102      mPreviousSchedulingGroup(SP_DEFAULT),
103      mPausedPosition(0)
104{
105}
106
107AudioTrack::AudioTrack(
108        audio_stream_type_t streamType,
109        uint32_t sampleRate,
110        audio_format_t format,
111        audio_channel_mask_t channelMask,
112        size_t frameCount,
113        audio_output_flags_t flags,
114        callback_t cbf,
115        void* user,
116        uint32_t notificationFrames,
117        int sessionId,
118        transfer_type transferType,
119        const audio_offload_info_t *offloadInfo,
120        int uid,
121        pid_t pid)
122    : mStatus(NO_INIT),
123      mIsTimed(false),
124      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
125      mPreviousSchedulingGroup(SP_DEFAULT),
126      mPausedPosition(0)
127{
128    mStatus = set(streamType, sampleRate, format, channelMask,
129            frameCount, flags, cbf, user, notificationFrames,
130            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
131            offloadInfo, uid, pid);
132}
133
134AudioTrack::AudioTrack(
135        audio_stream_type_t streamType,
136        uint32_t sampleRate,
137        audio_format_t format,
138        audio_channel_mask_t channelMask,
139        const sp<IMemory>& sharedBuffer,
140        audio_output_flags_t flags,
141        callback_t cbf,
142        void* user,
143        uint32_t notificationFrames,
144        int sessionId,
145        transfer_type transferType,
146        const audio_offload_info_t *offloadInfo,
147        int uid,
148        pid_t pid)
149    : mStatus(NO_INIT),
150      mIsTimed(false),
151      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
152      mPreviousSchedulingGroup(SP_DEFAULT),
153      mPausedPosition(0)
154{
155    mStatus = set(streamType, sampleRate, format, channelMask,
156            0 /*frameCount*/, flags, cbf, user, notificationFrames,
157            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
158            uid, pid);
159}
160
161AudioTrack::~AudioTrack()
162{
163    if (mStatus == NO_ERROR) {
        // Make sure that the callback function exits if it is looping on a
        // buffer-full condition in obtainBuffer(); otherwise the callback
        // thread will never exit.
167        stop();
168        if (mAudioTrackThread != 0) {
169            mProxy->interrupt();
170            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
171            mAudioTrackThread->requestExitAndWait();
172            mAudioTrackThread.clear();
173        }
174        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
175        mAudioTrack.clear();
176        IPCThreadState::self()->flushCommands();
177        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
178                IPCThreadState::self()->getCallingPid(), mClientPid);
179        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
180    }
181}
182
183status_t AudioTrack::set(
184        audio_stream_type_t streamType,
185        uint32_t sampleRate,
186        audio_format_t format,
187        audio_channel_mask_t channelMask,
188        size_t frameCount,
189        audio_output_flags_t flags,
190        callback_t cbf,
191        void* user,
192        uint32_t notificationFrames,
193        const sp<IMemory>& sharedBuffer,
194        bool threadCanCallJava,
195        int sessionId,
196        transfer_type transferType,
197        const audio_offload_info_t *offloadInfo,
198        int uid,
199        pid_t pid)
200{
201    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
202          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
203          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
204          sessionId, transferType);
205
206    switch (transferType) {
207    case TRANSFER_DEFAULT:
208        if (sharedBuffer != 0) {
209            transferType = TRANSFER_SHARED;
210        } else if (cbf == NULL || threadCanCallJava) {
211            transferType = TRANSFER_SYNC;
212        } else {
213            transferType = TRANSFER_CALLBACK;
214        }
215        break;
216    case TRANSFER_CALLBACK:
217        if (cbf == NULL || sharedBuffer != 0) {
218            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
219            return BAD_VALUE;
220        }
221        break;
222    case TRANSFER_OBTAIN:
223    case TRANSFER_SYNC:
224        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
226            return BAD_VALUE;
227        }
228        break;
229    case TRANSFER_SHARED:
230        if (sharedBuffer == 0) {
231            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
232            return BAD_VALUE;
233        }
234        break;
235    default:
236        ALOGE("Invalid transfer type %d", transferType);
237        return BAD_VALUE;
238    }
239    mSharedBuffer = sharedBuffer;
240    mTransfer = transferType;
241
242    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
243            sharedBuffer->size());
244
245    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
246
247    AutoMutex lock(mLock);
248
249    // invariant that mAudioTrack != 0 is true only after set() returns successfully
250    if (mAudioTrack != 0) {
251        ALOGE("Track already in use");
252        return INVALID_OPERATION;
253    }
254
255    // handle default values first.
256    if (streamType == AUDIO_STREAM_DEFAULT) {
257        streamType = AUDIO_STREAM_MUSIC;
258    }
259    if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
260        ALOGE("Invalid stream type %d", streamType);
261        return BAD_VALUE;
262    }
263    mStreamType = streamType;
264
265    status_t status;
266    if (sampleRate == 0) {
267        status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType);
268        if (status != NO_ERROR) {
269            ALOGE("Could not get output sample rate for stream type %d; status %d",
270                    streamType, status);
271            return status;
272        }
273    }
274    mSampleRate = sampleRate;
275
276    // these below should probably come from the audioFlinger too...
277    if (format == AUDIO_FORMAT_DEFAULT) {
278        format = AUDIO_FORMAT_PCM_16_BIT;
279    }
280
281    // validate parameters
282    if (!audio_is_valid_format(format)) {
283        ALOGE("Invalid format %#x", format);
284        return BAD_VALUE;
285    }
286    mFormat = format;
287
288    if (!audio_is_output_channel(channelMask)) {
289        ALOGE("Invalid channel mask %#x", channelMask);
290        return BAD_VALUE;
291    }
292    mChannelMask = channelMask;
293    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
294    mChannelCount = channelCount;
295
296    // AudioFlinger does not currently support 8-bit data in shared memory
297    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
298        ALOGE("8-bit data in shared memory is not supported");
299        return BAD_VALUE;
300    }
301
302    // force direct flag if format is not linear PCM
303    // or offload was requested
304    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
305            || !audio_is_linear_pcm(format)) {
306        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
307                    ? "Offload request, forcing to Direct Output"
308                    : "Not linear PCM, forcing to Direct Output");
309        flags = (audio_output_flags_t)
310                // FIXME why can't we allow direct AND fast?
311                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
312    }
313    // only allow deep buffering for music stream type
314    if (streamType != AUDIO_STREAM_MUSIC) {
315        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
316    }
317
318    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
319        if (audio_is_linear_pcm(format)) {
320            mFrameSize = channelCount * audio_bytes_per_sample(format);
321        } else {
322            mFrameSize = sizeof(uint8_t);
323        }
324        mFrameSizeAF = mFrameSize;
325    } else {
326        ALOG_ASSERT(audio_is_linear_pcm(format));
327        mFrameSize = channelCount * audio_bytes_per_sample(format);
328        mFrameSizeAF = channelCount * audio_bytes_per_sample(
329                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
330        // createTrack will return an error if PCM format is not supported by server,
331        // so no need to check for specific PCM formats here
332    }
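    // (Illustrative note: for stereo 16-bit PCM, mFrameSize is 2 channels * 2 bytes = 4 bytes.
    // For 8-bit PCM on a non-direct output, mFrameSizeAF is computed with 16-bit samples
    // because the data is expanded to 16-bit before being handed to AudioFlinger;
    // see the 8-bit handling in write() below.)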
333
334    // Make copy of input parameter offloadInfo so that in the future:
335    //  (a) createTrack_l doesn't need it as an input parameter
336    //  (b) we can support re-creation of offloaded tracks
337    if (offloadInfo != NULL) {
338        mOffloadInfoCopy = *offloadInfo;
339        mOffloadInfo = &mOffloadInfoCopy;
340    } else {
341        mOffloadInfo = NULL;
342    }
343
344    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
345    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
346    mSendLevel = 0.0f;
347    // mFrameCount is initialized in createTrack_l
348    mReqFrameCount = frameCount;
349    mNotificationFramesReq = notificationFrames;
350    mNotificationFramesAct = 0;
351    mSessionId = sessionId;
352    int callingpid = IPCThreadState::self()->getCallingPid();
353    int mypid = getpid();
354    if (uid == -1 || (callingpid != mypid)) {
355        mClientUid = IPCThreadState::self()->getCallingUid();
356    } else {
357        mClientUid = uid;
358    }
359    if (pid == -1 || (callingpid != mypid)) {
360        mClientPid = callingpid;
361    } else {
362        mClientPid = pid;
363    }
364    mAuxEffectId = 0;
365    mFlags = flags;
366    mCbf = cbf;
367
368    if (cbf != NULL) {
369        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
370        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
371    }
372
373    // create the IAudioTrack
374    status = createTrack_l(0 /*epoch*/);
375
376    if (status != NO_ERROR) {
377        if (mAudioTrackThread != 0) {
378            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
379            mAudioTrackThread->requestExitAndWait();
380            mAudioTrackThread.clear();
381        }
382        return status;
383    }
384
385    mStatus = NO_ERROR;
386    mState = STATE_STOPPED;
387    mUserData = user;
388    mLoopPeriod = 0;
389    mMarkerPosition = 0;
390    mMarkerReached = false;
391    mNewPosition = 0;
392    mUpdatePeriod = 0;
393    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
394    mSequence = 1;
395    mObservedSequence = mSequence;
396    mInUnderrun = false;
397
398    return NO_ERROR;
399}
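
// Illustrative usage sketch (not part of the original file; myCallback and myCookie are
// placeholders): a streaming client typically supplies a callback and lets set() choose
// TRANSFER_CALLBACK, for example:
//
//     sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 48000 /*sampleRate*/,
//             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/,
//             AUDIO_OUTPUT_FLAG_NONE, myCallback, myCookie);
//     if (track->initCheck() == NO_ERROR) {
//         track->start();
//     }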
400
401// -------------------------------------------------------------------------
402
403status_t AudioTrack::start()
404{
405    AutoMutex lock(mLock);
406
407    if (mState == STATE_ACTIVE) {
408        return INVALID_OPERATION;
409    }
410
411    mInUnderrun = true;
412
413    State previousState = mState;
414    if (previousState == STATE_PAUSED_STOPPING) {
415        mState = STATE_STOPPING;
416    } else {
417        mState = STATE_ACTIVE;
418    }
419    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
420        // reset current position as seen by client to 0
421        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
422        // force refresh of remaining frames by processAudioBuffer() as last
423        // write before stop could be partial.
424        mRefreshRemaining = true;
425    }
426    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
427    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
428
429    sp<AudioTrackThread> t = mAudioTrackThread;
430    if (t != 0) {
431        if (previousState == STATE_STOPPING) {
432            mProxy->interrupt();
433        } else {
434            t->resume();
435        }
436    } else {
437        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
438        get_sched_policy(0, &mPreviousSchedulingGroup);
439        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
440    }
441
442    status_t status = NO_ERROR;
443    if (!(flags & CBLK_INVALID)) {
444        status = mAudioTrack->start();
445        if (status == DEAD_OBJECT) {
446            flags |= CBLK_INVALID;
447        }
448    }
449    if (flags & CBLK_INVALID) {
450        status = restoreTrack_l("start");
451    }
452
453    if (status != NO_ERROR) {
454        ALOGE("start() status %d", status);
455        mState = previousState;
456        if (t != 0) {
457            if (previousState != STATE_STOPPING) {
458                t->pause();
459            }
460        } else {
461            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
462            set_sched_policy(0, mPreviousSchedulingGroup);
463        }
464    }
465
466    return status;
467}
468
469void AudioTrack::stop()
470{
471    AutoMutex lock(mLock);
472    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
473        return;
474    }
475
476    if (isOffloaded_l()) {
477        mState = STATE_STOPPING;
478    } else {
479        mState = STATE_STOPPED;
480    }
481
482    mProxy->interrupt();
483    mAudioTrack->stop();
484    // the playback head position will reset to 0, so if a marker is set, we need
485    // to activate it again
486    mMarkerReached = false;
487#if 0
    // Force a flush if a shared buffer is used; otherwise AudioFlinger
    // will not stop before the end of the buffer is reached.
    // This may be needed to guarantee that playback actually stops, most likely when looping is enabled.
491    if (mSharedBuffer != 0) {
492        flush_l();
493    }
494#endif
495
496    sp<AudioTrackThread> t = mAudioTrackThread;
497    if (t != 0) {
498        if (!isOffloaded_l()) {
499            t->pause();
500        }
501    } else {
502        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
503        set_sched_policy(0, mPreviousSchedulingGroup);
504    }
505}
506
507bool AudioTrack::stopped() const
508{
509    AutoMutex lock(mLock);
510    return mState != STATE_ACTIVE;
511}
512
513void AudioTrack::flush()
514{
515    if (mSharedBuffer != 0) {
516        return;
517    }
518    AutoMutex lock(mLock);
519    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
520        return;
521    }
522    flush_l();
523}
524
525void AudioTrack::flush_l()
526{
527    ALOG_ASSERT(mState != STATE_ACTIVE);
528
529    // clear playback marker and periodic update counter
530    mMarkerPosition = 0;
531    mMarkerReached = false;
532    mUpdatePeriod = 0;
533    mRefreshRemaining = true;
534
535    mState = STATE_FLUSHED;
536    if (isOffloaded_l()) {
537        mProxy->interrupt();
538    }
539    mProxy->flush();
540    mAudioTrack->flush();
541}
542
543void AudioTrack::pause()
544{
545    AutoMutex lock(mLock);
546    if (mState == STATE_ACTIVE) {
547        mState = STATE_PAUSED;
548    } else if (mState == STATE_STOPPING) {
549        mState = STATE_PAUSED_STOPPING;
550    } else {
551        return;
552    }
553    mProxy->interrupt();
554    mAudioTrack->pause();
555
556    if (isOffloaded_l()) {
557        if (mOutput != AUDIO_IO_HANDLE_NONE) {
558            uint32_t halFrames;
            // OffloadThread sends the HAL pause from its threadLoop, so the time
            // cached here can be slightly off.
561            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
562            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
563        }
564    }
565}
566
567status_t AudioTrack::setVolume(float left, float right)
568{
569    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
570        return BAD_VALUE;
571    }
572
573    AutoMutex lock(mLock);
574    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
575    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
576
577    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
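    // (Note: 0x1000 represents a gain of 1.0 in the 4.12 fixed-point format expected by the
    // proxy; the right gain is packed into the high 16 bits and the left gain into the low
    // 16 bits of the combined value.)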
578
579    if (isOffloaded_l()) {
580        mAudioTrack->signal();
581    }
582    return NO_ERROR;
583}
584
585status_t AudioTrack::setVolume(float volume)
586{
587    return setVolume(volume, volume);
588}
589
590status_t AudioTrack::setAuxEffectSendLevel(float level)
591{
592    if (level < 0.0f || level > 1.0f) {
593        return BAD_VALUE;
594    }
595
596    AutoMutex lock(mLock);
597    mSendLevel = level;
598    mProxy->setSendLevel(level);
599
600    return NO_ERROR;
601}
602
603void AudioTrack::getAuxEffectSendLevel(float* level) const
604{
605    if (level != NULL) {
606        *level = mSendLevel;
607    }
608}
609
610status_t AudioTrack::setSampleRate(uint32_t rate)
611{
612    if (mIsTimed || isOffloaded()) {
613        return INVALID_OPERATION;
614    }
615
616    uint32_t afSamplingRate;
617    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
618        return NO_INIT;
619    }
620    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
621    if (rate == 0 || rate > afSamplingRate*2 ) {
622        return BAD_VALUE;
623    }
624
625    AutoMutex lock(mLock);
626    mSampleRate = rate;
627    mProxy->setSampleRate(rate);
628
629    return NO_ERROR;
630}
631
632uint32_t AudioTrack::getSampleRate() const
633{
634    if (mIsTimed) {
635        return 0;
636    }
637
638    AutoMutex lock(mLock);
639
    // The sample rate can be updated during playback by the offloaded decoder, so we need to
    // query the HAL and update it if needed.
    // FIXME use Proxy return channel to update the rate from server and avoid polling here
643    if (isOffloaded_l()) {
644        if (mOutput != AUDIO_IO_HANDLE_NONE) {
645            uint32_t sampleRate = 0;
646            status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
647            if (status == NO_ERROR) {
648                mSampleRate = sampleRate;
649            }
650        }
651    }
652    return mSampleRate;
653}
654
655status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
656{
657    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
658        return INVALID_OPERATION;
659    }
660
661    if (loopCount == 0) {
662        ;
663    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
664            loopEnd - loopStart >= MIN_LOOP) {
665        ;
666    } else {
667        return BAD_VALUE;
668    }
669
670    AutoMutex lock(mLock);
671    // See setPosition() regarding setting parameters such as loop points or position while active
672    if (mState == STATE_ACTIVE) {
673        return INVALID_OPERATION;
674    }
675    setLoop_l(loopStart, loopEnd, loopCount);
676    return NO_ERROR;
677}
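
// Illustrative usage sketch (not part of the original file): for a static (shared buffer)
// track, a client can loop the whole buffer; a negative loopCount such as -1 requests
// indefinite looping, subject to the validation above:
//
//     track->setLoop(0 /*loopStart*/, track->frameCount() /*loopEnd*/, -1 /*loopCount*/);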
678
679void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
680{
681    // FIXME If setting a loop also sets position to start of loop, then
682    //       this is correct.  Otherwise it should be removed.
683    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
684    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
685    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
686}
687
688status_t AudioTrack::setMarkerPosition(uint32_t marker)
689{
690    // The only purpose of setting marker position is to get a callback
691    if (mCbf == NULL || isOffloaded()) {
692        return INVALID_OPERATION;
693    }
694
695    AutoMutex lock(mLock);
696    mMarkerPosition = marker;
697    mMarkerReached = false;
698
699    return NO_ERROR;
700}
701
702status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
703{
704    if (isOffloaded()) {
705        return INVALID_OPERATION;
706    }
707    if (marker == NULL) {
708        return BAD_VALUE;
709    }
710
711    AutoMutex lock(mLock);
712    *marker = mMarkerPosition;
713
714    return NO_ERROR;
715}
716
717status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
718{
719    // The only purpose of setting position update period is to get a callback
720    if (mCbf == NULL || isOffloaded()) {
721        return INVALID_OPERATION;
722    }
723
724    AutoMutex lock(mLock);
725    mNewPosition = mProxy->getPosition() + updatePeriod;
726    mUpdatePeriod = updatePeriod;
727
728    return NO_ERROR;
729}
730
731status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
732{
733    if (isOffloaded()) {
734        return INVALID_OPERATION;
735    }
736    if (updatePeriod == NULL) {
737        return BAD_VALUE;
738    }
739
740    AutoMutex lock(mLock);
741    *updatePeriod = mUpdatePeriod;
742
743    return NO_ERROR;
744}
745
746status_t AudioTrack::setPosition(uint32_t position)
747{
748    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
749        return INVALID_OPERATION;
750    }
751    if (position > mFrameCount) {
752        return BAD_VALUE;
753    }
754
755    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set them; but this
    // would do the "wrong" thing, since the position has continued to advance in the meantime.
    // If we ever provide a sequencer in the server, we could allow a way for the app to specify
    // how it wants to handle such scenarios.
762    if (mState == STATE_ACTIVE) {
763        return INVALID_OPERATION;
764    }
765    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
766    mLoopPeriod = 0;
767    // FIXME Check whether loops and setting position are incompatible in old code.
768    // If we use setLoop for both purposes we lose the capability to set the position while looping.
769    mStaticProxy->setLoop(position, mFrameCount, 0);
770
771    return NO_ERROR;
772}
773
774status_t AudioTrack::getPosition(uint32_t *position) const
775{
776    if (position == NULL) {
777        return BAD_VALUE;
778    }
779
780    AutoMutex lock(mLock);
781    if (isOffloaded_l()) {
782        uint32_t dspFrames = 0;
783
784        if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
785            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
786            *position = mPausedPosition;
787            return NO_ERROR;
788        }
789
790        if (mOutput != AUDIO_IO_HANDLE_NONE) {
791            uint32_t halFrames;
792            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
793        }
794        *position = dspFrames;
795    } else {
796        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
797        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
798                mProxy->getPosition();
799    }
800    return NO_ERROR;
801}
802
803status_t AudioTrack::getBufferPosition(uint32_t *position)
804{
805    if (mSharedBuffer == 0 || mIsTimed) {
806        return INVALID_OPERATION;
807    }
808    if (position == NULL) {
809        return BAD_VALUE;
810    }
811
812    AutoMutex lock(mLock);
813    *position = mStaticProxy->getBufferPosition();
814    return NO_ERROR;
815}
816
817status_t AudioTrack::reload()
818{
819    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
820        return INVALID_OPERATION;
821    }
822
823    AutoMutex lock(mLock);
824    // See setPosition() regarding setting parameters such as loop points or position while active
825    if (mState == STATE_ACTIVE) {
826        return INVALID_OPERATION;
827    }
828    mNewPosition = mUpdatePeriod;
829    mLoopPeriod = 0;
830    // FIXME The new code cannot reload while keeping a loop specified.
831    // Need to check how the old code handled this, and whether it's a significant change.
832    mStaticProxy->setLoop(0, mFrameCount, 0);
833    return NO_ERROR;
834}
835
836audio_io_handle_t AudioTrack::getOutput() const
837{
838    AutoMutex lock(mLock);
839    return mOutput;
840}
841
842status_t AudioTrack::attachAuxEffect(int effectId)
843{
844    AutoMutex lock(mLock);
845    status_t status = mAudioTrack->attachAuxEffect(effectId);
846    if (status == NO_ERROR) {
847        mAuxEffectId = effectId;
848    }
849    return status;
850}
851
852// -------------------------------------------------------------------------
853
854// must be called with mLock held
855status_t AudioTrack::createTrack_l(size_t epoch)
856{
857    status_t status;
858    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
859    if (audioFlinger == 0) {
860        ALOGE("Could not get audioflinger");
861        return NO_INIT;
862    }
863
864    audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat,
865            mChannelMask, mFlags, mOffloadInfo);
866    if (output == AUDIO_IO_HANDLE_NONE) {
867        ALOGE("Could not get audio output for stream type %d, sample rate %u, format %#x, "
868              "channel mask %#x, flags %#x",
869              mStreamType, mSampleRate, mFormat, mChannelMask, mFlags);
870        return BAD_VALUE;
871    }
872    {
873    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
874    // we must release it ourselves if anything goes wrong.
875
876    // Not all of these values are needed under all conditions, but it is easier to get them all
877
878    uint32_t afLatency;
879    status = AudioSystem::getLatency(output, &afLatency);
880    if (status != NO_ERROR) {
881        ALOGE("getLatency(%d) failed status %d", output, status);
882        goto release;
883    }
884
885    size_t afFrameCount;
886    status = AudioSystem::getFrameCount(output, mStreamType, &afFrameCount);
887    if (status != NO_ERROR) {
888        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, mStreamType, status);
889        goto release;
890    }
891
892    uint32_t afSampleRate;
893    status = AudioSystem::getSamplingRate(output, mStreamType, &afSampleRate);
894    if (status != NO_ERROR) {
895        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, mStreamType, status);
896        goto release;
897    }
898
899    // Client decides whether the track is TIMED (see below), but can only express a preference
900    // for FAST.  Server will perform additional tests.
901    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
902            // either of these use cases:
903            // use case 1: shared buffer
904            (mSharedBuffer != 0) ||
905            // use case 2: callback transfer mode
906            (mTransfer == TRANSFER_CALLBACK)) &&
907            // matching sample rate
908            (mSampleRate == afSampleRate))) {
909        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
910        // once denied, do not request again if IAudioTrack is re-created
911        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
912    }
913    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
914
    // The client's AudioTrack buffer is divided into n parts for the purpose of wakeup by the server, where
916    //  n = 1   fast track with single buffering; nBuffering is ignored
917    //  n = 2   fast track with double buffering
918    //  n = 2   normal track, no sample rate conversion
919    //  n = 3   normal track, with sample rate conversion
920    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
921    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
922    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
923
924    mNotificationFramesAct = mNotificationFramesReq;
925
926    size_t frameCount = mReqFrameCount;
927    if (!audio_is_linear_pcm(mFormat)) {
928
929        if (mSharedBuffer != 0) {
930            // Same comment as below about ignoring frameCount parameter for set()
931            frameCount = mSharedBuffer->size();
932        } else if (frameCount == 0) {
933            frameCount = afFrameCount;
934        }
935        if (mNotificationFramesAct != frameCount) {
936            mNotificationFramesAct = frameCount;
937        }
938    } else if (mSharedBuffer != 0) {
939
940        // Ensure that buffer alignment matches channel count
941        // 8-bit data in shared memory is not currently supported by AudioFlinger
942        size_t alignment = audio_bytes_per_sample(
943                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
944        if (alignment & 1) {
945            alignment = 1;
946        }
947        if (mChannelCount > 1) {
948            // More than 2 channels does not require stronger alignment than stereo
949            alignment <<= 1;
950        }
951        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
952            ALOGE("Invalid buffer alignment: address %p, channel count %u",
953                    mSharedBuffer->pointer(), mChannelCount);
954            status = BAD_VALUE;
955            goto release;
956        }
957
958        // When initializing a shared buffer AudioTrack via constructors,
959        // there's no frameCount parameter.
960        // But when initializing a shared buffer AudioTrack via set(),
961        // there _is_ a frameCount parameter.  We silently ignore it.
962        frameCount = mSharedBuffer->size() / mFrameSizeAF;
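        // (Illustrative note: e.g. a 176400-byte shared buffer of stereo 16-bit PCM
        // (4 bytes per frame) yields 44100 frames, i.e. one second at 44.1 kHz.)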
963
964    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
965
966        // FIXME move these calculations and associated checks to server
967
968        // Ensure that buffer depth covers at least audio hardware latency
969        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
970        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
971                afFrameCount, minBufCount, afSampleRate, afLatency);
972        if (minBufCount <= nBuffering) {
973            minBufCount = nBuffering;
974        }
975
976        size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
977        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
978                ", afLatency=%d",
979                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
980
981        if (frameCount == 0) {
982            frameCount = minFrameCount;
983        } else if (frameCount < minFrameCount) {
984            // not ALOGW because it happens all the time when playing key clicks over A2DP
985            ALOGV("Minimum buffer size corrected from %d to %d",
986                     frameCount, minFrameCount);
987            frameCount = minFrameCount;
988        }
989        // Make sure that application is notified with sufficient margin before underrun
990        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
991            mNotificationFramesAct = frameCount/nBuffering;
992        }
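        // (Illustrative note: when nBuffering == 2 (no sample rate conversion), a 4096-frame
        // buffer yields notifications at most every 2048 frames, so the application is woken
        // at least once per half buffer to refill before an underrun.)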
993
994    } else {
995        // For fast tracks, the frame count calculations and checks are done by server
996    }
997
998    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
999    if (mIsTimed) {
1000        trackFlags |= IAudioFlinger::TRACK_TIMED;
1001    }
1002
1003    pid_t tid = -1;
1004    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1005        trackFlags |= IAudioFlinger::TRACK_FAST;
1006        if (mAudioTrackThread != 0) {
1007            tid = mAudioTrackThread->getTid();
1008        }
1009    }
1010
1011    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1012        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1013    }
1014
1015    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1016                                // but we will still need the original value also
1017    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
1018                                                      mSampleRate,
1019                                                      // AudioFlinger only sees 16-bit PCM
1020                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
1021                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
1022                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
1023                                                      mChannelMask,
1024                                                      &temp,
1025                                                      &trackFlags,
1026                                                      mSharedBuffer,
1027                                                      output,
1028                                                      tid,
1029                                                      &mSessionId,
1030                                                      mClientUid,
1031                                                      &status);
1032
1033    if (status != NO_ERROR) {
1034        ALOGE("AudioFlinger could not create track, status: %d", status);
1035        goto release;
1036    }
1037    ALOG_ASSERT(track != 0);
1038
1039    // AudioFlinger now owns the reference to the I/O handle,
1040    // so we are no longer responsible for releasing it.
1041
1042    sp<IMemory> iMem = track->getCblk();
1043    if (iMem == 0) {
1044        ALOGE("Could not get control block");
1045        return NO_INIT;
1046    }
1047    void *iMemPointer = iMem->pointer();
1048    if (iMemPointer == NULL) {
1049        ALOGE("Could not get control block pointer");
1050        return NO_INIT;
1051    }
1052    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1053    if (mAudioTrack != 0) {
1054        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1055        mDeathNotifier.clear();
1056    }
1057    mAudioTrack = track;
1058
1059    mCblkMemory = iMem;
1060    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1061    mCblk = cblk;
1062    // note that temp is the (possibly revised) value of frameCount
1063    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1064        // In current design, AudioTrack client checks and ensures frame count validity before
1065        // passing it to AudioFlinger so AudioFlinger should not return a different value except
1066        // for fast track as it uses a special method of assigning frame count.
1067        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
1068    }
1069    frameCount = temp;
1070
1071    mAwaitBoost = false;
1072    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1073        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1074            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
1075            mAwaitBoost = true;
1076            if (mSharedBuffer == 0) {
1077                // Theoretically double-buffering is not required for fast tracks,
1078                // due to tighter scheduling.  But in practice, to accommodate kernels with
1079                // scheduling jitter, and apps with computation jitter, we use double-buffering.
1080                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1081                    mNotificationFramesAct = frameCount/nBuffering;
1082                }
1083            }
1084        } else {
1085            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1086            // once denied, do not request again if IAudioTrack is re-created
1087            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1088            if (mSharedBuffer == 0) {
1089                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1090                    mNotificationFramesAct = frameCount/nBuffering;
1091                }
1092            }
1093        }
1094    }
1095    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1096        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1097            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1098        } else {
1099            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1100            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1101            // FIXME This is a warning, not an error, so don't return error status
1102            //return NO_INIT;
1103        }
1104    }
1105
1106    // We retain a copy of the I/O handle, but don't own the reference
1107    mOutput = output;
1108    mRefreshRemaining = true;
1109
1110    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1111    // is the value of pointer() for the shared buffer, otherwise buffers points
1112    // immediately after the control block.  This address is for the mapping within client
1113    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1114    void* buffers;
1115    if (mSharedBuffer == 0) {
1116        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1117    } else {
1118        buffers = mSharedBuffer->pointer();
1119    }
1120
1121    mAudioTrack->attachAuxEffect(mAuxEffectId);
1122    // FIXME don't believe this lie
1123    mLatency = afLatency + (1000*frameCount) / mSampleRate;
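    // (Illustrative note: the estimate adds the client buffer duration in milliseconds to the
    // HAL latency; e.g. 4096 frames at 48000 Hz contributes 1000*4096/48000 = 85 ms.)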
1124
1125    mFrameCount = frameCount;
1126    // If IAudioTrack is re-created, don't let the requested frameCount
1127    // decrease.  This can confuse clients that cache frameCount().
1128    if (frameCount > mReqFrameCount) {
1129        mReqFrameCount = frameCount;
1130    }
1131
1132    // update proxy
1133    if (mSharedBuffer == 0) {
1134        mStaticProxy.clear();
1135        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1136    } else {
1137        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1138        mProxy = mStaticProxy;
1139    }
1140    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[AUDIO_INTERLEAVE_RIGHT] * 0x1000)) << 16) |
1141            uint16_t(mVolume[AUDIO_INTERLEAVE_LEFT] * 0x1000));
1142    mProxy->setSendLevel(mSendLevel);
1143    mProxy->setSampleRate(mSampleRate);
1144    mProxy->setEpoch(epoch);
1145    mProxy->setMinimum(mNotificationFramesAct);
1146
1147    mDeathNotifier = new DeathNotifier(this);
1148    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1149
1150    return NO_ERROR;
1151    }
1152
1153release:
1154    AudioSystem::releaseOutput(output);
1155    if (status == NO_ERROR) {
1156        status = NO_INIT;
1157    }
1158    return status;
1159}
1160
1161status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1162{
1163    if (audioBuffer == NULL) {
1164        return BAD_VALUE;
1165    }
1166    if (mTransfer != TRANSFER_OBTAIN) {
1167        audioBuffer->frameCount = 0;
1168        audioBuffer->size = 0;
1169        audioBuffer->raw = NULL;
1170        return INVALID_OPERATION;
1171    }
1172
1173    const struct timespec *requested;
1174    struct timespec timeout;
1175    if (waitCount == -1) {
1176        requested = &ClientProxy::kForever;
1177    } else if (waitCount == 0) {
1178        requested = &ClientProxy::kNonBlocking;
1179    } else if (waitCount > 0) {
1180        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1181        timeout.tv_sec = ms / 1000;
1182        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1183        requested = &timeout;
1184    } else {
1185        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1186        requested = NULL;
1187    }
1188    return obtainBuffer(audioBuffer, requested);
1189}
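
// Illustrative note (not part of the original file): waitCount is expressed in units of
// WAIT_PERIOD_MS (10 ms), so for example waitCount == 100 converts to a total timeout of
// roughly one second, waitCount == -1 blocks indefinitely, and waitCount == 0 is non-blocking.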
1190
1191status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1192        struct timespec *elapsed, size_t *nonContig)
1193{
1194    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1195    uint32_t oldSequence = 0;
1196    uint32_t newSequence;
1197
1198    Proxy::Buffer buffer;
1199    status_t status = NO_ERROR;
1200
1201    static const int32_t kMaxTries = 5;
1202    int32_t tryCounter = kMaxTries;
1203
1204    do {
1205        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1206        // keep them from going away if another thread re-creates the track during obtainBuffer()
1207        sp<AudioTrackClientProxy> proxy;
1208        sp<IMemory> iMem;
1209
1210        {   // start of lock scope
1211            AutoMutex lock(mLock);
1212
1213            newSequence = mSequence;
1214            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1215            if (status == DEAD_OBJECT) {
1216                // re-create track, unless someone else has already done so
1217                if (newSequence == oldSequence) {
1218                    status = restoreTrack_l("obtainBuffer");
1219                    if (status != NO_ERROR) {
1220                        buffer.mFrameCount = 0;
1221                        buffer.mRaw = NULL;
1222                        buffer.mNonContig = 0;
1223                        break;
1224                    }
1225                }
1226            }
1227            oldSequence = newSequence;
1228
1229            // Keep the extra references
1230            proxy = mProxy;
1231            iMem = mCblkMemory;
1232
1233            if (mState == STATE_STOPPING) {
1234                status = -EINTR;
1235                buffer.mFrameCount = 0;
1236                buffer.mRaw = NULL;
1237                buffer.mNonContig = 0;
1238                break;
1239            }
1240
1241            // Non-blocking if track is stopped or paused
1242            if (mState != STATE_ACTIVE) {
1243                requested = &ClientProxy::kNonBlocking;
1244            }
1245
1246        }   // end of lock scope
1247
1248        buffer.mFrameCount = audioBuffer->frameCount;
1249        // FIXME starts the requested timeout and elapsed over from scratch
1250        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1251
1252    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1253
1254    audioBuffer->frameCount = buffer.mFrameCount;
1255    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1256    audioBuffer->raw = buffer.mRaw;
1257    if (nonContig != NULL) {
1258        *nonContig = buffer.mNonContig;
1259    }
1260    return status;
1261}
1262
1263void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1264{
1265    if (mTransfer == TRANSFER_SHARED) {
1266        return;
1267    }
1268
1269    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1270    if (stepCount == 0) {
1271        return;
1272    }
1273
1274    Proxy::Buffer buffer;
1275    buffer.mFrameCount = stepCount;
1276    buffer.mRaw = audioBuffer->raw;
1277
1278    AutoMutex lock(mLock);
1279    mInUnderrun = false;
1280    mProxy->releaseBuffer(&buffer);
1281
1282    // restart track if it was disabled by audioflinger due to previous underrun
1283    if (mState == STATE_ACTIVE) {
1284        audio_track_cblk_t* cblk = mCblk;
1285        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1286            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1287            // FIXME ignoring status
1288            mAudioTrack->start();
1289        }
1290    }
1291}
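
// Illustrative usage sketch (not part of the original file; framesToWrite and source are
// placeholders): in TRANSFER_OBTAIN mode a client pairs obtainBuffer() and releaseBuffer()
// around each fill, for example:
//
//     AudioTrack::Buffer buffer;
//     buffer.frameCount = framesToWrite;
//     if (track->obtainBuffer(&buffer, -1 /*waitCount: block*/) == NO_ERROR) {
//         memcpy(buffer.raw, source, buffer.size);   // fill the contiguous region obtained
//         track->releaseBuffer(&buffer);
//     }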
1292
1293// -------------------------------------------------------------------------
1294
1295ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1296{
1297    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1298        return INVALID_OPERATION;
1299    }
1300
1301    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity check: the user is most likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1305        return BAD_VALUE;
1306    }
1307
1308    size_t written = 0;
1309    Buffer audioBuffer;
1310
1311    while (userSize >= mFrameSize) {
1312        audioBuffer.frameCount = userSize / mFrameSize;
1313
1314        status_t err = obtainBuffer(&audioBuffer,
1315                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1316        if (err < 0) {
1317            if (written > 0) {
1318                break;
1319            }
1320            return ssize_t(err);
1321        }
1322
1323        size_t toWrite;
1324        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1325            // Divide capacity by 2 to take expansion into account
1326            toWrite = audioBuffer.size >> 1;
1327            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1328        } else {
1329            toWrite = audioBuffer.size;
1330            memcpy(audioBuffer.i8, buffer, toWrite);
1331        }
1332        buffer = ((const char *) buffer) + toWrite;
1333        userSize -= toWrite;
1334        written += toWrite;
1335
1336        releaseBuffer(&audioBuffer);
1337    }
1338
1339    return written;
1340}
1341
1342// -------------------------------------------------------------------------
1343
1344TimedAudioTrack::TimedAudioTrack() {
1345    mIsTimed = true;
1346}
1347
1348status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1349{
1350    AutoMutex lock(mLock);
1351    status_t result = UNKNOWN_ERROR;
1352
1353#if 1
1354    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1355    // while we are accessing the cblk
1356    sp<IAudioTrack> audioTrack = mAudioTrack;
1357    sp<IMemory> iMem = mCblkMemory;
1358#endif
1359
    // If the track is not already invalid, try to allocate a buffer.  If the
    // allocation fails indicating that the server is dead, flag the track as
    // invalid so we can attempt to restore it in just a bit.
1363    audio_track_cblk_t* cblk = mCblk;
1364    if (!(cblk->mFlags & CBLK_INVALID)) {
1365        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1366        if (result == DEAD_OBJECT) {
1367            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1368        }
1369    }
1370
    // If the track is invalid at this point, attempt to restore it and try the
    // allocation one more time.
1373    if (cblk->mFlags & CBLK_INVALID) {
1374        result = restoreTrack_l("allocateTimedBuffer");
1375
1376        if (result == NO_ERROR) {
1377            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1378        }
1379    }
1380
1381    return result;
1382}
1383
1384status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1385                                           int64_t pts)
1386{
1387    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1388    {
1389        AutoMutex lock(mLock);
1390        audio_track_cblk_t* cblk = mCblk;
1391        // restart track if it was disabled by audioflinger due to previous underrun
1392        if (buffer->size() != 0 && status == NO_ERROR &&
1393                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1394            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1395            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1396            // FIXME ignoring status
1397            mAudioTrack->start();
1398        }
1399    }
1400    return status;
1401}
1402
1403status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1404                                                TargetTimeline target)
1405{
1406    return mAudioTrack->setMediaTimeTransform(xform, target);
1407}
1408
1409// -------------------------------------------------------------------------
1410
1411nsecs_t AudioTrack::processAudioBuffer()
1412{
1413    // Currently the AudioTrack thread is not created if there are no callbacks.
1414    // Would it ever make sense to run the thread, even without callbacks?
1415    // If so, then replace this by checks at each use for mCbf != NULL.
1416    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1417
1418    mLock.lock();
1419    if (mAwaitBoost) {
1420        mAwaitBoost = false;
1421        mLock.unlock();
1422        static const int32_t kMaxTries = 5;
1423        int32_t tryCounter = kMaxTries;
1424        uint32_t pollUs = 10000;
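        // Poll for the priority boost with an exponentially increasing sleep
        // (10 ms, 20 ms, 40 ms, ...), giving up after the retry budget is exhausted.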
1425        do {
1426            int policy = sched_getscheduler(0);
1427            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1428                break;
1429            }
1430            usleep(pollUs);
1431            pollUs <<= 1;
1432        } while (tryCounter-- > 0);
1433        if (tryCounter < 0) {
1434            ALOGE("did not receive expected priority boost on time");
1435        }
1436        // Run again immediately
1437        return 0;
1438    }
1439
1440    // Can only reference mCblk while locked
1441    int32_t flags = android_atomic_and(
1442        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1443
1444    // Check for track invalidation
1445    if (flags & CBLK_INVALID) {
        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear the
        // AudioSystem cache.  We should not exit here but after calling the callback, so
        // that the upper layers can recreate the track.
1449        if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
1450            status_t status = restoreTrack_l("processAudioBuffer");
1451            mLock.unlock();
1452            // Run again immediately, but with a new IAudioTrack
1453            return 0;
1454        }
1455    }
1456
1457    bool waitStreamEnd = mState == STATE_STOPPING;
1458    bool active = mState == STATE_ACTIVE;
1459
1460    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1461    bool newUnderrun = false;
1462    if (flags & CBLK_UNDERRUN) {
1463#if 0
        // Currently in shared buffer mode, when the server reaches the end of the buffer,
        // the track stays active in a continuous underrun state.  It's up to the application
        // to pause or stop the track, or set the position to a new offset within the buffer.
        // This was some experimental code to auto-pause on underrun.  Keeping it here
        // in "if 0" so we can revisit this if we add a real sequencer for shared memory content.
1469        if (mTransfer == TRANSFER_SHARED) {
1470            mState = STATE_PAUSED;
1471            active = false;
1472        }
1473#endif
1474        if (!mInUnderrun) {
1475            mInUnderrun = true;
1476            newUnderrun = true;
1477        }
1478    }
1479
1480    // Get current position of server
1481    size_t position = mProxy->getPosition();
1482
1483    // Manage marker callback
1484    bool markerReached = false;
1485    size_t markerPosition = mMarkerPosition;
1486    // FIXME fails for wraparound, need 64 bits
1487    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1488        mMarkerReached = markerReached = true;
1489    }
1490
1491    // Determine number of new position callback(s) that will be needed, while locked
1492    size_t newPosCount = 0;
1493    size_t newPosition = mNewPosition;
1494    size_t updatePeriod = mUpdatePeriod;
1495    // FIXME fails for wraparound, need 64 bits
1496    if (updatePeriod > 0 && position >= newPosition) {
1497        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1498        mNewPosition += updatePeriod * newPosCount;
1499    }
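    // (Illustrative note: with an update period of 1000 frames, if the server position
    // advanced from 1000 to 3500 since the last callback, newPosCount is (2500/1000)+1 = 3,
    // so three EVENT_NEW_POS callbacks are delivered below.)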
1500
1501    // Cache other fields that will be needed soon
1502    uint32_t loopPeriod = mLoopPeriod;
1503    uint32_t sampleRate = mSampleRate;
1504    uint32_t notificationFrames = mNotificationFramesAct;
1505    if (mRefreshRemaining) {
1506        mRefreshRemaining = false;
1507        mRemainingFrames = notificationFrames;
1508        mRetryOnPartialBuffer = false;
1509    }
1510    size_t misalignment = mProxy->getMisalignment();
1511    uint32_t sequence = mSequence;
1512    sp<AudioTrackClientProxy> proxy = mProxy;
1513
1514    // These fields don't need to be cached, because they are assigned only by set():
1515    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1516    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1517
1518    mLock.unlock();
1519
1520    if (waitStreamEnd) {
1521        struct timespec timeout;
1522        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1523        timeout.tv_nsec = 0;
1524
1525        status_t status = proxy->waitStreamEndDone(&timeout);
1526        switch (status) {
1527        case NO_ERROR:
1528        case DEAD_OBJECT:
1529        case TIMED_OUT:
1530            mCbf(EVENT_STREAM_END, mUserData, NULL);
1531            {
1532                AutoMutex lock(mLock);
1533                // The previously assigned value of waitStreamEnd is no longer valid,
1534                // since the mutex has been unlocked and either the callback handler
1535                // or another thread could have re-started the AudioTrack during that time.
1536                waitStreamEnd = mState == STATE_STOPPING;
1537                if (waitStreamEnd) {
1538                    mState = STATE_STOPPED;
1539                }
1540            }
1541            if (waitStreamEnd && status != DEAD_OBJECT) {
1542                return NS_INACTIVE;
1543            }
1544            break;
1545        }
1546        return 0;
1547    }
1548
1549    // perform callbacks while unlocked
1550    if (newUnderrun) {
1551        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1552    }
1553    // FIXME we will miss loops if loop cycle was signaled several times since last call
1554    //       to processAudioBuffer()
1555    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1556        mCbf(EVENT_LOOP_END, mUserData, NULL);
1557    }
1558    if (flags & CBLK_BUFFER_END) {
1559        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1560    }
1561    if (markerReached) {
1562        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1563    }
1564    while (newPosCount > 0) {
1565        size_t temp = newPosition;
1566        mCbf(EVENT_NEW_POS, mUserData, &temp);
1567        newPosition += updatePeriod;
1568        newPosCount--;
1569    }
1570
1571    if (mObservedSequence != sequence) {
1572        mObservedSequence = sequence;
1573        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1574        // for offloaded tracks, just wait for the upper layers to recreate the track
1575        if (isOffloaded()) {
1576            return NS_INACTIVE;
1577        }
1578    }
1579
1580    // if inactive, then don't run me again until re-started
1581    if (!active) {
1582        return NS_INACTIVE;
1583    }
1584
1585    // Compute the estimated time until the next timed event (position, markers, loops)
1586    // FIXME only for non-compressed audio
1587    uint32_t minFrames = ~0;
1588    if (!markerReached && position < markerPosition) {
1589        minFrames = markerPosition - position;
1590    }
1591    if (loopPeriod > 0 && loopPeriod < minFrames) {
1592        minFrames = loopPeriod;
1593    }
1594    if (updatePeriod > 0 && updatePeriod < minFrames) {
1595        minFrames = updatePeriod;
1596    }
1597
1598    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1599    static const uint32_t kPoll = 0;
1600    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1601        minFrames = kPoll * notificationFrames;
1602    }
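    // Illustrative note (added): with kPoll = 2 and notificationFrames = 512, minFrames would be
    // capped at 1024, so the thread would wake at least that often (in frame time) even if the
    // server stopped making progress; with kPoll == 0 this safety poll is disabled.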
1603
1604    // Convert frame units to time units
1605    nsecs_t ns = NS_WHENEVER;
1606    if (minFrames != (uint32_t) ~0) {
1607        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1608        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1609        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1610    }
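    // Worked example (illustrative): with minFrames = 480 at sampleRate = 48000, the nominal
    // time is (480 * 1e9) / 48000 = 10 ms, so ns = 10 ms + 10 ms fudge = 20 ms until the next run.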
1611
1612    // If not supplying data by EVENT_MORE_DATA, then we're done
1613    if (mTransfer != TRANSFER_CALLBACK) {
1614        return ns;
1615    }
1616
1617    struct timespec timeout;
1618    const struct timespec *requested = &ClientProxy::kForever;
1619    if (ns != NS_WHENEVER) {
1620        timeout.tv_sec = ns / 1000000000LL;
1621        timeout.tv_nsec = ns % 1000000000LL;
1622        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1623        requested = &timeout;
1624    }
1625
1626    while (mRemainingFrames > 0) {
1627
1628        Buffer audioBuffer;
1629        audioBuffer.frameCount = mRemainingFrames;
1630        size_t nonContig;
1631        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1632        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1633                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1634        requested = &ClientProxy::kNonBlocking;
1635        size_t avail = audioBuffer.frameCount + nonContig;
1636        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1637                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1638        if (err != NO_ERROR) {
1639            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1640                    (isOffloaded() && (err == DEAD_OBJECT))) {
1641                return 0;
1642            }
1643            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1644            return NS_NEVER;
1645        }
1646
1647        if (mRetryOnPartialBuffer && !isOffloaded()) {
1648            mRetryOnPartialBuffer = false;
1649            if (avail < mRemainingFrames) {
1650                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
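                // Note (added): 1100000000LL above is 1.1 s in ns, i.e. the nominal playback time
                // of the still-missing frames plus roughly 10% of margin, presumably to give the
                // server time to make the remaining frames available before we run again.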
1651                if (ns < 0 || myns < ns) {
1652                    ns = myns;
1653                }
1654                return ns;
1655            }
1656        }
1657
1658        // Divide buffer size by 2 to take into account the expansion
1659        // due to 8 to 16 bit conversion: the callback must fill only half
1660        // of the destination buffer
1661        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1662            audioBuffer.size >>= 1;
1663        }
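        // Worked example (illustrative): if obtainBuffer() returned a 4096-byte region, the
        // callback is asked for only 2048 bytes of 8-bit samples; the in-place expansion further
        // below grows them back to 4096 bytes of 16-bit PCM.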
1664
1665        size_t reqSize = audioBuffer.size;
1666        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1667        size_t writtenSize = audioBuffer.size;
1668
1669        // Sanity check on returned size
1670        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1671            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1672                    reqSize, (int) writtenSize);
1673            return NS_NEVER;
1674        }
1675
1676        if (writtenSize == 0) {
1677            // The callback is done filling buffers
1678            // Keep this thread going to handle timed events and to keep trying to get
1679            // more data at intervals of WAIT_PERIOD_MS, but don't busy-loop and hog the
1680            // CPU, so wait before retrying.
1681            return WAIT_PERIOD_MS * 1000000LL;
1682        }
1683
1684        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1685            // 8 to 16 bit conversion, note that source and destination are the same address
1686            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1687            audioBuffer.size <<= 1;
1688        }
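        // Note (added): source and destination overlap in the call above; this relies on
        // memcpy_to_i16_from_u8() expanding from the end of the buffer backwards, so the 8-bit
        // samples are not overwritten before they are read.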
1689
1690        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1691        audioBuffer.frameCount = releasedFrames;
1692        mRemainingFrames -= releasedFrames;
1693        if (misalignment >= releasedFrames) {
1694            misalignment -= releasedFrames;
1695        } else {
1696            misalignment = 0;
1697        }
1698
1699        releaseBuffer(&audioBuffer);
1700
1701        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1702        //       if the callback does not accept the full chunk
1703        if (writtenSize < reqSize) {
1704            continue;
1705        }
1706
1707        // There could be enough non-contiguous frames available to satisfy the remaining request
1708        if (mRemainingFrames <= nonContig) {
1709            continue;
1710        }
1711
1712#if 0
1713        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1714        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1715        // that total to a sum == notificationFrames.
1716        if (0 < misalignment && misalignment <= mRemainingFrames) {
1717            mRemainingFrames = misalignment;
1718            return (mRemainingFrames * 1100000000LL) / sampleRate;
1719        }
1720#endif
1721
1722    }
1723    mRemainingFrames = notificationFrames;
1724    mRetryOnPartialBuffer = true;
1725
1726    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1727    return 0;
1728}
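
// Illustrative sketch (added, not part of the original source): a minimal EVENT_MORE_DATA
// handler for a TRANSFER_CALLBACK client, showing the contract that processAudioBuffer()
// enforces above.  fillFromSource() is a hypothetical helper that copies PCM into the buffer
// and returns the number of bytes written.
//
//     static void audioCallback(int event, void* user, void* info) {
//         if (event != AudioTrack::EVENT_MORE_DATA) return;
//         AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
//         // Writing 0 bytes makes the thread retry every WAIT_PERIOD_MS; writing fewer bytes
//         // than requested triggers another EVENT_MORE_DATA for the remainder.
//         buffer->size = fillFromSource(buffer->raw, buffer->size);
//     }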
1729
1730status_t AudioTrack::restoreTrack_l(const char *from)
1731{
1732    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1733          isOffloaded_l() ? "Offloaded" : "PCM", from);
1734    ++mSequence;
1735    status_t result;
1736
1737    // refresh the audio configuration cache in this process to make sure we get new
1738    // output parameters in createTrack_l()
1739    AudioSystem::clearAudioConfigCache();
1740
1741    if (isOffloaded_l()) {
1742        // FIXME re-creation of offloaded tracks is not yet implemented
1743        return DEAD_OBJECT;
1744    }
1745
1746    // if the new IAudioTrack is created, createTrack_l() will modify the
1747    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1748    // It will also delete the strong references on previous IAudioTrack and IMemory
1749
1750    // take the frames that will be lost by track recreation into account in saved position
1751    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
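    // Illustrative example (added): if the server has played 10000 frames and 2000 written
    // frames were still queued in the old buffer, the new track is created with epoch 12000,
    // so the position reported to the client stays monotonic despite the frames lost with the
    // old IAudioTrack.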
1752    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1753    result = createTrack_l(position /*epoch*/);
1754
1755    if (result == NO_ERROR) {
1756        // continue playback from last known position, but
1757        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1758        if (mStaticProxy != NULL) {
1759            mLoopPeriod = 0;
1760            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1761        }
1762        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1763        //       track destruction have been played? This is critical for SoundPool implementation
1764        //       This must be broken, and needs to be tested/debugged.
1765#if 0
1766        // restore write index and set other indexes to reflect empty buffer status
1767        if (!strcmp(from, "start")) {
1768            // Make sure that a client relying on callback events indicating underrun or
1769            // the actual amount of audio frames played (e.g SoundPool) receives them.
1770            if (mSharedBuffer == 0) {
1771                // restart playback even if buffer is not completely filled.
1772                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1773            }
1774        }
1775#endif
1776        if (mState == STATE_ACTIVE) {
1777            result = mAudioTrack->start();
1778        }
1779    }
1780    if (result != NO_ERROR) {
1781        ALOGW("restoreTrack_l() failed status %d", result);
1782        mState = STATE_STOPPED;
1783    }
1784
1785    return result;
1786}
1787
1788status_t AudioTrack::setParameters(const String8& keyValuePairs)
1789{
1790    AutoMutex lock(mLock);
1791    return mAudioTrack->setParameters(keyValuePairs);
1792}
1793
1794status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1795{
1796    AutoMutex lock(mLock);
1797    // FIXME not implemented for fast tracks; should use proxy and SSQ
1798    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1799        return INVALID_OPERATION;
1800    }
1801    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1802        return INVALID_OPERATION;
1803    }
1804    status_t status = mAudioTrack->getTimestamp(timestamp);
1805    if (status == NO_ERROR) {
1806        timestamp.mPosition += mProxy->getEpoch();
1807    }
1808    return status;
1809}
1810
1811String8 AudioTrack::getParameters(const String8& keys)
1812{
1813    audio_io_handle_t output = getOutput();
1814    if (output != AUDIO_IO_HANDLE_NONE) {
1815        return AudioSystem::getParameters(output, keys);
1816    } else {
1817        return String8::empty();
1818    }
1819}
1820
1821bool AudioTrack::isOffloaded() const
1822{
1823    AutoMutex lock(mLock);
1824    return isOffloaded_l();
1825}
1826
1827status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
1828{
1829
1830    const size_t SIZE = 256;
1831    char buffer[SIZE];
1832    String8 result;
1833
1834    result.append(" AudioTrack::dump\n");
1835    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1836            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
1837    result.append(buffer);
1838    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
1839            mChannelCount, mFrameCount);
1840    result.append(buffer);
1841    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1842    result.append(buffer);
1843    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1844    result.append(buffer);
1845    ::write(fd, result.string(), result.size());
1846    return NO_ERROR;
1847}
1848
1849uint32_t AudioTrack::getUnderrunFrames() const
1850{
1851    AutoMutex lock(mLock);
1852    return mProxy->getUnderrunFrames();
1853}
1854
1855// =========================================================================
1856
1857void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
1858{
1859    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1860    if (audioTrack != 0) {
1861        AutoMutex lock(audioTrack->mLock);
1862        audioTrack->mProxy->binderDied();
1863    }
1864}
1865
1866// =========================================================================
1867
1868AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1869    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1870      mIgnoreNextPausedInt(false)
1871{
1872}
1873
1874AudioTrack::AudioTrackThread::~AudioTrackThread()
1875{
1876}
1877
1878bool AudioTrack::AudioTrackThread::threadLoop()
1879{
1880    {
1881        AutoMutex _l(mMyLock);
1882        if (mPaused) {
1883            mMyCond.wait(mMyLock);
1884            // caller will check for exitPending()
1885            return true;
1886        }
1887        if (mIgnoreNextPausedInt) {
1888            mIgnoreNextPausedInt = false;
1889            mPausedInt = false;
1890        }
1891        if (mPausedInt) {
1892            if (mPausedNs > 0) {
1893                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1894            } else {
1895                mMyCond.wait(mMyLock);
1896            }
1897            mPausedInt = false;
1898            return true;
1899        }
1900    }
1901    nsecs_t ns = mReceiver.processAudioBuffer();
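    // The return value drives this thread's scheduling:
    //   0            run processAudioBuffer() again immediately
    //   NS_INACTIVE  pause until resume() is called
    //   NS_NEVER     exit the thread loop
    //   NS_WHENEVER  no deadline; poll again after a default interval (1 s below)
    //   > 0          sleep for that many nanoseconds before the next run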
1902    switch (ns) {
1903    case 0:
1904        return true;
1905    case NS_INACTIVE:
1906        pauseInternal();
1907        return true;
1908    case NS_NEVER:
1909        return false;
1910    case NS_WHENEVER:
1911        // FIXME increase poll interval, or make event-driven
1912        ns = 1000000000LL;
1913        // fall through
1914    default:
1915        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", (long long) ns);
1916        pauseInternal(ns);
1917        return true;
1918    }
1919}
1920
1921void AudioTrack::AudioTrackThread::requestExit()
1922{
1923    // must be in this order to avoid a race condition
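    // If resume() ran first, its signal could be consumed and the loop could block again on the
    // condition before exitPending() became true, so the exit flag must be set before waking.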
1924    Thread::requestExit();
1925    resume();
1926}
1927
1928void AudioTrack::AudioTrackThread::pause()
1929{
1930    AutoMutex _l(mMyLock);
1931    mPaused = true;
1932}
1933
1934void AudioTrack::AudioTrackThread::resume()
1935{
1936    AutoMutex _l(mMyLock);
1937    mIgnoreNextPausedInt = true;
1938    if (mPaused || mPausedInt) {
1939        mPaused = false;
1940        mPausedInt = false;
1941        mMyCond.signal();
1942    }
1943}
1944
1945void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1946{
1947    AutoMutex _l(mMyLock);
1948    mPausedInt = true;
1949    mPausedNs = ns;
1950}
1951
1952} // namespace android
1953