AudioTrack.cpp revision 66e4635cb09fadcaccf912f37c387396c428378a
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <sys/resource.h>
23#include <audio_utils/primitives.h>
24#include <binder/IPCThreadState.h>
25#include <media/AudioTrack.h>
26#include <utils/Log.h>
27#include <private/media/AudioTrackShared.h>
28#include <media/IAudioFlinger.h>
29
30#define WAIT_PERIOD_MS                  10
31#define WAIT_STREAM_END_TIMEOUT_SEC     120
32
33
34namespace android {
35// ---------------------------------------------------------------------------
36
37// static
38status_t AudioTrack::getMinFrameCount(
39        size_t* frameCount,
40        audio_stream_type_t streamType,
41        uint32_t sampleRate)
42{
43    if (frameCount == NULL) {
44        return BAD_VALUE;
45    }
46
47    // FIXME merge with similar code in createTrack_l(), except we're missing
48    //       some information here that is available in createTrack_l():
49    //          audio_io_handle_t output
50    //          audio_format_t format
51    //          audio_channel_mask_t channelMask
52    //          audio_output_flags_t flags
53    uint32_t afSampleRate;
54    status_t status;
55    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
56    if (status != NO_ERROR) {
57        ALOGE("Unable to query output sample rate for stream type %d; status %d",
58                streamType, status);
59        return status;
60    }
61    size_t afFrameCount;
62    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
63    if (status != NO_ERROR) {
64        ALOGE("Unable to query output frame count for stream type %d; status %d",
65                streamType, status);
66        return status;
67    }
68    uint32_t afLatency;
69    status = AudioSystem::getOutputLatency(&afLatency, streamType);
70    if (status != NO_ERROR) {
71        ALOGE("Unable to query output latency for stream type %d; status %d",
72                streamType, status);
73        return status;
74    }
75
76    // Ensure that buffer depth covers at least audio hardware latency
77    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
78    if (minBufCount < 2) {
79        minBufCount = 2;
80    }
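    // Illustrative example with hypothetical values: afLatency = 160 ms,
    // afFrameCount = 960 and afSampleRate = 48000 make one mixer buffer
    // (1000 * 960) / 48000 = 20 ms long, so minBufCount = 160 / 20 = 8.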
81
82    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
83            afFrameCount * minBufCount * sampleRate / afSampleRate;
84    // The formula above should always produce a non-zero value, but return an error
85    // in the unlikely event that it does not, as that's part of the API contract.
86    if (*frameCount == 0) {
87        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
88                streamType, sampleRate);
89        return BAD_VALUE;
90    }
91    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
92            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
93    return NO_ERROR;
94}
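// Minimal usage sketch (illustrative, not part of the original file): a client
// typically queries the minimum frame count before constructing an AudioTrack,
// here for the MUSIC stream at a hypothetical 44100 Hz:
//
//     size_t minFrameCount;
//     status_t status = AudioTrack::getMinFrameCount(&minFrameCount,
//             AUDIO_STREAM_MUSIC, 44100);
//     if (status == NO_ERROR) {
//         // request at least minFrameCount frames when creating the track
//     }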
95
96// ---------------------------------------------------------------------------
97
98AudioTrack::AudioTrack()
99    : mStatus(NO_INIT),
100      mIsTimed(false),
101      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
102      mPreviousSchedulingGroup(SP_DEFAULT),
103      mPausedPosition(0)
104{
105}
106
107AudioTrack::AudioTrack(
108        audio_stream_type_t streamType,
109        uint32_t sampleRate,
110        audio_format_t format,
111        audio_channel_mask_t channelMask,
112        size_t frameCount,
113        audio_output_flags_t flags,
114        callback_t cbf,
115        void* user,
116        uint32_t notificationFrames,
117        int sessionId,
118        transfer_type transferType,
119        const audio_offload_info_t *offloadInfo,
120        int uid,
121        pid_t pid)
122    : mStatus(NO_INIT),
123      mIsTimed(false),
124      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
125      mPreviousSchedulingGroup(SP_DEFAULT),
126      mPausedPosition(0)
127{
128    mStatus = set(streamType, sampleRate, format, channelMask,
129            frameCount, flags, cbf, user, notificationFrames,
130            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
131            offloadInfo, uid, pid);
132}
133
134AudioTrack::AudioTrack(
135        audio_stream_type_t streamType,
136        uint32_t sampleRate,
137        audio_format_t format,
138        audio_channel_mask_t channelMask,
139        const sp<IMemory>& sharedBuffer,
140        audio_output_flags_t flags,
141        callback_t cbf,
142        void* user,
143        uint32_t notificationFrames,
144        int sessionId,
145        transfer_type transferType,
146        const audio_offload_info_t *offloadInfo,
147        int uid,
148        pid_t pid)
149    : mStatus(NO_INIT),
150      mIsTimed(false),
151      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
152      mPreviousSchedulingGroup(SP_DEFAULT),
153      mPausedPosition(0)
154{
155    mStatus = set(streamType, sampleRate, format, channelMask,
156            0 /*frameCount*/, flags, cbf, user, notificationFrames,
157            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
158            uid, pid);
159}
160
161AudioTrack::~AudioTrack()
162{
163    if (mStatus == NO_ERROR) {
164        // Make sure that the callback function exits if it is looping on a
165        // buffer-full condition in obtainBuffer().
166        // Otherwise the callback thread will never exit.
167        stop();
168        if (mAudioTrackThread != 0) {
169            mProxy->interrupt();
170            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
171            mAudioTrackThread->requestExitAndWait();
172            mAudioTrackThread.clear();
173        }
174        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
175        mAudioTrack.clear();
176        IPCThreadState::self()->flushCommands();
177        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
178                IPCThreadState::self()->getCallingPid(), mClientPid);
179        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
180    }
181}
182
183status_t AudioTrack::set(
184        audio_stream_type_t streamType,
185        uint32_t sampleRate,
186        audio_format_t format,
187        audio_channel_mask_t channelMask,
188        size_t frameCount,
189        audio_output_flags_t flags,
190        callback_t cbf,
191        void* user,
192        uint32_t notificationFrames,
193        const sp<IMemory>& sharedBuffer,
194        bool threadCanCallJava,
195        int sessionId,
196        transfer_type transferType,
197        const audio_offload_info_t *offloadInfo,
198        int uid,
199        pid_t pid)
200{
201    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
202          "flags %#x, notificationFrames %u, sessionId %d, transferType %d",
203          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
204          sessionId, transferType);
205
206    switch (transferType) {
207    case TRANSFER_DEFAULT:
208        if (sharedBuffer != 0) {
209            transferType = TRANSFER_SHARED;
210        } else if (cbf == NULL || threadCanCallJava) {
211            transferType = TRANSFER_SYNC;
212        } else {
213            transferType = TRANSFER_CALLBACK;
214        }
215        break;
216    case TRANSFER_CALLBACK:
217        if (cbf == NULL || sharedBuffer != 0) {
218            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
219            return BAD_VALUE;
220        }
221        break;
222    case TRANSFER_OBTAIN:
223    case TRANSFER_SYNC:
224        if (sharedBuffer != 0) {
225            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
226            return BAD_VALUE;
227        }
228        break;
229    case TRANSFER_SHARED:
230        if (sharedBuffer == 0) {
231            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
232            return BAD_VALUE;
233        }
234        break;
235    default:
236        ALOGE("Invalid transfer type %d", transferType);
237        return BAD_VALUE;
238    }
239    mSharedBuffer = sharedBuffer;
240    mTransfer = transferType;
241
242    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
243            sharedBuffer->size());
244
245    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
246
247    AutoMutex lock(mLock);
248
249    // invariant that mAudioTrack != 0 is true only after set() returns successfully
250    if (mAudioTrack != 0) {
251        ALOGE("Track already in use");
252        return INVALID_OPERATION;
253    }
254
255    // handle default values first.
256    if (streamType == AUDIO_STREAM_DEFAULT) {
257        streamType = AUDIO_STREAM_MUSIC;
258    }
259    if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
260        ALOGE("Invalid stream type %d", streamType);
261        return BAD_VALUE;
262    }
263    mStreamType = streamType;
264
265    status_t status;
266    if (sampleRate == 0) {
267        status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType);
268        if (status != NO_ERROR) {
269            ALOGE("Could not get output sample rate for stream type %d; status %d",
270                    streamType, status);
271            return status;
272        }
273    }
274    mSampleRate = sampleRate;
275
276    // these below should probably come from the audioFlinger too...
277    if (format == AUDIO_FORMAT_DEFAULT) {
278        format = AUDIO_FORMAT_PCM_16_BIT;
279    }
280
281    // validate parameters
282    if (!audio_is_valid_format(format)) {
283        ALOGE("Invalid format %#x", format);
284        return BAD_VALUE;
285    }
286    mFormat = format;
287
288    if (!audio_is_output_channel(channelMask)) {
289        ALOGE("Invalid channel mask %#x", channelMask);
290        return BAD_VALUE;
291    }
292    mChannelMask = channelMask;
293    uint32_t channelCount = popcount(channelMask);
294    mChannelCount = channelCount;
295
296    // AudioFlinger does not currently support 8-bit data in shared memory
297    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
298        ALOGE("8-bit data in shared memory is not supported");
299        return BAD_VALUE;
300    }
301
302    // force direct flag if format is not linear PCM
303    // or offload was requested
304    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
305            || !audio_is_linear_pcm(format)) {
306        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
307                    ? "Offload request, forcing to Direct Output"
308                    : "Not linear PCM, forcing to Direct Output");
309        flags = (audio_output_flags_t)
310                // FIXME why can't we allow direct AND fast?
311                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
312    }
313    // only allow deep buffering for music stream type
314    if (streamType != AUDIO_STREAM_MUSIC) {
315        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
316    }
317
318    if (audio_is_linear_pcm(format)) {
319        mFrameSize = channelCount * audio_bytes_per_sample(format);
320        mFrameSizeAF = channelCount * sizeof(int16_t);
321    } else {
322        mFrameSize = sizeof(uint8_t);
323        mFrameSizeAF = sizeof(uint8_t);
324    }
325
326    // Make copy of input parameter offloadInfo so that in the future:
327    //  (a) createTrack_l doesn't need it as an input parameter
328    //  (b) we can support re-creation of offloaded tracks
329    if (offloadInfo != NULL) {
330        mOffloadInfoCopy = *offloadInfo;
331        mOffloadInfo = &mOffloadInfoCopy;
332    } else {
333        mOffloadInfo = NULL;
334    }
335
336    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
337    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
338    mSendLevel = 0.0f;
339    // mFrameCount is initialized in createTrack_l
340    mReqFrameCount = frameCount;
341    mNotificationFramesReq = notificationFrames;
342    mNotificationFramesAct = 0;
343    mSessionId = sessionId;
344    int callingpid = IPCThreadState::self()->getCallingPid();
345    int mypid = getpid();
346    if (uid == -1 || (callingpid != mypid)) {
347        mClientUid = IPCThreadState::self()->getCallingUid();
348    } else {
349        mClientUid = uid;
350    }
351    if (pid == -1 || (callingpid != mypid)) {
352        mClientPid = callingpid;
353    } else {
354        mClientPid = pid;
355    }
356    mAuxEffectId = 0;
357    mFlags = flags;
358    mCbf = cbf;
359
360    if (cbf != NULL) {
361        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
362        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
363    }
364
365    // create the IAudioTrack
366    status = createTrack_l(0 /*epoch*/);
367
368    if (status != NO_ERROR) {
369        if (mAudioTrackThread != 0) {
370            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
371            mAudioTrackThread->requestExitAndWait();
372            mAudioTrackThread.clear();
373        }
374        // Use of direct and offloaded output streams is ref counted by audio policy manager.
375#if 0   // FIXME This should no longer be needed
377        // As getOutput was called above and resulted in an output stream to be opened,
378        // we need to release it.
379        if (mOutput != 0) {
380            AudioSystem::releaseOutput(mOutput);
381            mOutput = 0;
382        }
383#endif
384        return status;
385    }
386
387    mStatus = NO_ERROR;
388    mState = STATE_STOPPED;
389    mUserData = user;
390    mLoopPeriod = 0;
391    mMarkerPosition = 0;
392    mMarkerReached = false;
393    mNewPosition = 0;
394    mUpdatePeriod = 0;
395    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
396    mSequence = 1;
397    mObservedSequence = mSequence;
398    mInUnderrun = false;
399
400    return NO_ERROR;
401}
402
403// -------------------------------------------------------------------------
404
405status_t AudioTrack::start()
406{
407    AutoMutex lock(mLock);
408
409    if (mState == STATE_ACTIVE) {
410        return INVALID_OPERATION;
411    }
412
413    mInUnderrun = true;
414
415    State previousState = mState;
416    if (previousState == STATE_PAUSED_STOPPING) {
417        mState = STATE_STOPPING;
418    } else {
419        mState = STATE_ACTIVE;
420    }
421    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
422        // reset current position as seen by client to 0
423        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
424        // force refresh of remaining frames by processAudioBuffer() as last
425        // write before stop could be partial.
426        mRefreshRemaining = true;
427    }
428    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
429    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
430
431    sp<AudioTrackThread> t = mAudioTrackThread;
432    if (t != 0) {
433        if (previousState == STATE_STOPPING) {
434            mProxy->interrupt();
435        } else {
436            t->resume();
437        }
438    } else {
439        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
440        get_sched_policy(0, &mPreviousSchedulingGroup);
441        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
442    }
443
444    status_t status = NO_ERROR;
445    if (!(flags & CBLK_INVALID)) {
446        status = mAudioTrack->start();
447        if (status == DEAD_OBJECT) {
448            flags |= CBLK_INVALID;
449        }
450    }
451    if (flags & CBLK_INVALID) {
452        status = restoreTrack_l("start");
453    }
454
455    if (status != NO_ERROR) {
456        ALOGE("start() status %d", status);
457        mState = previousState;
458        if (t != 0) {
459            if (previousState != STATE_STOPPING) {
460                t->pause();
461            }
462        } else {
463            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
464            set_sched_policy(0, mPreviousSchedulingGroup);
465        }
466    }
467
468    return status;
469}
470
471void AudioTrack::stop()
472{
473    AutoMutex lock(mLock);
474    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
475        return;
476    }
477
478    if (isOffloaded_l()) {
479        mState = STATE_STOPPING;
480    } else {
481        mState = STATE_STOPPED;
482    }
483
484    mProxy->interrupt();
485    mAudioTrack->stop();
486    // the playback head position will reset to 0, so if a marker is set, we need
487    // to activate it again
488    mMarkerReached = false;
489#if 0
490    // Force a flush if a shared buffer is used; otherwise AudioFlinger
491    // will not stop before the end of the buffer is reached.
492    // It may be needed to ensure that playback actually stops, most likely when looping is on.
493    if (mSharedBuffer != 0) {
494        flush_l();
495    }
496#endif
497
498    sp<AudioTrackThread> t = mAudioTrackThread;
499    if (t != 0) {
500        if (!isOffloaded_l()) {
501            t->pause();
502        }
503    } else {
504        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
505        set_sched_policy(0, mPreviousSchedulingGroup);
506    }
507}
508
509bool AudioTrack::stopped() const
510{
511    AutoMutex lock(mLock);
512    return mState != STATE_ACTIVE;
513}
514
515void AudioTrack::flush()
516{
517    if (mSharedBuffer != 0) {
518        return;
519    }
520    AutoMutex lock(mLock);
521    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
522        return;
523    }
524    flush_l();
525}
526
527void AudioTrack::flush_l()
528{
529    ALOG_ASSERT(mState != STATE_ACTIVE);
530
531    // clear playback marker and periodic update counter
532    mMarkerPosition = 0;
533    mMarkerReached = false;
534    mUpdatePeriod = 0;
535    mRefreshRemaining = true;
536
537    mState = STATE_FLUSHED;
538    if (isOffloaded_l()) {
539        mProxy->interrupt();
540    }
541    mProxy->flush();
542    mAudioTrack->flush();
543}
544
545void AudioTrack::pause()
546{
547    AutoMutex lock(mLock);
548    if (mState == STATE_ACTIVE) {
549        mState = STATE_PAUSED;
550    } else if (mState == STATE_STOPPING) {
551        mState = STATE_PAUSED_STOPPING;
552    } else {
553        return;
554    }
555    mProxy->interrupt();
556    mAudioTrack->pause();
557
558    if (isOffloaded_l()) {
559        if (mOutput != 0) {
560            uint32_t halFrames;
561            // OffloadThread sends the HAL pause in its threadLoop, so the position cached
562            // here can be slightly off
563            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
564            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
565        }
566    }
567}
568
569status_t AudioTrack::setVolume(float left, float right)
570{
571    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
572        return BAD_VALUE;
573    }
574
575    AutoMutex lock(mLock);
576    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
577    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
578
579    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
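    // The two gains are packed into one word as 16-bit fixed-point values where
    // 0x1000 represents 1.0, right channel in the high half and left in the low half.
    // For example (illustrative values): left = 0.5, right = 1.0 packs to
    // (0x1000 << 16) | 0x0800 == 0x10000800.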
580
581    if (isOffloaded_l()) {
582        mAudioTrack->signal();
583    }
584    return NO_ERROR;
585}
586
587status_t AudioTrack::setVolume(float volume)
588{
589    return setVolume(volume, volume);
590}
591
592status_t AudioTrack::setAuxEffectSendLevel(float level)
593{
594    if (level < 0.0f || level > 1.0f) {
595        return BAD_VALUE;
596    }
597
598    AutoMutex lock(mLock);
599    mSendLevel = level;
600    mProxy->setSendLevel(level);
601
602    return NO_ERROR;
603}
604
605void AudioTrack::getAuxEffectSendLevel(float* level) const
606{
607    if (level != NULL) {
608        *level = mSendLevel;
609    }
610}
611
612status_t AudioTrack::setSampleRate(uint32_t rate)
613{
614    if (mIsTimed || isOffloaded()) {
615        return INVALID_OPERATION;
616    }
617
618    uint32_t afSamplingRate;
619    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
620        return NO_INIT;
621    }
622    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
623    if (rate == 0 || rate > afSamplingRate*2 ) {
624        return BAD_VALUE;
625    }
626
627    AutoMutex lock(mLock);
628    mSampleRate = rate;
629    mProxy->setSampleRate(rate);
630
631    return NO_ERROR;
632}
633
634uint32_t AudioTrack::getSampleRate() const
635{
636    if (mIsTimed) {
637        return 0;
638    }
639
640    AutoMutex lock(mLock);
641
642    // sample rate can be updated during playback by the offloaded decoder so we need to
643    // query the HAL and update if needed.
644// FIXME use Proxy return channel to update the rate from server and avoid polling here
645    if (isOffloaded_l()) {
646        if (mOutput != 0) {
647            uint32_t sampleRate = 0;
648            status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
649            if (status == NO_ERROR) {
650                mSampleRate = sampleRate;
651            }
652        }
653    }
654    return mSampleRate;
655}
656
657status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
658{
659    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
660        return INVALID_OPERATION;
661    }
662
663    if (loopCount == 0) {
664        ;
665    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
666            loopEnd - loopStart >= MIN_LOOP) {
667        ;
668    } else {
669        return BAD_VALUE;
670    }
671
672    AutoMutex lock(mLock);
673    // See setPosition() regarding setting parameters such as loop points or position while active
674    if (mState == STATE_ACTIVE) {
675        return INVALID_OPERATION;
676    }
677    setLoop_l(loopStart, loopEnd, loopCount);
678    return NO_ERROR;
679}
680
681void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
682{
683    // FIXME If setting a loop also sets position to start of loop, then
684    //       this is correct.  Otherwise it should be removed.
685    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
686    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
687    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
688}
689
690status_t AudioTrack::setMarkerPosition(uint32_t marker)
691{
692    // The only purpose of setting marker position is to get a callback
693    if (mCbf == NULL || isOffloaded()) {
694        return INVALID_OPERATION;
695    }
696
697    AutoMutex lock(mLock);
698    mMarkerPosition = marker;
699    mMarkerReached = false;
700
701    return NO_ERROR;
702}
703
704status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
705{
706    if (isOffloaded()) {
707        return INVALID_OPERATION;
708    }
709    if (marker == NULL) {
710        return BAD_VALUE;
711    }
712
713    AutoMutex lock(mLock);
714    *marker = mMarkerPosition;
715
716    return NO_ERROR;
717}
718
719status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
720{
721    // The only purpose of setting position update period is to get a callback
722    if (mCbf == NULL || isOffloaded()) {
723        return INVALID_OPERATION;
724    }
725
726    AutoMutex lock(mLock);
727    mNewPosition = mProxy->getPosition() + updatePeriod;
728    mUpdatePeriod = updatePeriod;
729
730    return NO_ERROR;
731}
732
733status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
734{
735    if (isOffloaded()) {
736        return INVALID_OPERATION;
737    }
738    if (updatePeriod == NULL) {
739        return BAD_VALUE;
740    }
741
742    AutoMutex lock(mLock);
743    *updatePeriod = mUpdatePeriod;
744
745    return NO_ERROR;
746}
747
748status_t AudioTrack::setPosition(uint32_t position)
749{
750    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
751        return INVALID_OPERATION;
752    }
753    if (position > mFrameCount) {
754        return BAD_VALUE;
755    }
756
757    AutoMutex lock(mLock);
758    // Currently we require that the player is inactive before setting parameters such as position
759    // or loop points.  Otherwise, there could be a race condition: the application could read the
760    // current position, compute a new position or loop parameters, and then set that position or
761    // loop parameters but it would do the "wrong" thing since the position has continued to advance
762    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
763    // to specify how it wants to handle such scenarios.
764    if (mState == STATE_ACTIVE) {
765        return INVALID_OPERATION;
766    }
767    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
768    mLoopPeriod = 0;
769    // FIXME Check whether loops and setting position are incompatible in old code.
770    // If we use setLoop for both purposes we lose the capability to set the position while looping.
771    mStaticProxy->setLoop(position, mFrameCount, 0);
772
773    return NO_ERROR;
774}
775
776status_t AudioTrack::getPosition(uint32_t *position) const
777{
778    if (position == NULL) {
779        return BAD_VALUE;
780    }
781
782    AutoMutex lock(mLock);
783    if (isOffloaded_l()) {
784        uint32_t dspFrames = 0;
785
786        if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
787            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
788            *position = mPausedPosition;
789            return NO_ERROR;
790        }
791
792        if (mOutput != 0) {
793            uint32_t halFrames;
794            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
795        }
796        *position = dspFrames;
797    } else {
798        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
799        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
800                mProxy->getPosition();
801    }
802    return NO_ERROR;
803}
804
805status_t AudioTrack::getBufferPosition(uint32_t *position)
806{
807    if (mSharedBuffer == 0 || mIsTimed) {
808        return INVALID_OPERATION;
809    }
810    if (position == NULL) {
811        return BAD_VALUE;
812    }
813
814    AutoMutex lock(mLock);
815    *position = mStaticProxy->getBufferPosition();
816    return NO_ERROR;
817}
818
819status_t AudioTrack::reload()
820{
821    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
822        return INVALID_OPERATION;
823    }
824
825    AutoMutex lock(mLock);
826    // See setPosition() regarding setting parameters such as loop points or position while active
827    if (mState == STATE_ACTIVE) {
828        return INVALID_OPERATION;
829    }
830    mNewPosition = mUpdatePeriod;
831    mLoopPeriod = 0;
832    // FIXME The new code cannot reload while keeping a loop specified.
833    // Need to check how the old code handled this, and whether it's a significant change.
834    mStaticProxy->setLoop(0, mFrameCount, 0);
835    return NO_ERROR;
836}
837
838audio_io_handle_t AudioTrack::getOutput() const
839{
840    AutoMutex lock(mLock);
841    return mOutput;
842}
843
844status_t AudioTrack::attachAuxEffect(int effectId)
845{
846    AutoMutex lock(mLock);
847    status_t status = mAudioTrack->attachAuxEffect(effectId);
848    if (status == NO_ERROR) {
849        mAuxEffectId = effectId;
850    }
851    return status;
852}
853
854// -------------------------------------------------------------------------
855
856// must be called with mLock held
857status_t AudioTrack::createTrack_l(size_t epoch)
858{
859    status_t status;
860    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
861    if (audioFlinger == 0) {
862        ALOGE("Could not get audioflinger");
863        return NO_INIT;
864    }
865
866    audio_io_handle_t output = AudioSystem::getOutput(mStreamType, mSampleRate, mFormat,
867            mChannelMask, mFlags, mOffloadInfo);
868    if (output == 0) {
869        ALOGE("Could not get audio output for stream type %d, sample rate %u, format %#x, "
870              "channel mask %#x, flags %#x",
871              mStreamType, mSampleRate, mFormat, mChannelMask, mFlags);
872        return BAD_VALUE;
873    }
874    {
875    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
876    // we must release it ourselves if anything goes wrong.
877
878    // Not all of these values are needed under all conditions, but it is easier to get them all
879
880    uint32_t afLatency;
881    status = AudioSystem::getLatency(output, mStreamType, &afLatency);
882    if (status != NO_ERROR) {
883        ALOGE("getLatency(%d) failed status %d", output, status);
884        goto release;
885    }
886
887    size_t afFrameCount;
888    status = AudioSystem::getFrameCount(output, mStreamType, &afFrameCount);
889    if (status != NO_ERROR) {
890        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, mStreamType, status);
891        goto release;
892    }
893
894    uint32_t afSampleRate;
895    status = AudioSystem::getSamplingRate(output, mStreamType, &afSampleRate);
896    if (status != NO_ERROR) {
897        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, mStreamType, status);
898        goto release;
899    }
900
901    // Client decides whether the track is TIMED (see below), but can only express a preference
902    // for FAST.  Server will perform additional tests.
903    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
904            // either of these use cases:
905            // use case 1: shared buffer
906            (mSharedBuffer != 0) ||
907            // use case 2: callback transfer mode
908            (mTransfer == TRANSFER_CALLBACK)) &&
909            // matching sample rate
910            (mSampleRate == afSampleRate))) {
911        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
912        // once denied, do not request again if IAudioTrack is re-created
913        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
914    }
915    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
916
917    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
918    //  n = 1   fast track with single buffering; nBuffering is ignored
919    //  n = 2   fast track with double buffering
920    //  n = 2   normal track, no sample rate conversion
921    //  n = 3   normal track, with sample rate conversion
922    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
923    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
924    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
925
926    mNotificationFramesAct = mNotificationFramesReq;
927
928    size_t frameCount = mReqFrameCount;
929    if (!audio_is_linear_pcm(mFormat)) {
930
931        if (mSharedBuffer != 0) {
932            // Same comment as below about ignoring frameCount parameter for set()
933            frameCount = mSharedBuffer->size();
934        } else if (frameCount == 0) {
935            frameCount = afFrameCount;
936        }
937        if (mNotificationFramesAct != frameCount) {
938            mNotificationFramesAct = frameCount;
939        }
940    } else if (mSharedBuffer != 0) {
941
942        // Ensure that buffer alignment matches channel count
943        // 8-bit data in shared memory is not currently supported by AudioFlinger
944        size_t alignment = /* mFormat == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
945        if (mChannelCount > 1) {
946            // More than 2 channels does not require stronger alignment than stereo
947            alignment <<= 1;
948        }
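        // For example, 16-bit mono data only needs 2-byte alignment, while 16-bit
        // stereo (or more channels) needs the shared buffer address to be 4-byte aligned.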
949        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
950            ALOGE("Invalid buffer alignment: address %p, channel count %u",
951                    mSharedBuffer->pointer(), mChannelCount);
952            status = BAD_VALUE;
953            goto release;
954        }
955
956        // When initializing a shared buffer AudioTrack via constructors,
957        // there's no frameCount parameter.
958        // But when initializing a shared buffer AudioTrack via set(),
959        // there _is_ a frameCount parameter.  We silently ignore it.
960        frameCount = mSharedBuffer->size()/mChannelCount/sizeof(int16_t);
961
962    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
963
964        // FIXME move these calculations and associated checks to server
965
966        // Ensure that buffer depth covers at least audio hardware latency
967        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
968        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
969                afFrameCount, minBufCount, afSampleRate, afLatency);
970        if (minBufCount <= nBuffering) {
971            minBufCount = nBuffering;
972        }
973
974        size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
975        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
976                ", afLatency=%d",
977                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
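        // Worked example with hypothetical values: afLatency = 160 ms, afFrameCount = 960,
        // afSampleRate = 48000 and mSampleRate = 44100 give minBufCount = 8, so
        // minFrameCount = (960 * 44100 * 8) / 48000 = 7056 frames.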
978
979        if (frameCount == 0) {
980            frameCount = minFrameCount;
981        } else if (frameCount < minFrameCount) {
982            // not ALOGW because it happens all the time when playing key clicks over A2DP
983            ALOGV("Minimum buffer size corrected from %d to %d",
984                     frameCount, minFrameCount);
985            frameCount = minFrameCount;
986        }
987        // Make sure that application is notified with sufficient margin before underrun
988        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
989            mNotificationFramesAct = frameCount/nBuffering;
990        }
991
992    } else {
993        // For fast tracks, the frame count calculations and checks are done by server
994    }
995
996    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
997    if (mIsTimed) {
998        trackFlags |= IAudioFlinger::TRACK_TIMED;
999    }
1000
1001    pid_t tid = -1;
1002    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1003        trackFlags |= IAudioFlinger::TRACK_FAST;
1004        if (mAudioTrackThread != 0) {
1005            tid = mAudioTrackThread->getTid();
1006        }
1007    }
1008
1009    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1010        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1011    }
1012
1013    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1014                                // but we will still need the original value also
1015    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
1016                                                      mSampleRate,
1017                                                      // AudioFlinger only sees 16-bit PCM
1018                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT ?
1019                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
1020                                                      mChannelMask,
1021                                                      &temp,
1022                                                      &trackFlags,
1023                                                      mSharedBuffer,
1024                                                      output,
1025                                                      tid,
1026                                                      &mSessionId,
1027                                                      mClientUid,
1028                                                      &status);
1029
1030    if (status != NO_ERROR) {
1031        ALOGE("AudioFlinger could not create track, status: %d", status);
1032        goto release;
1033    }
1034    ALOG_ASSERT(track != 0);
1035
1036    // AudioFlinger now owns the reference to the I/O handle,
1037    // so we are no longer responsible for releasing it.
1038
1039    sp<IMemory> iMem = track->getCblk();
1040    if (iMem == 0) {
1041        ALOGE("Could not get control block");
1042        return NO_INIT;
1043    }
1044    void *iMemPointer = iMem->pointer();
1045    if (iMemPointer == NULL) {
1046        ALOGE("Could not get control block pointer");
1047        return NO_INIT;
1048    }
1049    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1050    if (mAudioTrack != 0) {
1051        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1052        mDeathNotifier.clear();
1053    }
1054    mAudioTrack = track;
1055
1056    mCblkMemory = iMem;
1057    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1058    mCblk = cblk;
1059    // note that temp is the (possibly revised) value of frameCount
1060    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1061        // In current design, AudioTrack client checks and ensures frame count validity before
1062        // passing it to AudioFlinger so AudioFlinger should not return a different value except
1063        // for fast track as it uses a special method of assigning frame count.
1064        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
1065    }
1066    frameCount = temp;
1067
1068    mAwaitBoost = false;
1069    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1070        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1071            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
1072            mAwaitBoost = true;
1073            if (mSharedBuffer == 0) {
1074                // Theoretically double-buffering is not required for fast tracks,
1075                // due to tighter scheduling.  But in practice, to accommodate kernels with
1076                // scheduling jitter, and apps with computation jitter, we use double-buffering.
1077                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1078                    mNotificationFramesAct = frameCount/nBuffering;
1079                }
1080            }
1081        } else {
1082            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1083            // once denied, do not request again if IAudioTrack is re-created
1084            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1085            if (mSharedBuffer == 0) {
1086                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1087                    mNotificationFramesAct = frameCount/nBuffering;
1088                }
1089            }
1090        }
1091    }
1092    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1093        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1094            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1095        } else {
1096            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1097            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1098            // FIXME This is a warning, not an error, so don't return error status
1099            //return NO_INIT;
1100        }
1101    }
1102
1103    // We retain a copy of the I/O handle, but don't own the reference
1104    mOutput = output;
1105    mRefreshRemaining = true;
1106
1107    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1108    // is the value of pointer() for the shared buffer, otherwise buffers points
1109    // immediately after the control block.  This address is for the mapping within client
1110    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1111    void* buffers;
1112    if (mSharedBuffer == 0) {
1113        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1114    } else {
1115        buffers = mSharedBuffer->pointer();
1116    }
1117
1118    mAudioTrack->attachAuxEffect(mAuxEffectId);
1119    // FIXME don't believe this lie
1120    mLatency = afLatency + (1000*frameCount) / mSampleRate;
1121
1122    mFrameCount = frameCount;
1123    // If IAudioTrack is re-created, don't let the requested frameCount
1124    // decrease.  This can confuse clients that cache frameCount().
1125    if (frameCount > mReqFrameCount) {
1126        mReqFrameCount = frameCount;
1127    }
1128
1129    // update proxy
1130    if (mSharedBuffer == 0) {
1131        mStaticProxy.clear();
1132        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1133    } else {
1134        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1135        mProxy = mStaticProxy;
1136    }
1137    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[AUDIO_INTERLEAVE_RIGHT] * 0x1000)) << 16) |
1138            uint16_t(mVolume[AUDIO_INTERLEAVE_LEFT] * 0x1000));
1139    mProxy->setSendLevel(mSendLevel);
1140    mProxy->setSampleRate(mSampleRate);
1141    mProxy->setEpoch(epoch);
1142    mProxy->setMinimum(mNotificationFramesAct);
1143
1144    mDeathNotifier = new DeathNotifier(this);
1145    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1146
1147    return NO_ERROR;
1148    }
1149
1150release:
1151    AudioSystem::releaseOutput(output);
1152    if (status == NO_ERROR) {
1153        status = NO_INIT;
1154    }
1155    return status;
1156}
1157
1158status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1159{
1160    if (audioBuffer == NULL) {
1161        return BAD_VALUE;
1162    }
1163    if (mTransfer != TRANSFER_OBTAIN) {
1164        audioBuffer->frameCount = 0;
1165        audioBuffer->size = 0;
1166        audioBuffer->raw = NULL;
1167        return INVALID_OPERATION;
1168    }
1169
1170    const struct timespec *requested;
1171    struct timespec timeout;
1172    if (waitCount == -1) {
1173        requested = &ClientProxy::kForever;
1174    } else if (waitCount == 0) {
1175        requested = &ClientProxy::kNonBlocking;
1176    } else if (waitCount > 0) {
1177        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1178        timeout.tv_sec = ms / 1000;
1179        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1180        requested = &timeout;
1181    } else {
1182        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1183        requested = NULL;
1184    }
1185    return obtainBuffer(audioBuffer, requested);
1186}
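// Illustrative sketch (not part of the original file) of the obtain/release pattern
// used in TRANSFER_OBTAIN mode; it mirrors what write() does internally below.
// "track", "framesToWrite" and "sourcePcm" are hypothetical client-side names:
//
//     AudioTrack::Buffer audioBuffer;
//     audioBuffer.frameCount = framesToWrite;
//     status_t err = track->obtainBuffer(&audioBuffer, -1 /*waitCount: block forever*/);
//     if (err == NO_ERROR) {
//         memcpy(audioBuffer.raw, sourcePcm, audioBuffer.size);   // fill with PCM data
//         track->releaseBuffer(&audioBuffer);
//     }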
1187
1188status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1189        struct timespec *elapsed, size_t *nonContig)
1190{
1191    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1192    uint32_t oldSequence = 0;
1193    uint32_t newSequence;
1194
1195    Proxy::Buffer buffer;
1196    status_t status = NO_ERROR;
1197
1198    static const int32_t kMaxTries = 5;
1199    int32_t tryCounter = kMaxTries;
1200
1201    do {
1202        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1203        // keep them from going away if another thread re-creates the track during obtainBuffer()
1204        sp<AudioTrackClientProxy> proxy;
1205        sp<IMemory> iMem;
1206
1207        {   // start of lock scope
1208            AutoMutex lock(mLock);
1209
1210            newSequence = mSequence;
1211            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1212            if (status == DEAD_OBJECT) {
1213                // re-create track, unless someone else has already done so
1214                if (newSequence == oldSequence) {
1215                    status = restoreTrack_l("obtainBuffer");
1216                    if (status != NO_ERROR) {
1217                        buffer.mFrameCount = 0;
1218                        buffer.mRaw = NULL;
1219                        buffer.mNonContig = 0;
1220                        break;
1221                    }
1222                }
1223            }
1224            oldSequence = newSequence;
1225
1226            // Keep the extra references
1227            proxy = mProxy;
1228            iMem = mCblkMemory;
1229
1230            if (mState == STATE_STOPPING) {
1231                status = -EINTR;
1232                buffer.mFrameCount = 0;
1233                buffer.mRaw = NULL;
1234                buffer.mNonContig = 0;
1235                break;
1236            }
1237
1238            // Non-blocking if track is stopped or paused
1239            if (mState != STATE_ACTIVE) {
1240                requested = &ClientProxy::kNonBlocking;
1241            }
1242
1243        }   // end of lock scope
1244
1245        buffer.mFrameCount = audioBuffer->frameCount;
1246        // FIXME each retry restarts the requested timeout and the elapsed time from scratch
1247        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1248
1249    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1250
1251    audioBuffer->frameCount = buffer.mFrameCount;
1252    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1253    audioBuffer->raw = buffer.mRaw;
1254    if (nonContig != NULL) {
1255        *nonContig = buffer.mNonContig;
1256    }
1257    return status;
1258}
1259
1260void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1261{
1262    if (mTransfer == TRANSFER_SHARED) {
1263        return;
1264    }
1265
1266    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1267    if (stepCount == 0) {
1268        return;
1269    }
1270
1271    Proxy::Buffer buffer;
1272    buffer.mFrameCount = stepCount;
1273    buffer.mRaw = audioBuffer->raw;
1274
1275    AutoMutex lock(mLock);
1276    mInUnderrun = false;
1277    mProxy->releaseBuffer(&buffer);
1278
1279    // restart track if it was disabled by audioflinger due to previous underrun
1280    if (mState == STATE_ACTIVE) {
1281        audio_track_cblk_t* cblk = mCblk;
1282        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1283            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1284            // FIXME ignoring status
1285            mAudioTrack->start();
1286        }
1287    }
1288}
1289
1290// -------------------------------------------------------------------------
1291
1292ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1293{
1294    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1295        return INVALID_OPERATION;
1296    }
1297
1298    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1299        // Sanity-check: the user is most likely passing an error code, and it would
1300        // make the return value ambiguous (actualSize vs error).
1301        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1302        return BAD_VALUE;
1303    }
1304
1305    size_t written = 0;
1306    Buffer audioBuffer;
1307
1308    while (userSize >= mFrameSize) {
1309        audioBuffer.frameCount = userSize / mFrameSize;
1310
1311        status_t err = obtainBuffer(&audioBuffer,
1312                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1313        if (err < 0) {
1314            if (written > 0) {
1315                break;
1316            }
1317            return ssize_t(err);
1318        }
1319
1320        size_t toWrite;
1321        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1322            // Divide capacity by 2 to take expansion into account
1323            toWrite = audioBuffer.size >> 1;
1324            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
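            // toWrite counts source (8-bit) samples: e.g. a 4096-byte 16-bit buffer
            // accepts 2048 source bytes, which the conversion above expands to fill
            // all 4096 destination bytes.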
1325        } else {
1326            toWrite = audioBuffer.size;
1327            memcpy(audioBuffer.i8, buffer, toWrite);
1328        }
1329        buffer = ((const char *) buffer) + toWrite;
1330        userSize -= toWrite;
1331        written += toWrite;
1332
1333        releaseBuffer(&audioBuffer);
1334    }
1335
1336    return written;
1337}
1338
1339// -------------------------------------------------------------------------
1340
1341TimedAudioTrack::TimedAudioTrack() {
1342    mIsTimed = true;
1343}
1344
1345status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1346{
1347    AutoMutex lock(mLock);
1348    status_t result = UNKNOWN_ERROR;
1349
1350#if 1
1351    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1352    // while we are accessing the cblk
1353    sp<IAudioTrack> audioTrack = mAudioTrack;
1354    sp<IMemory> iMem = mCblkMemory;
1355#endif
1356
1357    // If the track is not already invalid, try to allocate a buffer.  If the alloc
1358    // fails, indicating that the server is dead, flag the track as invalid so
1359    // we can attempt to restore it in just a bit.
1360    audio_track_cblk_t* cblk = mCblk;
1361    if (!(cblk->mFlags & CBLK_INVALID)) {
1362        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1363        if (result == DEAD_OBJECT) {
1364            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1365        }
1366    }
1367
1368    // If the track is invalid at this point, attempt to restore it and try the
1369    // allocation one more time.
1370    if (cblk->mFlags & CBLK_INVALID) {
1371        result = restoreTrack_l("allocateTimedBuffer");
1372
1373        if (result == NO_ERROR) {
1374            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1375        }
1376    }
1377
1378    return result;
1379}
1380
1381status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1382                                           int64_t pts)
1383{
1384    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1385    {
1386        AutoMutex lock(mLock);
1387        audio_track_cblk_t* cblk = mCblk;
1388        // restart track if it was disabled by audioflinger due to previous underrun
1389        if (buffer->size() != 0 && status == NO_ERROR &&
1390                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1391            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1392            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1393            // FIXME ignoring status
1394            mAudioTrack->start();
1395        }
1396    }
1397    return status;
1398}
1399
1400status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1401                                                TargetTimeline target)
1402{
1403    return mAudioTrack->setMediaTimeTransform(xform, target);
1404}
1405
1406// -------------------------------------------------------------------------
1407
1408nsecs_t AudioTrack::processAudioBuffer()
1409{
1410    // Currently the AudioTrack thread is not created if there are no callbacks.
1411    // Would it ever make sense to run the thread, even without callbacks?
1412    // If so, then replace this by checks at each use for mCbf != NULL.
1413    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1414
1415    mLock.lock();
1416    if (mAwaitBoost) {
1417        mAwaitBoost = false;
1418        mLock.unlock();
1419        static const int32_t kMaxTries = 5;
1420        int32_t tryCounter = kMaxTries;
1421        uint32_t pollUs = 10000;
1422        do {
1423            int policy = sched_getscheduler(0);
1424            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1425                break;
1426            }
1427            usleep(pollUs);
1428            pollUs <<= 1;
1429        } while (tryCounter-- > 0);
1430        if (tryCounter < 0) {
1431            ALOGE("did not receive expected priority boost on time");
1432        }
1433        // Run again immediately
1434        return 0;
1435    }
1436
1437    // Can only reference mCblk while locked
1438    int32_t flags = android_atomic_and(
1439        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1440
1441    // Check for track invalidation
1442    if (flags & CBLK_INVALID) {
1443        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear
1444        // the AudioSystem cache. We should not exit here, but only after calling the callback,
1445        // so that the upper layers can recreate the track.
1446        if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
1447            status_t status = restoreTrack_l("processAudioBuffer");
1448            mLock.unlock();
1449            // Run again immediately, but with a new IAudioTrack
1450            return 0;
1451        }
1452    }
1453
1454    bool waitStreamEnd = mState == STATE_STOPPING;
1455    bool active = mState == STATE_ACTIVE;
1456
1457    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1458    bool newUnderrun = false;
1459    if (flags & CBLK_UNDERRUN) {
1460#if 0
1461        // Currently in shared buffer mode, when the server reaches the end of buffer,
1462        // the track stays active in continuous underrun state.  It's up to the application
1463        // to pause or stop the track, or set the position to a new offset within buffer.
1464        // This was some experimental code to auto-pause on underrun.   Keeping it here
1465        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1466        if (mTransfer == TRANSFER_SHARED) {
1467            mState = STATE_PAUSED;
1468            active = false;
1469        }
1470#endif
1471        if (!mInUnderrun) {
1472            mInUnderrun = true;
1473            newUnderrun = true;
1474        }
1475    }
1476
1477    // Get current position of server
1478    size_t position = mProxy->getPosition();
1479
1480    // Manage marker callback
1481    bool markerReached = false;
1482    size_t markerPosition = mMarkerPosition;
1483    // FIXME fails for wraparound, need 64 bits
1484    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1485        mMarkerReached = markerReached = true;
1486    }
1487
1488    // Determine number of new position callback(s) that will be needed, while locked
1489    size_t newPosCount = 0;
1490    size_t newPosition = mNewPosition;
1491    size_t updatePeriod = mUpdatePeriod;
1492    // FIXME fails for wraparound, need 64 bits
1493    if (updatePeriod > 0 && position >= newPosition) {
1494        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1495        mNewPosition += updatePeriod * newPosCount;
1496    }
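    // For example (hypothetical values): with updatePeriod = 1000 frames, newPosition = 2000
    // and position = 5000, callbacks are due at 2000, 3000, 4000 and 5000, so
    // newPosCount = ((5000 - 2000) / 1000) + 1 = 4 and mNewPosition becomes 6000.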
1497
1498    // Cache other fields that will be needed soon
1499    uint32_t loopPeriod = mLoopPeriod;
1500    uint32_t sampleRate = mSampleRate;
1501    uint32_t notificationFrames = mNotificationFramesAct;
1502    if (mRefreshRemaining) {
1503        mRefreshRemaining = false;
1504        mRemainingFrames = notificationFrames;
1505        mRetryOnPartialBuffer = false;
1506    }
1507    size_t misalignment = mProxy->getMisalignment();
1508    uint32_t sequence = mSequence;
1509    sp<AudioTrackClientProxy> proxy = mProxy;
1510
1511    // These fields don't need to be cached, because they are assigned only by set():
1512    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1513    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1514
1515    mLock.unlock();
1516
1517    if (waitStreamEnd) {
1518        struct timespec timeout;
1519        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1520        timeout.tv_nsec = 0;
1521
1522        status_t status = proxy->waitStreamEndDone(&timeout);
1523        switch (status) {
1524        case NO_ERROR:
1525        case DEAD_OBJECT:
1526        case TIMED_OUT:
1527            mCbf(EVENT_STREAM_END, mUserData, NULL);
1528            {
1529                AutoMutex lock(mLock);
1530                // The previously assigned value of waitStreamEnd is no longer valid,
1531                // since the mutex has been unlocked and either the callback handler
1532                // or another thread could have re-started the AudioTrack during that time.
1533                waitStreamEnd = mState == STATE_STOPPING;
1534                if (waitStreamEnd) {
1535                    mState = STATE_STOPPED;
1536                }
1537            }
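            // If the track is still stopping and the server is alive, pause the callback thread
            // until the client restarts the track.  On DEAD_OBJECT, fall through and return 0 so
            // that the next pass can detect the invalidated track and attempt a restore.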
1538            if (waitStreamEnd && status != DEAD_OBJECT) {
1539                return NS_INACTIVE;
1540            }
1541            break;
1542        }
1543        return 0;
1544    }
1545
1546    // perform callbacks while unlocked
1547    if (newUnderrun) {
1548        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1549    }
1550    // FIXME we will miss loops if loop cycle was signaled several times since last call
1551    //       to processAudioBuffer()
1552    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1553        mCbf(EVENT_LOOP_END, mUserData, NULL);
1554    }
1555    if (flags & CBLK_BUFFER_END) {
1556        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1557    }
1558    if (markerReached) {
1559        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1560    }
1561    while (newPosCount > 0) {
1562        size_t temp = newPosition;
1563        mCbf(EVENT_NEW_POS, mUserData, &temp);
1564        newPosition += updatePeriod;
1565        newPosCount--;
1566    }
1567
1568    if (mObservedSequence != sequence) {
1569        mObservedSequence = sequence;
1570        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1571        // for offloaded tracks, just wait for the upper layers to recreate the track
1572        if (isOffloaded()) {
1573            return NS_INACTIVE;
1574        }
1575    }
1576
1577    // if inactive, then don't run me again until re-started
1578    if (!active) {
1579        return NS_INACTIVE;
1580    }
1581
1582    // Compute the estimated time until the next timed event (position, markers, loops)
1583    // FIXME only for non-compressed audio
1584    uint32_t minFrames = ~0;
1585    if (!markerReached && position < markerPosition) {
1586        minFrames = markerPosition - position;
1587    }
1588    if (loopPeriod > 0 && loopPeriod < minFrames) {
1589        minFrames = loopPeriod;
1590    }
1591    if (updatePeriod > 0 && updatePeriod < minFrames) {
1592        minFrames = updatePeriod;
1593    }
1594
1595    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1596    static const uint32_t kPoll = 0;
1597    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1598        minFrames = kPoll * notificationFrames;
1599    }
1600
1601    // Convert frame units to time units
1602    nsecs_t ns = NS_WHENEVER;
1603    if (minFrames != (uint32_t) ~0) {
1604        // This "fudge factor" avoids soaking the CPU, and compensates for late progress by the server
1605        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1606        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1607    }
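    // For example, 960 frames at 48000 Hz works out to 20 ms, so the thread would sleep for
    // roughly 30 ms including the fudge factor.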
1608
1609    // If not supplying data by EVENT_MORE_DATA, then we're done
1610    if (mTransfer != TRANSFER_CALLBACK) {
1611        return ns;
1612    }
1613
1614    struct timespec timeout;
1615    const struct timespec *requested = &ClientProxy::kForever;
1616    if (ns != NS_WHENEVER) {
1617        timeout.tv_sec = ns / 1000000000LL;
1618        timeout.tv_nsec = ns % 1000000000LL;
1619        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1620        requested = &timeout;
1621    }
1622
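    // Fill loop for TRANSFER_CALLBACK: obtain a writable region of the shared buffer, ask the
    // client to fill it via EVENT_MORE_DATA, then release the filled frames to the server.
    // The loop normally exits after a full notification period (mRemainingFrames) has been
    // supplied; it returns early on error, when the client supplies no data, or when only a
    // partial buffer is available and a later retry is preferable.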
1623    while (mRemainingFrames > 0) {
1624
1625        Buffer audioBuffer;
1626        audioBuffer.frameCount = mRemainingFrames;
1627        size_t nonContig;
1628        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1629        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1630                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1631        requested = &ClientProxy::kNonBlocking;
1632        size_t avail = audioBuffer.frameCount + nonContig;
1633        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1634                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1635        if (err != NO_ERROR) {
1636            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1637                    (isOffloaded() && (err == DEAD_OBJECT))) {
1638                return 0;
1639            }
1640            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1641            return NS_NEVER;
1642        }
1643
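        // On the first attempt after a notification, if the server can only provide part of the
        // requested frames, prefer sleeping until approximately when the remainder should be
        // writable rather than delivering a small EVENT_MORE_DATA; the 1.1 factor below adds a
        // ~10% margin to that estimate.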
1644        if (mRetryOnPartialBuffer && !isOffloaded()) {
1645            mRetryOnPartialBuffer = false;
1646            if (avail < mRemainingFrames) {
1647                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1648                if (ns < 0 || myns < ns) {
1649                    ns = myns;
1650                }
1651                return ns;
1652            }
1653        }
1654
1655        // Divide buffer size by 2 to take into account the expansion
1656        // due to 8 to 16 bit conversion: the callback must fill only half
1657        // of the destination buffer
1658        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1659            audioBuffer.size >>= 1;
1660        }
1661
1662        size_t reqSize = audioBuffer.size;
1663        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1664        size_t writtenSize = audioBuffer.size;
1665
1666        // Sanity check on returned size
1667        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1668            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %d bytes",
1669                    reqSize, (int) writtenSize);
1670            return NS_NEVER;
1671        }
1672
1673        if (writtenSize == 0) {
1674            // The callback is done filling buffers.
1675            // Keep this thread going to handle timed events and
1676            // still try to get more data at intervals of WAIT_PERIOD_MS,
1677            // but don't just loop and hog the CPU, so wait before retrying.
1678            return WAIT_PERIOD_MS * 1000000LL;
1679        }
1680
1681        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1682            // 8 to 16 bit conversion, note that source and destination are the same address
1683            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1684            audioBuffer.size <<= 1;
1685        }
1686
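        // mFrameSizeAF is the frame size as seen by the server (i.e. after any 8-bit to 16-bit
        // expansion), so this counts the frames actually written into the shared buffer.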
1687        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1688        audioBuffer.frameCount = releasedFrames;
1689        mRemainingFrames -= releasedFrames;
1690        if (misalignment >= releasedFrames) {
1691            misalignment -= releasedFrames;
1692        } else {
1693            misalignment = 0;
1694        }
1695
1696        releaseBuffer(&audioBuffer);
1697
1698        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1699        // if the callback does not accept the full chunk
1700        if (writtenSize < reqSize) {
1701            continue;
1702        }
1703
1704        // There could be enough non-contiguous frames available to satisfy the remaining request
1705        if (mRemainingFrames <= nonContig) {
1706            continue;
1707        }
1708
1709#if 0
1710        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1711        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1712        // that total to a sum == notificationFrames.
1713        if (0 < misalignment && misalignment <= mRemainingFrames) {
1714            mRemainingFrames = misalignment;
1715            return (mRemainingFrames * 1100000000LL) / sampleRate;
1716        }
1717#endif
1718
1719    }
1720    mRemainingFrames = notificationFrames;
1721    mRetryOnPartialBuffer = true;
1722
1723    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1724    return 0;
1725}
1726
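// Called with mLock held when the IAudioTrack has been invalidated (e.g. mediaserver death or an
// output change): creates a replacement IAudioTrack via createTrack_l() and, as far as possible,
// resumes playback from the last known position.  'from' names the caller and is used only for
// logging.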
1727status_t AudioTrack::restoreTrack_l(const char *from)
1728{
1729    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1730          isOffloaded_l() ? "Offloaded" : "PCM", from);
1731    ++mSequence;
1732    status_t result;
1733
1734    // refresh the audio configuration cache in this process to make sure we get new
1735    // output parameters in createTrack_l()
1736    AudioSystem::clearAudioConfigCache();
1737
1738    if (isOffloaded_l()) {
1739        // FIXME re-creation of offloaded tracks is not yet implemented
1740        return DEAD_OBJECT;
1741    }
1742
1743    // If a new IAudioTrack is created, createTrack_l() will modify the
1744    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1745    // It will also drop the strong references to the previous IAudioTrack and IMemory.
1746
1747    // take the frames that will be lost by track recreation into account in saved position
1748    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1749    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1750    result = createTrack_l(position /*epoch*/);
1751
1752    if (result == NO_ERROR) {
1753        // continue playback from last known position, but
1754        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1755        if (mStaticProxy != NULL) {
1756            mLoopPeriod = 0;
1757            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1758        }
1759        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1760        //       track destruction have been played? This is critical for the SoundPool implementation.
1761        //       This must be broken, and needs to be tested/debugged.
1762#if 0
1763        // restore write index and set other indexes to reflect empty buffer status
1764        if (!strcmp(from, "start")) {
1765            // Make sure that a client relying on callback events indicating underrun or
1766            // the actual amount of audio frames played (e.g SoundPool) receives them.
1767            if (mSharedBuffer == 0) {
1768                // restart playback even if buffer is not completely filled.
1769                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1770            }
1771        }
1772#endif
1773        if (mState == STATE_ACTIVE) {
1774            result = mAudioTrack->start();
1775        }
1776    }
1777    if (result != NO_ERROR) {
1778        // Use of direct and offloaded output streams is ref counted by audio policy manager.
1779#if 0   // FIXME This should no longer be needed
1780        // Use of direct and offloaded output streams is ref counted by audio policy manager.
1781        // As getOutput was called above and resulted in an output stream to be opened,
1782        // we need to release it.
1783        if (mOutput != 0) {
1784            AudioSystem::releaseOutput(mOutput);
1785            mOutput = 0;
1786        }
1787#endif
1788        ALOGW("restoreTrack_l() failed status %d", result);
1789        mState = STATE_STOPPED;
1790    }
1791
1792    return result;
1793}
1794
1795status_t AudioTrack::setParameters(const String8& keyValuePairs)
1796{
1797    AutoMutex lock(mLock);
1798    return mAudioTrack->setParameters(keyValuePairs);
1799}
1800
1801status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1802{
1803    AutoMutex lock(mLock);
1804    // FIXME not implemented for fast tracks; should use proxy and SSQ
1805    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1806        return INVALID_OPERATION;
1807    }
1808    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1809        return INVALID_OPERATION;
1810    }
1811    status_t status = mAudioTrack->getTimestamp(timestamp);
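    // The server reports a position relative to the current IAudioTrack; adding the proxy epoch
    // expresses it in frames written since this AudioTrack was created, so the value stays
    // continuous across a track restore.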
1812    if (status == NO_ERROR) {
1813        timestamp.mPosition += mProxy->getEpoch();
1814    }
1815    return status;
1816}
1817
1818String8 AudioTrack::getParameters(const String8& keys)
1819{
1820    audio_io_handle_t output = getOutput();
1821    if (output != 0) {
1822        return AudioSystem::getParameters(output, keys);
1823    } else {
1824        return String8::empty();
1825    }
1826}
1827
1828bool AudioTrack::isOffloaded() const
1829{
1830    AutoMutex lock(mLock);
1831    return isOffloaded_l();
1832}
1833
1834status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
1835{
1836
1837    const size_t SIZE = 256;
1838    char buffer[SIZE];
1839    String8 result;
1840
1841    result.append(" AudioTrack::dump\n");
1842    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1843            mVolume[0], mVolume[1]);
1844    result.append(buffer);
1845    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
1846            mChannelCount, mFrameCount);
1847    result.append(buffer);
1848    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1849    result.append(buffer);
1850    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1851    result.append(buffer);
1852    ::write(fd, result.string(), result.size());
1853    return NO_ERROR;
1854}
1855
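// Returns the count of frames, as tracked by the server in the shared control block, that could
// not be supplied in time by the client (underruns) for the current IAudioTrack.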
1856uint32_t AudioTrack::getUnderrunFrames() const
1857{
1858    AutoMutex lock(mLock);
1859    return mProxy->getUnderrunFrames();
1860}
1861
1862// =========================================================================
1863
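// Called by the binder framework when the server-side IAudioTrack dies (e.g. mediaserver crash).
// Notifying the proxy wakes any thread blocked in obtainBuffer() with DEAD_OBJECT, which in turn
// lets processAudioBuffer() detect the invalid track and attempt a restore.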
1864void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
1865{
1866    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1867    if (audioTrack != 0) {
1868        AutoMutex lock(audioTrack->mLock);
1869        audioTrack->mProxy->binderDied();
1870    }
1871}
1872
1873// =========================================================================
1874
1875AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1876    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1877      mIgnoreNextPausedInt(false)
1878{
1879}
1880
1881AudioTrack::AudioTrackThread::~AudioTrackThread()
1882{
1883}
1884
1885bool AudioTrack::AudioTrackThread::threadLoop()
1886{
1887    {
1888        AutoMutex _l(mMyLock);
1889        if (mPaused) {
1890            mMyCond.wait(mMyLock);
1891            // caller will check for exitPending()
1892            return true;
1893        }
1894        if (mIgnoreNextPausedInt) {
1895            mIgnoreNextPausedInt = false;
1896            mPausedInt = false;
1897        }
1898        if (mPausedInt) {
1899            if (mPausedNs > 0) {
1900                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1901            } else {
1902                mMyCond.wait(mMyLock);
1903            }
1904            mPausedInt = false;
1905            return true;
1906        }
1907    }
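    // processAudioBuffer() returns the time to wait before the next call:
    //   0            run again immediately
    //   NS_INACTIVE  pause until the track is restarted
    //   NS_NEVER     exit the callback thread
    //   NS_WHENEVER  no deadline; poll again after a default interval
    //   > 0          sleep for that many nanoseconds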
1908    nsecs_t ns = mReceiver.processAudioBuffer();
1909    switch (ns) {
1910    case 0:
1911        return true;
1912    case NS_INACTIVE:
1913        pauseInternal();
1914        return true;
1915    case NS_NEVER:
1916        return false;
1917    case NS_WHENEVER:
1918        // FIXME increase poll interval, or make event-driven
1919        ns = 1000000000LL;
1920        // fall through
1921    default:
1922        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", (long long) ns);
1923        pauseInternal(ns);
1924        return true;
1925    }
1926}
1927
1928void AudioTrack::AudioTrackThread::requestExit()
1929{
1930    // must be in this order to avoid a race condition
1931    Thread::requestExit();
1932    resume();
1933}
1934
1935void AudioTrack::AudioTrackThread::pause()
1936{
1937    AutoMutex _l(mMyLock);
1938    mPaused = true;
1939}
1940
1941void AudioTrack::AudioTrackThread::resume()
1942{
1943    AutoMutex _l(mMyLock);
1944    mIgnoreNextPausedInt = true;
1945    if (mPaused || mPausedInt) {
1946        mPaused = false;
1947        mPausedInt = false;
1948        mMyCond.signal();
1949    }
1950}
1951
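// Temporarily pause the callback thread for up to ns nanoseconds (or until resume() if ns is 0);
// unlike pause(), this internal pause is cleared automatically after the wait completes.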
1952void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1953{
1954    AutoMutex _l(mMyLock);
1955    mPausedInt = true;
1956    mPausedNs = ns;
1957}
1958
1959} // namespace android
1960