AudioTrack.cpp revision 8ba90326d683b035d99e24db669093e4602a7149
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <sys/resource.h>
23#include <audio_utils/primitives.h>
24#include <binder/IPCThreadState.h>
25#include <media/AudioTrack.h>
26#include <utils/Log.h>
27#include <private/media/AudioTrackShared.h>
28#include <media/IAudioFlinger.h>
29
30#define WAIT_PERIOD_MS                  10
31#define WAIT_STREAM_END_TIMEOUT_SEC     120
32
33
34namespace android {
35// ---------------------------------------------------------------------------
36
37// static
38status_t AudioTrack::getMinFrameCount(
39        size_t* frameCount,
40        audio_stream_type_t streamType,
41        uint32_t sampleRate)
42{
43    if (frameCount == NULL) {
44        return BAD_VALUE;
45    }
46
47    // default to 0 in case of error
48    *frameCount = 0;
49
50    // FIXME merge with similar code in createTrack_l(), except we're missing
51    //       some information here that is available in createTrack_l():
52    //          audio_io_handle_t output
53    //          audio_format_t format
54    //          audio_channel_mask_t channelMask
55    //          audio_output_flags_t flags
56    uint32_t afSampleRate;
57    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
58        return NO_INIT;
59    }
60    size_t afFrameCount;
61    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
62        return NO_INIT;
63    }
64    uint32_t afLatency;
65    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
66        return NO_INIT;
67    }
68
69    // Ensure that buffer depth covers at least audio hardware latency
70    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
71    if (minBufCount < 2) {
72        minBufCount = 2;
73    }
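    // Illustrative arithmetic (example values, not taken from this code): with afFrameCount = 1024,
    // afSampleRate = 44100 and afLatency = 96 ms, one hardware buffer covers
    // (1000 * 1024) / 44100 ~= 23 ms, so minBufCount = 96 / 23 = 4 buffers are needed to span
    // the reported hardware latency.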
74
75    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
76            afFrameCount * minBufCount * sampleRate / afSampleRate;
77    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
78            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
79    return NO_ERROR;
80}
81
82// ---------------------------------------------------------------------------
83
84AudioTrack::AudioTrack()
85    : mStatus(NO_INIT),
86      mIsTimed(false),
87      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
88      mPreviousSchedulingGroup(SP_DEFAULT)
89{
90}
91
92AudioTrack::AudioTrack(
93        audio_stream_type_t streamType,
94        uint32_t sampleRate,
95        audio_format_t format,
96        audio_channel_mask_t channelMask,
97        int frameCount,
98        audio_output_flags_t flags,
99        callback_t cbf,
100        void* user,
101        int notificationFrames,
102        int sessionId,
103        transfer_type transferType,
104        const audio_offload_info_t *offloadInfo,
105        int uid)
106    : mStatus(NO_INIT),
107      mIsTimed(false),
108      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
109      mPreviousSchedulingGroup(SP_DEFAULT)
110{
111    mStatus = set(streamType, sampleRate, format, channelMask,
112            frameCount, flags, cbf, user, notificationFrames,
113            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
114            offloadInfo, uid);
115}
116
117AudioTrack::AudioTrack(
118        audio_stream_type_t streamType,
119        uint32_t sampleRate,
120        audio_format_t format,
121        audio_channel_mask_t channelMask,
122        const sp<IMemory>& sharedBuffer,
123        audio_output_flags_t flags,
124        callback_t cbf,
125        void* user,
126        int notificationFrames,
127        int sessionId,
128        transfer_type transferType,
129        const audio_offload_info_t *offloadInfo,
130        int uid)
131    : mStatus(NO_INIT),
132      mIsTimed(false),
133      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
134      mPreviousSchedulingGroup(SP_DEFAULT)
135{
136    mStatus = set(streamType, sampleRate, format, channelMask,
137            0 /*frameCount*/, flags, cbf, user, notificationFrames,
138            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid);
139}
140
141AudioTrack::~AudioTrack()
142{
143    if (mStatus == NO_ERROR) {
144        // Make sure that the callback function exits in the case where it is looping
145        // on a buffer-full condition in obtainBuffer().
146        // Otherwise the callback thread will never exit.
147        stop();
148        if (mAudioTrackThread != 0) {
149            mProxy->interrupt();
150            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
151            mAudioTrackThread->requestExitAndWait();
152            mAudioTrackThread.clear();
153        }
154        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
155        mAudioTrack.clear();
156        IPCThreadState::self()->flushCommands();
157        AudioSystem::releaseAudioSessionId(mSessionId);
158    }
159}
160
161status_t AudioTrack::set(
162        audio_stream_type_t streamType,
163        uint32_t sampleRate,
164        audio_format_t format,
165        audio_channel_mask_t channelMask,
166        int frameCountInt,
167        audio_output_flags_t flags,
168        callback_t cbf,
169        void* user,
170        int notificationFrames,
171        const sp<IMemory>& sharedBuffer,
172        bool threadCanCallJava,
173        int sessionId,
174        transfer_type transferType,
175        const audio_offload_info_t *offloadInfo,
176        int uid)
177{
178    switch (transferType) {
179    case TRANSFER_DEFAULT:
180        if (sharedBuffer != 0) {
181            transferType = TRANSFER_SHARED;
182        } else if (cbf == NULL || threadCanCallJava) {
183            transferType = TRANSFER_SYNC;
184        } else {
185            transferType = TRANSFER_CALLBACK;
186        }
187        break;
188    case TRANSFER_CALLBACK:
189        if (cbf == NULL || sharedBuffer != 0) {
190            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
191            return BAD_VALUE;
192        }
193        break;
194    case TRANSFER_OBTAIN:
195    case TRANSFER_SYNC:
196        if (sharedBuffer != 0) {
197            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
198            return BAD_VALUE;
199        }
200        break;
201    case TRANSFER_SHARED:
202        if (sharedBuffer == 0) {
203            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
204            return BAD_VALUE;
205        }
206        break;
207    default:
208        ALOGE("Invalid transfer type %d", transferType);
209        return BAD_VALUE;
210    }
211    mTransfer = transferType;
212
213    // FIXME "int" here is legacy and will be replaced by size_t later
214    if (frameCountInt < 0) {
215        ALOGE("Invalid frame count %d", frameCountInt);
216        return BAD_VALUE;
217    }
218    size_t frameCount = frameCountInt;
219
220    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
221            sharedBuffer->size());
222
223    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
224
225    AutoMutex lock(mLock);
226
227    // invariant that mAudioTrack != 0 is true only after set() returns successfully
228    if (mAudioTrack != 0) {
229        ALOGE("Track already in use");
230        return INVALID_OPERATION;
231    }
232
233    mOutput = 0;
234
235    // handle default values first.
236    if (streamType == AUDIO_STREAM_DEFAULT) {
237        streamType = AUDIO_STREAM_MUSIC;
238    }
239
240    if (sampleRate == 0) {
241        uint32_t afSampleRate;
242        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
243            return NO_INIT;
244        }
245        sampleRate = afSampleRate;
246    }
247    mSampleRate = sampleRate;
248
249    // these below should probably come from the audioFlinger too...
250    if (format == AUDIO_FORMAT_DEFAULT) {
251        format = AUDIO_FORMAT_PCM_16_BIT;
252    }
253
254    // validate parameters
255    if (!audio_is_valid_format(format)) {
256        ALOGE("Invalid format %d", format);
257        return BAD_VALUE;
258    }
259
260    if (!audio_is_output_channel(channelMask)) {
261        ALOGE("Invalid channel mask %#x", channelMask);
262        return BAD_VALUE;
263    }
264
265    // AudioFlinger does not currently support 8-bit data in shared memory
266    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
267        ALOGE("8-bit data in shared memory is not supported");
268        return BAD_VALUE;
269    }
270
271    // force direct flag if format is not linear PCM
272    // or offload was requested
273    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
274            || !audio_is_linear_pcm(format)) {
275        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
276                    ? "Offload request, forcing to Direct Output"
277                    : "Not linear PCM, forcing to Direct Output");
278        flags = (audio_output_flags_t)
279                // FIXME why can't we allow direct AND fast?
280                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
281    }
282    // only allow deep buffering for music stream type
283    if (streamType != AUDIO_STREAM_MUSIC) {
284        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
285    }
286
287    mChannelMask = channelMask;
288    uint32_t channelCount = popcount(channelMask);
289    mChannelCount = channelCount;
290
291    if (audio_is_linear_pcm(format)) {
292        mFrameSize = channelCount * audio_bytes_per_sample(format);
293        mFrameSizeAF = channelCount * sizeof(int16_t);
294    } else {
295        mFrameSize = sizeof(uint8_t);
296        mFrameSizeAF = sizeof(uint8_t);
297    }
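    // Note: mFrameSize is the frame size as seen by the client, while mFrameSizeAF is the frame
    // size used for the buffer shared with AudioFlinger, which handles linear PCM as 16-bit
    // samples; 8-bit client data is therefore expanded to 16-bit before it reaches the server
    // (see write() and processAudioBuffer() below).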
298
299    audio_io_handle_t output = AudioSystem::getOutput(
300                                    streamType,
301                                    sampleRate, format, channelMask,
302                                    flags,
303                                    offloadInfo);
304
305    if (output == 0) {
306        ALOGE("Could not get audio output for stream type %d", streamType);
307        return BAD_VALUE;
308    }
309
310    mVolume[LEFT] = 1.0f;
311    mVolume[RIGHT] = 1.0f;
312    mSendLevel = 0.0f;
313    mFrameCount = frameCount;
314    mReqFrameCount = frameCount;
315    mNotificationFramesReq = notificationFrames;
316    mNotificationFramesAct = 0;
317    mSessionId = sessionId;
318    if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
319        mClientUid = IPCThreadState::self()->getCallingUid();
320    } else {
321        mClientUid = uid;
322    }
323    mAuxEffectId = 0;
324    mFlags = flags;
325    mCbf = cbf;
326
327    if (cbf != NULL) {
328        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
329        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
330    }
331
332    // create the IAudioTrack
333    status_t status = createTrack_l(streamType,
334                                  sampleRate,
335                                  format,
336                                  frameCount,
337                                  flags,
338                                  sharedBuffer,
339                                  output,
340                                  0 /*epoch*/);
341
342    if (status != NO_ERROR) {
343        if (mAudioTrackThread != 0) {
344            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
345            mAudioTrackThread->requestExitAndWait();
346            mAudioTrackThread.clear();
347        }
348        // Use of direct and offloaded output streams is reference counted by the audio policy
349        // manager. Since getOutput() was called above and resulted in an output stream being
350        // opened, we need to release it here.
351        AudioSystem::releaseOutput(output);
352        return status;
353    }
354
355    mStatus = NO_ERROR;
356    mStreamType = streamType;
357    mFormat = format;
358    mSharedBuffer = sharedBuffer;
359    mState = STATE_STOPPED;
360    mUserData = user;
361    mLoopPeriod = 0;
362    mMarkerPosition = 0;
363    mMarkerReached = false;
364    mNewPosition = 0;
365    mUpdatePeriod = 0;
366    AudioSystem::acquireAudioSessionId(mSessionId);
367    mSequence = 1;
368    mObservedSequence = mSequence;
369    mInUnderrun = false;
370    mOutput = output;
371
372    return NO_ERROR;
373}
374
375// -------------------------------------------------------------------------
376
377status_t AudioTrack::start()
378{
379    AutoMutex lock(mLock);
380
381    if (mState == STATE_ACTIVE) {
382        return INVALID_OPERATION;
383    }
384
385    mInUnderrun = true;
386
387    State previousState = mState;
388    if (previousState == STATE_PAUSED_STOPPING) {
389        mState = STATE_STOPPING;
390    } else {
391        mState = STATE_ACTIVE;
392    }
393    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
394        // reset current position as seen by client to 0
395        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
396        // force refresh of remaining frames by processAudioBuffer() as last
397        // write before stop could be partial.
398        mRefreshRemaining = true;
399    }
400    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
401    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
402
403    sp<AudioTrackThread> t = mAudioTrackThread;
404    if (t != 0) {
405        if (previousState == STATE_STOPPING) {
406            mProxy->interrupt();
407        } else {
408            t->resume();
409        }
410    } else {
411        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
412        get_sched_policy(0, &mPreviousSchedulingGroup);
413        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
414    }
415
416    status_t status = NO_ERROR;
417    if (!(flags & CBLK_INVALID)) {
418        status = mAudioTrack->start();
419        if (status == DEAD_OBJECT) {
420            flags |= CBLK_INVALID;
421        }
422    }
423    if (flags & CBLK_INVALID) {
424        status = restoreTrack_l("start");
425    }
426
427    if (status != NO_ERROR) {
428        ALOGE("start() status %d", status);
429        mState = previousState;
430        if (t != 0) {
431            if (previousState != STATE_STOPPING) {
432                t->pause();
433            }
434        } else {
435            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
436            set_sched_policy(0, mPreviousSchedulingGroup);
437        }
438    }
439
440    return status;
441}
442
443void AudioTrack::stop()
444{
445    AutoMutex lock(mLock);
446    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
447        return;
448    }
449
450    if (isOffloaded()) {
451        mState = STATE_STOPPING;
452    } else {
453        mState = STATE_STOPPED;
454    }
455
456    mProxy->interrupt();
457    mAudioTrack->stop();
458    // the playback head position will reset to 0, so if a marker is set, we need
459    // to activate it again
460    mMarkerReached = false;
461#if 0
462    // Force a flush if a shared buffer is used; otherwise audioflinger
463    // will not stop before the end of the buffer is reached.
464    // This may be needed to make sure that playback actually stops, most likely when looping is on.
465    if (mSharedBuffer != 0) {
466        flush_l();
467    }
468#endif
469
470    sp<AudioTrackThread> t = mAudioTrackThread;
471    if (t != 0) {
472        if (!isOffloaded()) {
473            t->pause();
474        }
475    } else {
476        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
477        set_sched_policy(0, mPreviousSchedulingGroup);
478    }
479}
480
481bool AudioTrack::stopped() const
482{
483    AutoMutex lock(mLock);
484    return mState != STATE_ACTIVE;
485}
486
487void AudioTrack::flush()
488{
489    if (mSharedBuffer != 0) {
490        return;
491    }
492    AutoMutex lock(mLock);
493    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
494        return;
495    }
496    flush_l();
497}
498
499void AudioTrack::flush_l()
500{
501    ALOG_ASSERT(mState != STATE_ACTIVE);
502
503    // clear playback marker and periodic update counter
504    mMarkerPosition = 0;
505    mMarkerReached = false;
506    mUpdatePeriod = 0;
507    mRefreshRemaining = true;
508
509    mState = STATE_FLUSHED;
510    if (isOffloaded()) {
511        mProxy->interrupt();
512    }
513    mProxy->flush();
514    mAudioTrack->flush();
515}
516
517void AudioTrack::pause()
518{
519    AutoMutex lock(mLock);
520    if (mState == STATE_ACTIVE) {
521        mState = STATE_PAUSED;
522    } else if (mState == STATE_STOPPING) {
523        mState = STATE_PAUSED_STOPPING;
524    } else {
525        return;
526    }
527    mProxy->interrupt();
528    mAudioTrack->pause();
529}
530
531status_t AudioTrack::setVolume(float left, float right)
532{
533    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
534        return BAD_VALUE;
535    }
536
537    AutoMutex lock(mLock);
538    mVolume[LEFT] = left;
539    mVolume[RIGHT] = right;
540
541    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
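    // The two gains are packed into a single 32-bit word for the control block: right channel in
    // the upper 16 bits, left channel in the lower 16 bits, each as fixed point with 0x1000
    // (effectively 4.12 format) representing unity gain.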
542
543    if (isOffloaded()) {
544        mAudioTrack->signal();
545    }
546    return NO_ERROR;
547}
548
549status_t AudioTrack::setVolume(float volume)
550{
551    return setVolume(volume, volume);
552}
553
554status_t AudioTrack::setAuxEffectSendLevel(float level)
555{
556    if (level < 0.0f || level > 1.0f) {
557        return BAD_VALUE;
558    }
559
560    AutoMutex lock(mLock);
561    mSendLevel = level;
562    mProxy->setSendLevel(level);
563
564    return NO_ERROR;
565}
566
567void AudioTrack::getAuxEffectSendLevel(float* level) const
568{
569    if (level != NULL) {
570        *level = mSendLevel;
571    }
572}
573
574status_t AudioTrack::setSampleRate(uint32_t rate)
575{
576    if (mIsTimed || isOffloaded()) {
577        return INVALID_OPERATION;
578    }
579
580    uint32_t afSamplingRate;
581    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
582        return NO_INIT;
583    }
584    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
585    if (rate == 0 || rate > afSamplingRate*2 ) {
586        return BAD_VALUE;
587    }
588
589    AutoMutex lock(mLock);
590    mSampleRate = rate;
591    mProxy->setSampleRate(rate);
592
593    return NO_ERROR;
594}
595
596uint32_t AudioTrack::getSampleRate() const
597{
598    if (mIsTimed) {
599        return 0;
600    }
601
602    AutoMutex lock(mLock);
603    return mSampleRate;
604}
605
606status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
607{
608    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
609        return INVALID_OPERATION;
610    }
611
612    if (loopCount == 0) {
613        ;
614    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
615            loopEnd - loopStart >= MIN_LOOP) {
616        ;
617    } else {
618        return BAD_VALUE;
619    }
620
621    AutoMutex lock(mLock);
622    // See setPosition() regarding setting parameters such as loop points or position while active
623    if (mState == STATE_ACTIVE) {
624        return INVALID_OPERATION;
625    }
626    setLoop_l(loopStart, loopEnd, loopCount);
627    return NO_ERROR;
628}
629
630void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
631{
632    // FIXME If setting a loop also sets position to start of loop, then
633    //       this is correct.  Otherwise it should be removed.
634    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
635    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
636    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
637}
638
639status_t AudioTrack::setMarkerPosition(uint32_t marker)
640{
641    // The only purpose of setting marker position is to get a callback
642    if (mCbf == NULL || isOffloaded()) {
643        return INVALID_OPERATION;
644    }
645
646    AutoMutex lock(mLock);
647    mMarkerPosition = marker;
648    mMarkerReached = false;
649
650    return NO_ERROR;
651}
652
653status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
654{
655    if (isOffloaded()) {
656        return INVALID_OPERATION;
657    }
658    if (marker == NULL) {
659        return BAD_VALUE;
660    }
661
662    AutoMutex lock(mLock);
663    *marker = mMarkerPosition;
664
665    return NO_ERROR;
666}
667
668status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
669{
670    // The only purpose of setting position update period is to get a callback
671    if (mCbf == NULL || isOffloaded()) {
672        return INVALID_OPERATION;
673    }
674
675    AutoMutex lock(mLock);
676    mNewPosition = mProxy->getPosition() + updatePeriod;
677    mUpdatePeriod = updatePeriod;
678    return NO_ERROR;
679}
680
681status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
682{
683    if (isOffloaded()) {
684        return INVALID_OPERATION;
685    }
686    if (updatePeriod == NULL) {
687        return BAD_VALUE;
688    }
689
690    AutoMutex lock(mLock);
691    *updatePeriod = mUpdatePeriod;
692
693    return NO_ERROR;
694}
695
696status_t AudioTrack::setPosition(uint32_t position)
697{
698    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
699        return INVALID_OPERATION;
700    }
701    if (position > mFrameCount) {
702        return BAD_VALUE;
703    }
704
705    AutoMutex lock(mLock);
706    // Currently we require that the player is inactive before setting parameters such as position
707    // or loop points.  Otherwise, there could be a race condition: the application could read the
708    // current position, compute a new position or loop parameters, and then set that position or
709    // loop parameters but it would do the "wrong" thing since the position has continued to advance
710    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
711    // to specify how it wants to handle such scenarios.
712    if (mState == STATE_ACTIVE) {
713        return INVALID_OPERATION;
714    }
715    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
716    mLoopPeriod = 0;
717    // FIXME Check whether loops and setting position are incompatible in old code.
718    // If we use setLoop for both purposes we lose the capability to set the position while looping.
719    mStaticProxy->setLoop(position, mFrameCount, 0);
720
721    return NO_ERROR;
722}
723
724status_t AudioTrack::getPosition(uint32_t *position) const
725{
726    if (position == NULL) {
727        return BAD_VALUE;
728    }
729
730    AutoMutex lock(mLock);
731    if (isOffloaded()) {
732        uint32_t dspFrames = 0;
733
734        if (mOutput != 0) {
735            uint32_t halFrames;
736            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
737        }
738        *position = dspFrames;
739    } else {
740        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
741        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
742                mProxy->getPosition();
743    }
744    return NO_ERROR;
745}
746
747status_t AudioTrack::getBufferPosition(size_t *position)
748{
749    if (mSharedBuffer == 0 || mIsTimed) {
750        return INVALID_OPERATION;
751    }
752    if (position == NULL) {
753        return BAD_VALUE;
754    }
755
756    AutoMutex lock(mLock);
757    *position = mStaticProxy->getBufferPosition();
758    return NO_ERROR;
759}
760
761status_t AudioTrack::reload()
762{
763    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
764        return INVALID_OPERATION;
765    }
766
767    AutoMutex lock(mLock);
768    // See setPosition() regarding setting parameters such as loop points or position while active
769    if (mState == STATE_ACTIVE) {
770        return INVALID_OPERATION;
771    }
772    mNewPosition = mUpdatePeriod;
773    mLoopPeriod = 0;
774    // FIXME The new code cannot reload while keeping a loop specified.
775    // Need to check how the old code handled this, and whether it's a significant change.
776    mStaticProxy->setLoop(0, mFrameCount, 0);
777    return NO_ERROR;
778}
779
780audio_io_handle_t AudioTrack::getOutput()
781{
782    AutoMutex lock(mLock);
783    return mOutput;
784}
785
786// must be called with mLock held
787audio_io_handle_t AudioTrack::getOutput_l()
788{
789    if (mOutput) {
790        return mOutput;
791    } else {
792        return AudioSystem::getOutput(mStreamType,
793                                      mSampleRate, mFormat, mChannelMask, mFlags);
794    }
795}
796
797status_t AudioTrack::attachAuxEffect(int effectId)
798{
799    AutoMutex lock(mLock);
800    status_t status = mAudioTrack->attachAuxEffect(effectId);
801    if (status == NO_ERROR) {
802        mAuxEffectId = effectId;
803    }
804    return status;
805}
806
807// -------------------------------------------------------------------------
808
809// must be called with mLock held
810status_t AudioTrack::createTrack_l(
811        audio_stream_type_t streamType,
812        uint32_t sampleRate,
813        audio_format_t format,
814        size_t frameCount,
815        audio_output_flags_t flags,
816        const sp<IMemory>& sharedBuffer,
817        audio_io_handle_t output,
818        size_t epoch)
819{
820    status_t status;
821    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
822    if (audioFlinger == 0) {
823        ALOGE("Could not get audioflinger");
824        return NO_INIT;
825    }
826
827    // Not all of these values are needed under all conditions, but it is easier to get them all
828
829    uint32_t afLatency;
830    status = AudioSystem::getLatency(output, streamType, &afLatency);
831    if (status != NO_ERROR) {
832        ALOGE("getLatency(%d) failed status %d", output, status);
833        return NO_INIT;
834    }
835
836    size_t afFrameCount;
837    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
838    if (status != NO_ERROR) {
839        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
840        return NO_INIT;
841    }
842
843    uint32_t afSampleRate;
844    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
845    if (status != NO_ERROR) {
846        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
847        return NO_INIT;
848    }
849
850    // Client decides whether the track is TIMED (see below), but can only express a preference
851    // for FAST.  Server will perform additional tests.
852    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
853            // either of these use cases:
854            // use case 1: shared buffer
855            (sharedBuffer != 0) ||
856            // use case 2: callback handler
857            (mCbf != NULL))) {
858        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
859        // once denied, do not request again if IAudioTrack is re-created
860        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
861        mFlags = flags;
862    }
863    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
864
865    // The client's AudioTrack buffer is divided into n parts for the purpose of wakeup by the server, where
866    //  n = 1   fast track; nBuffering is ignored
867    //  n = 2   normal track, no sample rate conversion
868    //  n = 3   normal track, with sample rate conversion
869    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
870    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
871    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
872
873    mNotificationFramesAct = mNotificationFramesReq;
874
875    if (!audio_is_linear_pcm(format)) {
876
877        if (sharedBuffer != 0) {
878            // Same comment as below about ignoring frameCount parameter for set()
879            frameCount = sharedBuffer->size();
880        } else if (frameCount == 0) {
881            frameCount = afFrameCount;
882        }
883        if (mNotificationFramesAct != frameCount) {
884            mNotificationFramesAct = frameCount;
885        }
886    } else if (sharedBuffer != 0) {
887
888        // Ensure that buffer alignment matches channel count
889        // 8-bit data in shared memory is not currently supported by AudioFlinger
890        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
891        if (mChannelCount > 1) {
892            // More than 2 channels does not require stronger alignment than stereo
893            alignment <<= 1;
894        }
895        if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
896            ALOGE("Invalid buffer alignment: address %p, channel count %u",
897                    sharedBuffer->pointer(), mChannelCount);
898            return BAD_VALUE;
899        }
900
901        // When initializing a shared buffer AudioTrack via constructors,
902        // there's no frameCount parameter.
903        // But when initializing a shared buffer AudioTrack via set(),
904        // there _is_ a frameCount parameter.  We silently ignore it.
905        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
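        // Illustrative example (values not from this code): a 64 KiB shared buffer holding
        // stereo 16-bit data yields 65536 / 2 / 2 = 16384 frames.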
906
907    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
908
909        // FIXME move these calculations and associated checks to server
910
911        // Ensure that buffer depth covers at least audio hardware latency
912        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
913        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
914                afFrameCount, minBufCount, afSampleRate, afLatency);
915        if (minBufCount <= nBuffering) {
916            minBufCount = nBuffering;
917        }
918
919        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
920        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
921                ", afLatency=%d",
922                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
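        // Illustrative arithmetic (example values, not from this code): afFrameCount = 1024,
        // sampleRate = 48000, minBufCount = 3, afSampleRate = 44100 gives
        // minFrameCount = (1024 * 48000 * 3) / 44100 = 3343 frames (integer division).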
923
924        if (frameCount == 0) {
925            frameCount = minFrameCount;
926        } else if (frameCount < minFrameCount) {
927            // not ALOGW because it happens all the time when playing key clicks over A2DP
928            ALOGV("Minimum buffer size corrected from %d to %d",
929                     frameCount, minFrameCount);
930            frameCount = minFrameCount;
931        }
932        // Make sure that application is notified with sufficient margin before underrun
933        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
934            mNotificationFramesAct = frameCount/nBuffering;
935        }
936
937    } else {
938        // For fast tracks, the frame count calculations and checks are done by server
939    }
940
941    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
942    if (mIsTimed) {
943        trackFlags |= IAudioFlinger::TRACK_TIMED;
944    }
945
946    pid_t tid = -1;
947    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
948        trackFlags |= IAudioFlinger::TRACK_FAST;
949        if (mAudioTrackThread != 0) {
950            tid = mAudioTrackThread->getTid();
951        }
952    }
953
954    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
955        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
956    }
957
958    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
959                                                      sampleRate,
960                                                      // AudioFlinger only sees 16-bit PCM
961                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
962                                                              AUDIO_FORMAT_PCM_16_BIT : format,
963                                                      mChannelMask,
964                                                      frameCount,
965                                                      &trackFlags,
966                                                      sharedBuffer,
967                                                      output,
968                                                      tid,
969                                                      &mSessionId,
970                                                      mName,
971                                                      mClientUid,
972                                                      &status);
973
974    if (track == 0) {
975        ALOGE("AudioFlinger could not create track, status: %d", status);
976        return status;
977    }
978    sp<IMemory> iMem = track->getCblk();
979    if (iMem == 0) {
980        ALOGE("Could not get control block");
981        return NO_INIT;
982    }
983    // invariant that mAudioTrack != 0 is true only after set() returns successfully
984    if (mAudioTrack != 0) {
985        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
986        mDeathNotifier.clear();
987    }
988    mAudioTrack = track;
989    mCblkMemory = iMem;
990    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
991    mCblk = cblk;
992    size_t temp = cblk->frameCount_;
993    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
994        // In current design, AudioTrack client checks and ensures frame count validity before
995        // passing it to AudioFlinger so AudioFlinger should not return a different value except
996        // for fast track as it uses a special method of assigning frame count.
997        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
998    }
999    frameCount = temp;
1000    mAwaitBoost = false;
1001    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1002        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1003            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
1004            mAwaitBoost = true;
1005            if (sharedBuffer == 0) {
1006                // double-buffering is not required for fast tracks, due to tighter scheduling
1007                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount) {
1008                    mNotificationFramesAct = frameCount;
1009                }
1010            }
1011        } else {
1012            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1013            // once denied, do not request again if IAudioTrack is re-created
1014            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
1015            mFlags = flags;
1016            if (sharedBuffer == 0) {
1017                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1018                    mNotificationFramesAct = frameCount/nBuffering;
1019                }
1020            }
1021        }
1022    }
1023    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1024        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1025            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1026        } else {
1027            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1028            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1029            mFlags = flags;
1030            return NO_INIT;
1031        }
1032    }
1033
1034    mRefreshRemaining = true;
1035
1036    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1037    // is the value of pointer() for the shared buffer, otherwise buffers points
1038    // immediately after the control block.  This address is for the mapping within client
1039    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1040    void* buffers;
1041    if (sharedBuffer == 0) {
1042        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1043    } else {
1044        buffers = sharedBuffer->pointer();
1045    }
1046
1047    mAudioTrack->attachAuxEffect(mAuxEffectId);
1048    // FIXME don't believe this lie
1049    mLatency = afLatency + (1000*frameCount) / sampleRate;
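    // The reported latency is the HAL/mixer latency plus the time to play out one full client
    // buffer (approximate, per the FIXME above); e.g. (illustrative) 4096 frames at 44100 Hz
    // adds (1000 * 4096) / 44100 = 92 ms.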
1050    mFrameCount = frameCount;
1051    // If IAudioTrack is re-created, don't let the requested frameCount
1052    // decrease.  This can confuse clients that cache frameCount().
1053    if (frameCount > mReqFrameCount) {
1054        mReqFrameCount = frameCount;
1055    }
1056
1057    // update proxy
1058    if (sharedBuffer == 0) {
1059        mStaticProxy.clear();
1060        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1061    } else {
1062        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1063        mProxy = mStaticProxy;
1064    }
1065    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
1066            uint16_t(mVolume[LEFT] * 0x1000));
1067    mProxy->setSendLevel(mSendLevel);
1068    mProxy->setSampleRate(mSampleRate);
1069    mProxy->setEpoch(epoch);
1070    mProxy->setMinimum(mNotificationFramesAct);
1071
1072    mDeathNotifier = new DeathNotifier(this);
1073    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1074
1075    return NO_ERROR;
1076}
1077
1078status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1079{
1080    if (audioBuffer == NULL) {
1081        return BAD_VALUE;
1082    }
1083    if (mTransfer != TRANSFER_OBTAIN) {
1084        audioBuffer->frameCount = 0;
1085        audioBuffer->size = 0;
1086        audioBuffer->raw = NULL;
1087        return INVALID_OPERATION;
1088    }
1089
1090    const struct timespec *requested;
1091    if (waitCount == -1) {
1092        requested = &ClientProxy::kForever;
1093    } else if (waitCount == 0) {
1094        requested = &ClientProxy::kNonBlocking;
1095    } else if (waitCount > 0) {
1096        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1097        struct timespec timeout;
1098        timeout.tv_sec = ms / 1000;
1099        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
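        // e.g. (illustrative) waitCount = 250 with WAIT_PERIOD_MS = 10 gives ms = 2500,
        // i.e. tv_sec = 2 and tv_nsec = 500000000.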
1100        requested = &timeout;
1101    } else {
1102        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1103        requested = NULL;
1104    }
1105    return obtainBuffer(audioBuffer, requested);
1106}
1107
1108status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1109        struct timespec *elapsed, size_t *nonContig)
1110{
1111    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1112    uint32_t oldSequence = 0;
1113    uint32_t newSequence;
1114
1115    Proxy::Buffer buffer;
1116    status_t status = NO_ERROR;
1117
1118    static const int32_t kMaxTries = 5;
1119    int32_t tryCounter = kMaxTries;
1120
1121    do {
1122        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1123        // keep them from going away if another thread re-creates the track during obtainBuffer()
1124        sp<AudioTrackClientProxy> proxy;
1125        sp<IMemory> iMem;
1126
1127        {   // start of lock scope
1128            AutoMutex lock(mLock);
1129
1130            newSequence = mSequence;
1131            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1132            if (status == DEAD_OBJECT) {
1133                // re-create track, unless someone else has already done so
1134                if (newSequence == oldSequence) {
1135                    status = restoreTrack_l("obtainBuffer");
1136                    if (status != NO_ERROR) {
1137                        buffer.mFrameCount = 0;
1138                        buffer.mRaw = NULL;
1139                        buffer.mNonContig = 0;
1140                        break;
1141                    }
1142                }
1143            }
1144            oldSequence = newSequence;
1145
1146            // Keep the extra references
1147            proxy = mProxy;
1148            iMem = mCblkMemory;
1149
1150            if (mState == STATE_STOPPING) {
1151                status = -EINTR;
1152                buffer.mFrameCount = 0;
1153                buffer.mRaw = NULL;
1154                buffer.mNonContig = 0;
1155                break;
1156            }
1157
1158            // Non-blocking if track is stopped or paused
1159            if (mState != STATE_ACTIVE) {
1160                requested = &ClientProxy::kNonBlocking;
1161            }
1162
1163        }   // end of lock scope
1164
1165        buffer.mFrameCount = audioBuffer->frameCount;
1166        // FIXME starts the requested timeout and elapsed over from scratch
1167        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1168
1169    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1170
1171    audioBuffer->frameCount = buffer.mFrameCount;
1172    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1173    audioBuffer->raw = buffer.mRaw;
1174    if (nonContig != NULL) {
1175        *nonContig = buffer.mNonContig;
1176    }
1177    return status;
1178}
1179
1180void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1181{
1182    if (mTransfer == TRANSFER_SHARED) {
1183        return;
1184    }
1185
1186    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1187    if (stepCount == 0) {
1188        return;
1189    }
1190
1191    Proxy::Buffer buffer;
1192    buffer.mFrameCount = stepCount;
1193    buffer.mRaw = audioBuffer->raw;
1194
1195    AutoMutex lock(mLock);
1196    mInUnderrun = false;
1197    mProxy->releaseBuffer(&buffer);
1198
1199    // restart track if it was disabled by audioflinger due to previous underrun
1200    if (mState == STATE_ACTIVE) {
1201        audio_track_cblk_t* cblk = mCblk;
1202        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1203            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
1204                    this, mName.string());
1205            // FIXME ignoring status
1206            mAudioTrack->start();
1207        }
1208    }
1209}
1210
1211// -------------------------------------------------------------------------
1212
1213ssize_t AudioTrack::write(const void* buffer, size_t userSize)
1214{
1215    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1216        return INVALID_OPERATION;
1217    }
1218
1219    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1220        // Sanity check: the user is most likely passing an error code, and it would
1221        // make the return value ambiguous (actualSize vs error).
1222        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d))", buffer, userSize, userSize);
1223        return BAD_VALUE;
1224    }
1225
1226    size_t written = 0;
1227    Buffer audioBuffer;
1228
1229    while (userSize >= mFrameSize) {
1230        audioBuffer.frameCount = userSize / mFrameSize;
1231
1232        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
1233        if (err < 0) {
1234            if (written > 0) {
1235                break;
1236            }
1237            return ssize_t(err);
1238        }
1239
1240        size_t toWrite;
1241        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1242            // Divide capacity by 2 to take expansion into account
1243            toWrite = audioBuffer.size >> 1;
1244            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
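            // Each unsigned 8-bit source sample is widened to a signed 16-bit sample in the
            // server-format buffer, so only half of the obtained buffer's byte capacity worth
            // of source bytes is consumed per pass.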
1245        } else {
1246            toWrite = audioBuffer.size;
1247            memcpy(audioBuffer.i8, buffer, toWrite);
1248        }
1249        buffer = ((const char *) buffer) + toWrite;
1250        userSize -= toWrite;
1251        written += toWrite;
1252
1253        releaseBuffer(&audioBuffer);
1254    }
1255
1256    return written;
1257}
1258
1259// -------------------------------------------------------------------------
1260
1261TimedAudioTrack::TimedAudioTrack() {
1262    mIsTimed = true;
1263}
1264
1265status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1266{
1267    AutoMutex lock(mLock);
1268    status_t result = UNKNOWN_ERROR;
1269
1270#if 1
1271    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1272    // while we are accessing the cblk
1273    sp<IAudioTrack> audioTrack = mAudioTrack;
1274    sp<IMemory> iMem = mCblkMemory;
1275#endif
1276
1277    // If the track is not already invalid, try to allocate a buffer.  If the
1278    // allocation fails, indicating that the server is dead, flag the track as invalid so
1279    // we can attempt to restore it in just a bit.
1280    audio_track_cblk_t* cblk = mCblk;
1281    if (!(cblk->mFlags & CBLK_INVALID)) {
1282        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1283        if (result == DEAD_OBJECT) {
1284            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1285        }
1286    }
1287
1288    // If the track is invalid at this point, attempt to restore it and try the
1289    // allocation one more time.
1290    if (cblk->mFlags & CBLK_INVALID) {
1291        result = restoreTrack_l("allocateTimedBuffer");
1292
1293        if (result == NO_ERROR) {
1294            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1295        }
1296    }
1297
1298    return result;
1299}
1300
1301status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1302                                           int64_t pts)
1303{
1304    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1305    {
1306        AutoMutex lock(mLock);
1307        audio_track_cblk_t* cblk = mCblk;
1308        // restart track if it was disabled by audioflinger due to previous underrun
1309        if (buffer->size() != 0 && status == NO_ERROR &&
1310                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1311            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1312            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1313            // FIXME ignoring status
1314            mAudioTrack->start();
1315        }
1316    }
1317    return status;
1318}
1319
1320status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1321                                                TargetTimeline target)
1322{
1323    return mAudioTrack->setMediaTimeTransform(xform, target);
1324}
1325
1326// -------------------------------------------------------------------------
1327
1328nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
1329{
1330    // Currently the AudioTrack thread is not created if there are no callbacks.
1331    // Would it ever make sense to run the thread, even without callbacks?
1332    // If so, then replace this by checks at each use for mCbf != NULL.
1333    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1334
1335    mLock.lock();
1336    if (mAwaitBoost) {
1337        mAwaitBoost = false;
1338        mLock.unlock();
1339        static const int32_t kMaxTries = 5;
1340        int32_t tryCounter = kMaxTries;
1341        uint32_t pollUs = 10000;
1342        do {
1343            int policy = sched_getscheduler(0);
1344            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1345                break;
1346            }
1347            usleep(pollUs);
1348            pollUs <<= 1;
1349        } while (tryCounter-- > 0);
1350        if (tryCounter < 0) {
1351            ALOGE("did not receive expected priority boost on time");
1352        }
1353        // Run again immediately
1354        return 0;
1355    }
1356
1357    // Can only reference mCblk while locked
1358    int32_t flags = android_atomic_and(
1359        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1360
1361    // Check for track invalidation
1362    if (flags & CBLK_INVALID) {
1363        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear the
1364        // AudioSystem cache. We should not exit here but after calling the callback, so
1365        // that the upper layers can recreate the track.
1366        if (!isOffloaded() || (mSequence == mObservedSequence)) {
1367            status_t status = restoreTrack_l("processAudioBuffer");
1368            mLock.unlock();
1369            // Run again immediately, but with a new IAudioTrack
1370            return 0;
1371        }
1372    }
1373
1374    bool waitStreamEnd = mState == STATE_STOPPING;
1375    bool active = mState == STATE_ACTIVE;
1376
1377    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1378    bool newUnderrun = false;
1379    if (flags & CBLK_UNDERRUN) {
1380#if 0
1381        // Currently in shared buffer mode, when the server reaches the end of buffer,
1382        // the track stays active in continuous underrun state.  It's up to the application
1383        // to pause or stop the track, or set the position to a new offset within buffer.
1384        // This was some experimental code to auto-pause on underrun.   Keeping it here
1385        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1386        if (mTransfer == TRANSFER_SHARED) {
1387            mState = STATE_PAUSED;
1388            active = false;
1389        }
1390#endif
1391        if (!mInUnderrun) {
1392            mInUnderrun = true;
1393            newUnderrun = true;
1394        }
1395    }
1396
1397    // Get current position of server
1398    size_t position = mProxy->getPosition();
1399
1400    // Manage marker callback
1401    bool markerReached = false;
1402    size_t markerPosition = mMarkerPosition;
1403    // FIXME fails for wraparound, need 64 bits
1404    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1405        mMarkerReached = markerReached = true;
1406    }
1407
1408    // Determine number of new position callback(s) that will be needed, while locked
1409    size_t newPosCount = 0;
1410    size_t newPosition = mNewPosition;
1411    size_t updatePeriod = mUpdatePeriod;
1412    // FIXME fails for wraparound, need 64 bits
1413    if (updatePeriod > 0 && position >= newPosition) {
1414        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1415        mNewPosition += updatePeriod * newPosCount;
1416    }
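    // e.g. (illustrative) if the server position has advanced 2.5 update periods past
    // mNewPosition, (position - newPosition) / updatePeriod truncates to 2, so 3 EVENT_NEW_POS
    // callbacks are due and mNewPosition moves forward by 3 periods.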
1417
1418    // Cache other fields that will be needed soon
1419    uint32_t loopPeriod = mLoopPeriod;
1420    uint32_t sampleRate = mSampleRate;
1421    size_t notificationFrames = mNotificationFramesAct;
1422    if (mRefreshRemaining) {
1423        mRefreshRemaining = false;
1424        mRemainingFrames = notificationFrames;
1425        mRetryOnPartialBuffer = false;
1426    }
1427    size_t misalignment = mProxy->getMisalignment();
1428    uint32_t sequence = mSequence;
1429
1430    // These fields don't need to be cached, because they are assigned only by set():
1431    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1432    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1433
1434    mLock.unlock();
1435
1436    if (waitStreamEnd) {
1437        AutoMutex lock(mLock);
1438
1439        sp<AudioTrackClientProxy> proxy = mProxy;
1440        sp<IMemory> iMem = mCblkMemory;
1441
1442        struct timespec timeout;
1443        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1444        timeout.tv_nsec = 0;
1445
1446        mLock.unlock();
1447        status_t status = mProxy->waitStreamEndDone(&timeout);
1448        mLock.lock();
1449        switch (status) {
1450        case NO_ERROR:
1451        case DEAD_OBJECT:
1452        case TIMED_OUT:
1453            mLock.unlock();
1454            mCbf(EVENT_STREAM_END, mUserData, NULL);
1455            mLock.lock();
1456            if (mState == STATE_STOPPING) {
1457                mState = STATE_STOPPED;
1458                if (status != DEAD_OBJECT) {
1459                   return NS_INACTIVE;
1460                }
1461            }
1462            return 0;
1463        default:
1464            return 0;
1465        }
1466    }
1467
1468    // perform callbacks while unlocked
1469    if (newUnderrun) {
1470        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1471    }
1472    // FIXME we will miss loops if loop cycle was signaled several times since last call
1473    //       to processAudioBuffer()
1474    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1475        mCbf(EVENT_LOOP_END, mUserData, NULL);
1476    }
1477    if (flags & CBLK_BUFFER_END) {
1478        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1479    }
1480    if (markerReached) {
1481        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1482    }
1483    while (newPosCount > 0) {
1484        size_t temp = newPosition;
1485        mCbf(EVENT_NEW_POS, mUserData, &temp);
1486        newPosition += updatePeriod;
1487        newPosCount--;
1488    }
1489
1490    if (mObservedSequence != sequence) {
1491        mObservedSequence = sequence;
1492        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1493        // for offloaded tracks, just wait for the upper layers to recreate the track
1494        if (isOffloaded()) {
1495            return NS_INACTIVE;
1496        }
1497    }
1498
1499    // if inactive, then don't run me again until re-started
1500    if (!active) {
1501        return NS_INACTIVE;
1502    }
1503
1504    // Compute the estimated time until the next timed event (position, markers, loops)
1505    // FIXME only for non-compressed audio
1506    uint32_t minFrames = ~0;
1507    if (!markerReached && position < markerPosition) {
1508        minFrames = markerPosition - position;
1509    }
1510    if (loopPeriod > 0 && loopPeriod < minFrames) {
1511        minFrames = loopPeriod;
1512    }
1513    if (updatePeriod > 0 && updatePeriod < minFrames) {
1514        minFrames = updatePeriod;
1515    }
1516
1517    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1518    static const uint32_t kPoll = 0;
1519    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1520        minFrames = kPoll * notificationFrames;
1521    }
1522
1523    // Convert frame units to time units
1524    nsecs_t ns = NS_WHENEVER;
1525    if (minFrames != (uint32_t) ~0) {
1526        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1527        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1528        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
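        // e.g. (illustrative) minFrames = 441 at sampleRate = 44100 gives 10 ms, plus the
        // 10 ms fudge factor = 20 ms until the next wakeup.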
1529    }
1530
1531    // If not supplying data by EVENT_MORE_DATA, then we're done
1532    if (mTransfer != TRANSFER_CALLBACK) {
1533        return ns;
1534    }
1535
1536    struct timespec timeout;
1537    const struct timespec *requested = &ClientProxy::kForever;
1538    if (ns != NS_WHENEVER) {
1539        timeout.tv_sec = ns / 1000000000LL;
1540        timeout.tv_nsec = ns % 1000000000LL;
1541        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1542        requested = &timeout;
1543    }
1544
1545    while (mRemainingFrames > 0) {
1546
1547        Buffer audioBuffer;
1548        audioBuffer.frameCount = mRemainingFrames;
1549        size_t nonContig;
1550        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1551        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1552                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1553        requested = &ClientProxy::kNonBlocking;
1554        size_t avail = audioBuffer.frameCount + nonContig;
1555        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1556                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1557        if (err != NO_ERROR) {
1558            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1559                    (isOffloaded() && (err == DEAD_OBJECT))) {
1560                return 0;
1561            }
1562            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1563            return NS_NEVER;
1564        }
1565
1566        if (mRetryOnPartialBuffer && !isOffloaded()) {
1567            mRetryOnPartialBuffer = false;
1568            if (avail < mRemainingFrames) {
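                // 1.1e9 ns per second of frames: wait about 10% longer than the nominal
                // duration of the still-missing frames before retrying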
1569                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1570                if (ns < 0 || myns < ns) {
1571                    ns = myns;
1572                }
1573                return ns;
1574            }
1575        }
1576
1577        // Divide buffer size by 2 to take into account the expansion
1578        // due to 8 to 16 bit conversion: the callback must fill only half
1579        // of the destination buffer
1580        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1581            audioBuffer.size >>= 1;
1582        }
1583
1584        size_t reqSize = audioBuffer.size;
1585        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1586        size_t writtenSize = audioBuffer.size;
1587        size_t writtenFrames = writtenSize / mFrameSize;
1588
1589        // Sanity check on returned size
1590        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1591            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1592                    reqSize, (int) writtenSize);
1593            return NS_NEVER;
1594        }
1595
1596        if (writtenSize == 0) {
1597            // The callback is done filling buffers
1598            // Keep this thread going to handle timed events and
1599            // still try to get more data in intervals of WAIT_PERIOD_MS
1600            // but don't just loop and block the CPU, so wait
1601            return WAIT_PERIOD_MS * 1000000LL;
1602        }
1603
1604        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1605            // 8 to 16 bit conversion, note that source and destination are the same address
1606            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1607            audioBuffer.size <<= 1;
1608        }
1609
1610        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1611        audioBuffer.frameCount = releasedFrames;
1612        mRemainingFrames -= releasedFrames;
1613        if (misalignment >= releasedFrames) {
1614            misalignment -= releasedFrames;
1615        } else {
1616            misalignment = 0;
1617        }
1618
1619        releaseBuffer(&audioBuffer);
1620
1621        // FIXME this is where we would repeat EVENT_MORE_DATA on the same, advanced buffer
1622        // if the callback does not accept the full chunk
1623        if (writtenSize < reqSize) {
1624            continue;
1625        }
1626
1627        // There could be enough non-contiguous frames available to satisfy the remaining request
1628        if (mRemainingFrames <= nonContig) {
1629            continue;
1630        }
1631
1632#if 0
1633        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1634        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1635        // that total to a sum == notificationFrames.
1636        if (0 < misalignment && misalignment <= mRemainingFrames) {
1637            mRemainingFrames = misalignment;
1638            return (mRemainingFrames * 1100000000LL) / sampleRate;
1639        }
1640#endif
1641
1642    }
1643    mRemainingFrames = notificationFrames;
1644    mRetryOnPartialBuffer = true;
1645
1646    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1647    return 0;
1648}
1649
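// Called with mLock held (hence the _l suffix) once the server-side IAudioTrack has died or
// been invalidated: it recreates the track on a freshly queried output and, for streaming
// tracks, resumes from the last known position, including frames still buffered client-side.
// Offloaded tracks are not restored here; DEAD_OBJECT is returned so the upper layers can
// recreate the track themselves.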
1650status_t AudioTrack::restoreTrack_l(const char *from)
1651{
1652    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1653          isOffloaded() ? "Offloaded" : "PCM", from);
1654    ++mSequence;
1655    status_t result;
1656
1657    // refresh the audio configuration cache in this process to make sure we get new
1658    // output parameters in getOutput_l() and createTrack_l()
1659    AudioSystem::clearAudioConfigCache();
1660
1661    if (isOffloaded()) {
1662        return DEAD_OBJECT;
1663    }
1664
1665    // force a new output query from the audio policy manager
1666    mOutput = 0;
1667    audio_io_handle_t output = getOutput_l();
1668
1669    // If a new IAudioTrack is created, createTrack_l() will modify the
1670    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1671    // It will also drop the strong references to the previous IAudioTrack and IMemory.
1672
1673    // take the frames that will be lost by track recreation into account in the saved position
1674    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1675    mNewPosition = position + mUpdatePeriod;
1676    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1677    result = createTrack_l(mStreamType,
1678                           mSampleRate,
1679                           mFormat,
1680                           mReqFrameCount,  // so that frame count never goes down
1681                           mFlags,
1682                           mSharedBuffer,
1683                           output,
1684                           position /*epoch*/);
1685
1686    if (result == NO_ERROR) {
1687        // continue playback from last known position, but
1688        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1689        if (mStaticProxy != NULL) {
1690            mLoopPeriod = 0;
1691            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1692        }
1693        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1694        //       track destruction have been played? This is critical for the SoundPool implementation.
1695        //       This is probably broken, and needs to be tested/debugged.
1696#if 0
1697        // restore write index and set other indexes to reflect empty buffer status
1698        if (!strcmp(from, "start")) {
1699            // Make sure that a client relying on callback events indicating underrun or
1700            // the actual number of audio frames played (e.g. SoundPool) receives them.
1701            if (mSharedBuffer == 0) {
1702                // restart playback even if buffer is not completely filled.
1703                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1704            }
1705        }
1706#endif
1707        if (mState == STATE_ACTIVE) {
1708            result = mAudioTrack->start();
1709        }
1710    }
1711    if (result != NO_ERROR) {
1712        // Use of direct and offloaded output streams is ref counted by the audio policy manager.
1713        // As getOutput_l() was called above and resulted in an output stream being opened,
1714        // we need to release it.
1715        AudioSystem::releaseOutput(output);
1716        ALOGW("restoreTrack_l() failed status %d", result);
1717        mState = STATE_STOPPED;
1718    }
1719
1720    return result;
1721}
1722
1723status_t AudioTrack::setParameters(const String8& keyValuePairs)
1724{
1725    AutoMutex lock(mLock);
1726    return mAudioTrack->setParameters(keyValuePairs);
1727}
1728
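// Returns the server-side presentation timestamp; the position is shifted by the proxy epoch
// (the position handed to createTrack_l() in restoreTrack_l()) so it stays in application
// frame units across track recreation.  Not supported for fast tracks, and only meaningful
// while active or paused.  Hedged usage sketch -- track is a hypothetical sp<AudioTrack>:
//
//     AudioTimestamp ts;
//     if (track->getTimestamp(ts) == NO_ERROR) {
//         // ts.mPosition frames had been presented as of the monotonic time in ts.mTime
//     }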
1729status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1730{
1731    AutoMutex lock(mLock);
1732    // FIXME not implemented for fast tracks; should use proxy and SSQ
1733    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1734        return INVALID_OPERATION;
1735    }
1736    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1737        return INVALID_OPERATION;
1738    }
1739    status_t status = mAudioTrack->getTimestamp(timestamp);
1740    if (status == NO_ERROR) {
1741        timestamp.mPosition += mProxy->getEpoch();
1742    }
1743    return status;
1744}
1745
1746String8 AudioTrack::getParameters(const String8& keys)
1747{
1748    if (mOutput) {
1749        return AudioSystem::getParameters(mOutput, keys);
1750    } else {
1751        return String8::empty();
1752    }
1753}
1754
1755status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
1756{
1757
1758    const size_t SIZE = 256;
1759    char buffer[SIZE];
1760    String8 result;
1761
1762    result.append(" AudioTrack::dump\n");
1763    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1764            mVolume[0], mVolume[1]);
1765    result.append(buffer);
1766    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
1767            mChannelCount, mFrameCount);
1768    result.append(buffer);
1769    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1770    result.append(buffer);
1771    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1772    result.append(buffer);
1773    ::write(fd, result.string(), result.size());
1774    return NO_ERROR;
1775}
1776
1777uint32_t AudioTrack::getUnderrunFrames() const
1778{
1779    AutoMutex lock(mLock);
1780    return mProxy->getUnderrunFrames();
1781}
1782
1783// =========================================================================
1784
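// Binder death notification from the media server: forwarding to the proxy wakes any thread
// blocked in obtainBuffer() so it can observe the dead track (DEAD_OBJECT) and let
// restoreTrack_l() rebuild it.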
1785void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
1786{
1787    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1788    if (audioTrack != 0) {
1789        AutoMutex lock(audioTrack->mLock);
1790        audioTrack->mProxy->binderDied();
1791    }
1792}
1793
1794// =========================================================================
1795
1796AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1797    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1798      mIgnoreNextPausedInt(false)
1799{
1800}
1801
1802AudioTrack::AudioTrackThread::~AudioTrackThread()
1803{
1804}
1805
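// threadLoop() turns the nanosecond value returned by processAudioBuffer() into scheduling:
// 0 means run again immediately, NS_INACTIVE pauses until resume(), NS_NEVER exits the
// thread, NS_WHENEVER is treated as a 1 s poll, and any other positive value sleeps that
// long via pauseInternal().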
1806bool AudioTrack::AudioTrackThread::threadLoop()
1807{
1808    {
1809        AutoMutex _l(mMyLock);
1810        if (mPaused) {
1811            mMyCond.wait(mMyLock);
1812            // caller will check for exitPending()
1813            return true;
1814        }
1815        if (mIgnoreNextPausedInt) {
1816            mIgnoreNextPausedInt = false;
1817            mPausedInt = false;
1818        }
1819        if (mPausedInt) {
1820            if (mPausedNs > 0) {
1821                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1822            } else {
1823                mMyCond.wait(mMyLock);
1824            }
1825            mPausedInt = false;
1826            return true;
1827        }
1828    }
1829    nsecs_t ns = mReceiver.processAudioBuffer(this);
1830    switch (ns) {
1831    case 0:
1832        return true;
1833    case NS_INACTIVE:
1834        pauseInternal();
1835        return true;
1836    case NS_NEVER:
1837        return false;
1838    case NS_WHENEVER:
1839        // FIXME increase poll interval, or make event-driven
1840        ns = 1000000000LL;
1841        // fall through
1842    default:
1843        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
1844        pauseInternal(ns);
1845        return true;
1846    }
1847}
1848
1849void AudioTrack::AudioTrackThread::requestExit()
1850{
1851    // must be in this order to avoid a race condition
1852    Thread::requestExit();
1853    resume();
1854}
1855
1856void AudioTrack::AudioTrackThread::pause()
1857{
1858    AutoMutex _l(mMyLock);
1859    mPaused = true;
1860}
1861
1862void AudioTrack::AudioTrackThread::resume()
1863{
1864    AutoMutex _l(mMyLock);
1865    mIgnoreNextPausedInt = true;
1866    if (mPaused || mPausedInt) {
1867        mPaused = false;
1868        mPausedInt = false;
1869        mMyCond.signal();
1870    }
1871}
1872
1873void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1874{
1875    AutoMutex _l(mMyLock);
1876    mPausedInt = true;
1877    mPausedNs = ns;
1878}
1879
1880} // namespace android
1881