AudioTrack.cpp revision b5fed68bcdd6f44424c9e4d12bfe9a3ff51bd62e
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <sys/resource.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120


namespace android {
// ---------------------------------------------------------------------------

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // default to 0 in case of error
    *frameCount = 0;

    // FIXME merge with similar code in createTrack_l(), except we're missing
    //       some information here that is available in createTrack_l():
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags
    uint32_t afSampleRate;
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    size_t afFrameCount;
    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
        return NO_INIT;
    }
    uint32_t afLatency;
    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
        return NO_INIT;
    }

    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }
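    // Worked example (illustrative values only, not taken from this code or any particular
    // device): with afLatency = 80 ms, afFrameCount = 960 and afSampleRate = 48000 Hz, one
    // mixer buffer lasts (1000 * 960) / 48000 = 20 ms, so minBufCount = 80 / 20 = 4 buffers
    // are needed to cover the hardware latency.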

    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
            afFrameCount * minBufCount * sampleRate / afSampleRate;
    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
    return NO_ERROR;
}

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT)
{
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
}

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        IPCThreadState::self()->flushCommands();
        AudioSystem::releaseAudioSessionId(mSessionId);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCountInt,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo)
{
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // FIXME "int" here is legacy and will be replaced by size_t later
    if (frameCountInt < 0) {
        ALOGE("Invalid frame count %d", frameCountInt);
        return BAD_VALUE;
    }
    size_t frameCount = frameCountInt;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    mOutput = 0;

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }

    if (sampleRate == 0) {
        uint32_t afSampleRate;
        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
            return NO_INIT;
        }
        sampleRate = afSampleRate;
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }
    if (channelMask == 0) {
        channelMask = AUDIO_CHANNEL_OUT_STEREO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %d", format);
        return BAD_VALUE;
    }

    // AudioFlinger does not currently support 8-bit data in shared memory
    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
        ALOGE("8-bit data in shared memory is not supported");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    // only allow deep buffering for music stream type
    if (streamType != AUDIO_STREAM_MUSIC) {
        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
    }

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = popcount(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        mFrameSizeAF = channelCount * sizeof(int16_t);
    } else {
        mFrameSize = sizeof(uint8_t);
        mFrameSizeAF = sizeof(uint8_t);
    }
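    // Note: mFrameSize is the frame size as seen by the client, while mFrameSizeAF is the
    // frame size as seen by AudioFlinger.  For linear PCM the server side always works on
    // 16-bit samples; on non-direct outputs, 8-bit client data is expanded to 16 bits
    // before it is handed over (see write() and processAudioBuffer()).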

    audio_io_handle_t output = AudioSystem::getOutput(
                                    streamType,
                                    sampleRate, format, channelMask,
                                    flags,
                                    offloadInfo);

    if (output == 0) {
        ALOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }

    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    mFrameCount = frameCount;
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    mSessionId = sessionId;
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status_t status = createTrack_l(streamType,
                                  sampleRate,
                                  format,
                                  frameCount,
                                  flags,
                                  sharedBuffer,
                                  output,
                                  0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // Use of direct and offloaded output streams is ref counted by audio policy manager.
        // As getOutput was called above and resulted in an output stream to be opened,
        // we need to release it.
        AudioSystem::releaseOutput(output);
        return status;
    }

    mStatus = NO_ERROR;
    mStreamType = streamType;
    mFormat = format;
    mSharedBuffer = sharedBuffer;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopPeriod = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mOutput = output;

    return NO_ERROR;
}

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (previousState == STATE_STOPPING) {
            mProxy->interrupt();
        } else {
            t->resume();
        }
    } else {
        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
        get_sched_policy(0, &mPreviousSchedulingGroup);
        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    }

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    // FIXME pause then stop should not be a nop
    if (mState != STATE_ACTIVE) {
        return;
    }

    if (isOffloaded()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
    }

    mProxy->interrupt();
    mAudioTrack->stop();
    // the playback head position will reset to 0, so if a marker is set, we need
    // to activate it again
    mMarkerReached = false;
#if 0
    // Force flush if a shared buffer is used otherwise audioflinger
    // will not stop before end of buffer is reached.
    // It may be needed to make sure that we stop playback, likely in case looping is on.
    if (mSharedBuffer != 0) {
        flush_l();
    }
#endif

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    if (isOffloaded()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();
}

status_t AudioTrack::setVolume(float left, float right)
{
    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[LEFT] = left;
    mVolume[RIGHT] = right;

    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
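    // The two gains are packed into one word as fixed point (U4.12) values: left in the
    // low 16 bits, right in the high 16 bits, with 0x1000 representing unity gain.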

    if (isOffloaded()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    if (level < 0.0f || level > 1.0f) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    if (mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    uint32_t afSamplingRate;
    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
        return NO_INIT;
    }
    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
    if (rate == 0 || rate > afSamplingRate*2 ) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSampleRate = rate;
    mProxy->setSampleRate(rate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    if (mIsTimed) {
        return 0;
    }

    AutoMutex lock(mLock);
    return mSampleRate;
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // FIXME If setting a loop also sets position to start of loop, then
    //       this is correct.  Otherwise it should be removed.
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *marker = mMarkerPosition;

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = mProxy->getPosition() + updatePeriod;
    mUpdatePeriod = updatePeriod;
    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME Check whether loops and setting position are incompatible in old code.
    // If we use setLoop for both purposes we lose the capability to set the position while looping.
    mStaticProxy->setLoop(position, mFrameCount, 0);

    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position) const
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    if (isOffloaded()) {
        uint32_t dspFrames = 0;

        if (mOutput != 0) {
            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
        }
        *position = dspFrames;
    } else {
        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
                mProxy->getPosition();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(size_t *position)
{
    if (mSharedBuffer == 0 || mIsTimed) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME The new code cannot reload while keeping a loop specified.
    // Need to check how the old code handled this, and whether it's a significant change.
    mStaticProxy->setLoop(0, mFrameCount, 0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput()
{
    AutoMutex lock(mLock);
    return mOutput;
}

// must be called with mLock held
audio_io_handle_t AudioTrack::getOutput_l()
{
    if (mOutput) {
        return mOutput;
    } else {
        return AudioSystem::getOutput(mStreamType,
                                      mSampleRate, mFormat, mChannelMask, mFlags);
    }
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

// -------------------------------------------------------------------------

// must be called with mLock held
status_t AudioTrack::createTrack_l(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        size_t frameCount,
        audio_output_flags_t flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        size_t epoch)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    // Not all of these values are needed under all conditions, but it is easier to get them all

    uint32_t afLatency;
    status = AudioSystem::getLatency(output, streamType, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        return NO_INIT;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
        return NO_INIT;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
        return NO_INIT;
    }

    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
            // either of these use cases:
            // use case 1: shared buffer
            (sharedBuffer != 0) ||
            // use case 2: callback handler
            (mCbf != NULL))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
        mFlags = flags;
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, no sample rate conversion
    //  n = 3   normal track, with sample rate conversion
    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
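    // For example, a normal (non-fast) track playing 44.1 kHz content on a 48 kHz output
    // needs sample rate conversion, so nBuffering is 3; when the client and output rates
    // match it is 2.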

    mNotificationFramesAct = mNotificationFramesReq;

    if (!audio_is_linear_pcm(format)) {

        if (sharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = sharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (sharedBuffer != 0) {

        // Ensure that buffer alignment matches channel count
        // 8-bit data in shared memory is not currently supported by AudioFlinger
        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    sharedBuffer->pointer(), mChannelCount);
            return BAD_VALUE;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);

    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {

        // FIXME move these calculations and associated checks to server

        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                afFrameCount, minBufCount, afSampleRate, afLatency);
        if (minBufCount <= nBuffering) {
            minBufCount = nBuffering;
        }
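        // Scale the server frame count by minBufCount and by the ratio of the client sample
        // rate to the output sample rate; this mirrors getMinFrameCount() above, except that
        // here the values are queried for the specific output handle.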

        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                ", afLatency=%d",
                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);

        if (frameCount == 0) {
            frameCount = minFrameCount;
        } else if (frameCount < minFrameCount) {
            // not ALOGW because it happens all the time when playing key clicks over A2DP
            ALOGV("Minimum buffer size corrected from %d to %d",
                     frameCount, minFrameCount);
            frameCount = minFrameCount;
        }
        // Make sure that application is notified with sufficient margin before underrun
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
            mNotificationFramesAct = frameCount/nBuffering;
        }

    } else {
        // For fast tracks, the frame count calculations and checks are done by server
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      sampleRate,
                                                      // AudioFlinger only sees 16-bit PCM
                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
                                                              AUDIO_FORMAT_PCM_16_BIT : format,
                                                      mChannelMask,
                                                      frameCount,
                                                      &trackFlags,
                                                      sharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mName,
                                                      &status);

    if (track == 0) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        return status;
    }
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
    mCblk = cblk;
    size_t temp = cblk->frameCount_;
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
    }
    frameCount = temp;
    mAwaitBoost = false;
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
            mAwaitBoost = true;
            if (sharedBuffer == 0) {
                // Theoretically double-buffering is not required for fast tracks,
                // due to tighter scheduling.  But in practice, to accommodate kernels with
                // scheduling jitter, and apps with computation jitter, we use double-buffering.
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        } else {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
            // once denied, do not request again if IAudioTrack is re-created
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
            mFlags = flags;
            if (sharedBuffer == 0) {
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        }
    }
    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
            mFlags = flags;
            return NO_INIT;
        }
    }

    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (sharedBuffer == 0) {
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
    } else {
        buffers = sharedBuffer->pointer();
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME don't believe this lie
    mLatency = afLatency + (1000*frameCount) / sampleRate;
    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy
    if (sharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
        mProxy = mStaticProxy;
    }
    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
            uint16_t(mVolume[LEFT] * 0x1000));
    mProxy->setSendLevel(mSendLevel);
    mProxy->setSampleRate(mSampleRate);
    mProxy->setEpoch(epoch);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    if (audioBuffer == NULL) {
        return BAD_VALUE;
    }
    if (mTransfer != TRANSFER_OBTAIN) {
        audioBuffer->frameCount = 0;
        audioBuffer->size = 0;
        audioBuffer->raw = NULL;
        return INVALID_OPERATION;
    }

    const struct timespec *requested;
    if (waitCount == -1) {
        requested = &ClientProxy::kForever;
    } else if (waitCount == 0) {
        requested = &ClientProxy::kNonBlocking;
    } else if (waitCount > 0) {
        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
        struct timespec timeout;
        timeout.tv_sec = ms / 1000;
        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
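        // Illustrative example: waitCount = 250 with WAIT_PERIOD_MS = 10 gives ms = 2500,
        // i.e. timeout = { tv_sec = 2, tv_nsec = 500000000 }.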
        requested = &timeout;
    } else {
        ALOGE("%s invalid waitCount %d", __func__, waitCount);
        requested = NULL;
    }
    return obtainBuffer(audioBuffer, requested);
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);

    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));

    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}

void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    if (mTransfer == TRANSFER_SHARED) {
        return;
    }

    size_t stepCount = audioBuffer->size / mFrameSizeAF;
    if (stepCount == 0) {
        return;
    }

    Proxy::Buffer buffer;
    buffer.mFrameCount = stepCount;
    buffer.mRaw = audioBuffer->raw;

    AutoMutex lock(mLock);
    mInUnderrun = false;
    mProxy->releaseBuffer(&buffer);

    // restart track if it was disabled by audioflinger due to previous underrun
    if (mState == STATE_ACTIVE) {
        audio_track_cblk_t* cblk = mCblk;
        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
                    this, mName.string());
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
}

// -------------------------------------------------------------------------

ssize_t AudioTrack::write(const void* buffer, size_t userSize)
{
    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
        return INVALID_OPERATION;
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d))", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t toWrite;
        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            // Divide capacity by 2 to take expansion into account
            toWrite = audioBuffer.size >> 1;
            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
        } else {
            toWrite = audioBuffer.size;
            memcpy(audioBuffer.i8, buffer, toWrite);
        }
        buffer = ((const char *) buffer) + toWrite;
        userSize -= toWrite;
        written += toWrite;

        releaseBuffer(&audioBuffer);
    }

    return written;
}

// -------------------------------------------------------------------------

TimedAudioTrack::TimedAudioTrack() {
    mIsTimed = true;
}

status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
{
    AutoMutex lock(mLock);
    status_t result = UNKNOWN_ERROR;

#if 1
    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
    // while we are accessing the cblk
    sp<IAudioTrack> audioTrack = mAudioTrack;
    sp<IMemory> iMem = mCblkMemory;
#endif

    // If the track is not invalid already, try to allocate a buffer.  alloc
    // fails indicating that the server is dead, flag the track as invalid so
    // we can attempt to restore in just a bit.
    audio_track_cblk_t* cblk = mCblk;
    if (!(cblk->mFlags & CBLK_INVALID)) {
        result = mAudioTrack->allocateTimedBuffer(size, buffer);
        if (result == DEAD_OBJECT) {
            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
        }
    }

    // If the track is invalid at this point, attempt to restore it. and try the
    // allocation one more time.
    if (cblk->mFlags & CBLK_INVALID) {
        result = restoreTrack_l("allocateTimedBuffer");

        if (result == NO_ERROR) {
            result = mAudioTrack->allocateTimedBuffer(size, buffer);
        }
    }

    return result;
}

status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
                                           int64_t pts)
{
    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
    {
        AutoMutex lock(mLock);
        audio_track_cblk_t* cblk = mCblk;
        // restart track if it was disabled by audioflinger due to previous underrun
        if (buffer->size() != 0 && status == NO_ERROR &&
                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
    return status;
}

status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
                                                TargetTimeline target)
{
    return mAudioTrack->setMediaTimeTransform(xform, target);
}

// -------------------------------------------------------------------------

nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
{
    // Currently the AudioTrack thread is not created if there are no callbacks.
    // Would it ever make sense to run the thread, even without callbacks?
    // If so, then replace this by checks at each use for mCbf != NULL.
    LOG_ALWAYS_FATAL_IF(mCblk == NULL);

    mLock.lock();
    if (mAwaitBoost) {
        mAwaitBoost = false;
        mLock.unlock();
        static const int32_t kMaxTries = 5;
        int32_t tryCounter = kMaxTries;
        uint32_t pollUs = 10000;
        do {
            int policy = sched_getscheduler(0);
            if (policy == SCHED_FIFO || policy == SCHED_RR) {
                break;
            }
            usleep(pollUs);
            pollUs <<= 1;
        } while (tryCounter-- > 0);
        if (tryCounter < 0) {
            ALOGE("did not receive expected priority boost on time");
        }
        // Run again immediately
        return 0;
    }

    // Can only reference mCblk while locked
    int32_t flags = android_atomic_and(
        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);

    // Check for track invalidation
    if (flags & CBLK_INVALID) {
        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
        // AudioSystem cache. We should not exit here but after calling the callback so
        // that the upper layers can recreate the track
        if (!isOffloaded() || (mSequence == mObservedSequence)) {
            status_t status = restoreTrack_l("processAudioBuffer");
            mLock.unlock();
            // Run again immediately, but with a new IAudioTrack
            return 0;
        }
    }

    bool waitStreamEnd = mState == STATE_STOPPING;
    bool active = mState == STATE_ACTIVE;

    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
    bool newUnderrun = false;
    if (flags & CBLK_UNDERRUN) {
#if 0
        // Currently in shared buffer mode, when the server reaches the end of buffer,
        // the track stays active in continuous underrun state.  It's up to the application
        // to pause or stop the track, or set the position to a new offset within buffer.
        // This was some experimental code to auto-pause on underrun.   Keeping it here
        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
        if (mTransfer == TRANSFER_SHARED) {
            mState = STATE_PAUSED;
            active = false;
        }
#endif
        if (!mInUnderrun) {
            mInUnderrun = true;
            newUnderrun = true;
        }
    }

    // Get current position of server
    size_t position = mProxy->getPosition();

    // Manage marker callback
    bool markerReached = false;
    size_t markerPosition = mMarkerPosition;
    // FIXME fails for wraparound, need 64 bits
    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
        mMarkerReached = markerReached = true;
    }

    // Determine number of new position callback(s) that will be needed, while locked
    size_t newPosCount = 0;
    size_t newPosition = mNewPosition;
    size_t updatePeriod = mUpdatePeriod;
    // FIXME fails for wraparound, need 64 bits
    if (updatePeriod > 0 && position >= newPosition) {
        newPosCount = ((position - newPosition) / updatePeriod) + 1;
        mNewPosition += updatePeriod * newPosCount;
    }
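    // For instance (hypothetical numbers): with updatePeriod = 1000 frames, newPosition = 5000
    // and a current position of 7500, newPosCount = ((7500 - 5000) / 1000) + 1 = 3 callbacks
    // are due and mNewPosition advances to 8000.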

    // Cache other fields that will be needed soon
    uint32_t loopPeriod = mLoopPeriod;
    uint32_t sampleRate = mSampleRate;
    size_t notificationFrames = mNotificationFramesAct;
    if (mRefreshRemaining) {
        mRefreshRemaining = false;
        mRemainingFrames = notificationFrames;
        mRetryOnPartialBuffer = false;
    }
    size_t misalignment = mProxy->getMisalignment();
    uint32_t sequence = mSequence;

    // These fields don't need to be cached, because they are assigned only by set():
    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
    // mFlags is also assigned by createTrack_l(), but not the bit we care about.

    mLock.unlock();

    if (waitStreamEnd) {
        AutoMutex lock(mLock);

        sp<AudioTrackClientProxy> proxy = mProxy;
        sp<IMemory> iMem = mCblkMemory;

        struct timespec timeout;
        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
        timeout.tv_nsec = 0;

        mLock.unlock();
        status_t status = mProxy->waitStreamEndDone(&timeout);
        mLock.lock();
        switch (status) {
        case NO_ERROR:
        case DEAD_OBJECT:
        case TIMED_OUT:
            mLock.unlock();
            mCbf(EVENT_STREAM_END, mUserData, NULL);
            mLock.lock();
            if (mState == STATE_STOPPING) {
                mState = STATE_STOPPED;
                if (status != DEAD_OBJECT) {
                    return NS_INACTIVE;
                }
            }
            return 0;
        default:
            return 0;
        }
    }

    // perform callbacks while unlocked
    if (newUnderrun) {
        mCbf(EVENT_UNDERRUN, mUserData, NULL);
    }
    // FIXME we will miss loops if loop cycle was signaled several times since last call
    //       to processAudioBuffer()
    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
        mCbf(EVENT_LOOP_END, mUserData, NULL);
    }
    if (flags & CBLK_BUFFER_END) {
        mCbf(EVENT_BUFFER_END, mUserData, NULL);
    }
    if (markerReached) {
        mCbf(EVENT_MARKER, mUserData, &markerPosition);
    }
    while (newPosCount > 0) {
        size_t temp = newPosition;
        mCbf(EVENT_NEW_POS, mUserData, &temp);
        newPosition += updatePeriod;
        newPosCount--;
    }

    if (mObservedSequence != sequence) {
        mObservedSequence = sequence;
        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
        // for offloaded tracks, just wait for the upper layers to recreate the track
        if (isOffloaded()) {
            return NS_INACTIVE;
        }
    }

    // if inactive, then don't run me again until re-started
    if (!active) {
        return NS_INACTIVE;
    }

    // Compute the estimated time until the next timed event (position, markers, loops)
    // FIXME only for non-compressed audio
    uint32_t minFrames = ~0;
    if (!markerReached && position < markerPosition) {
        minFrames = markerPosition - position;
    }
    if (loopPeriod > 0 && loopPeriod < minFrames) {
        minFrames = loopPeriod;
    }
    if (updatePeriod > 0 && updatePeriod < minFrames) {
        minFrames = updatePeriod;
    }

    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
    static const uint32_t kPoll = 0;
    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
        minFrames = kPoll * notificationFrames;
    }

    // Convert frame units to time units
    nsecs_t ns = NS_WHENEVER;
    if (minFrames != (uint32_t) ~0) {
        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
    }
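    // E.g. (illustrative) minFrames = 480 at sampleRate = 48000 Hz corresponds to 10 ms, so
    // with the fudge factor the thread is scheduled to run again in roughly 20 ms.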
1526
1527    // If not supplying data by EVENT_MORE_DATA, then we're done
1528    if (mTransfer != TRANSFER_CALLBACK) {
1529        return ns;
1530    }
1531
1532    struct timespec timeout;
1533    const struct timespec *requested = &ClientProxy::kForever;
1534    if (ns != NS_WHENEVER) {
1535        timeout.tv_sec = ns / 1000000000LL;
1536        timeout.tv_nsec = ns % 1000000000LL;
1537        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1538        requested = &timeout;
1539    }
1540
1541    while (mRemainingFrames > 0) {
1542
1543        Buffer audioBuffer;
1544        audioBuffer.frameCount = mRemainingFrames;
1545        size_t nonContig;
1546        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1547        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1548                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1549        requested = &ClientProxy::kNonBlocking;
1550        size_t avail = audioBuffer.frameCount + nonContig;
1551        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1552                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1553        if (err != NO_ERROR) {
1554            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1555                    (isOffloaded() && (err == DEAD_OBJECT))) {
1556                return 0;
1557            }
1558            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1559            return NS_NEVER;
1560        }
1561
1562        if (mRetryOnPartialBuffer && !isOffloaded()) {
1563            mRetryOnPartialBuffer = false;
1564            if (avail < mRemainingFrames) {
1565                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1566                if (ns < 0 || myns < ns) {
1567                    ns = myns;
1568                }
1569                return ns;
1570            }
1571        }
1572
1573        // Divide buffer size by 2 to take into account the expansion
1574        // due to 8 to 16 bit conversion: the callback must fill only half
1575        // of the destination buffer
1576        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1577            audioBuffer.size >>= 1;
1578        }
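        // Illustrative example (assuming a mono 8-bit stream): a buffer holding 1024 16-bit
        // server-side frames (2048 bytes) is presented to the callback as 1024 bytes here,
        // then expanded back in place to 2048 bytes after the callback returns (below).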
1579
1580        size_t reqSize = audioBuffer.size;
1581        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1582        size_t writtenSize = audioBuffer.size;
1583        size_t writtenFrames = writtenSize / mFrameSize;
1584
1585        // Sanity check on returned size
1586        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1587            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1588                    reqSize, (int) writtenSize);
1589            return NS_NEVER;
1590        }
1591
1592        if (writtenSize == 0) {
1593            // The callback is done filling buffers.
1594            // Keep this thread going to handle timed events and
1595            // still try to get more data at intervals of WAIT_PERIOD_MS,
1596            // but don't just spin and hog the CPU, so wait.
1597            return WAIT_PERIOD_MS * 1000000LL;
1598        }
1599
1600        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1601            // 8 to 16 bit conversion, note that source and destination are the same address
1602            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1603            audioBuffer.size <<= 1;
1604        }
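        // writtenSize counted 8-bit samples (one byte each), so the converted data now
        // occupies 2 * writtenSize bytes, which is why audioBuffer.size was doubled above.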
1605
1606        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1607        audioBuffer.frameCount = releasedFrames;
1608        mRemainingFrames -= releasedFrames;
1609        if (misalignment >= releasedFrames) {
1610            misalignment -= releasedFrames;
1611        } else {
1612            misalignment = 0;
1613        }
1614
1615        releaseBuffer(&audioBuffer);
1616
1617        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1618        // if the callback did not accept the full chunk
1619        if (writtenSize < reqSize) {
1620            continue;
1621        }
1622
1623        // There could be enough non-contiguous frames available to satisfy the remaining request
1624        if (mRemainingFrames <= nonContig) {
1625            continue;
1626        }
1627
1628#if 0
1629        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1630        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1631        // that total to a sum == notificationFrames.
1632        if (0 < misalignment && misalignment <= mRemainingFrames) {
1633            mRemainingFrames = misalignment;
1634            return (mRemainingFrames * 1100000000LL) / sampleRate;
1635        }
1636#endif
1637
1638    }
1639    mRemainingFrames = notificationFrames;
1640    mRetryOnPartialBuffer = true;
1641
1642    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1643    return 0;
1644}
1645
1646status_t AudioTrack::restoreTrack_l(const char *from)
1647{
1648    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1649          isOffloaded() ? "Offloaded" : "PCM", from);
1650    ++mSequence;
1651    status_t result;
1652
1653    // refresh the audio configuration cache in this process to make sure we get new
1654    // output parameters in getOutput_l() and createTrack_l()
1655    AudioSystem::clearAudioConfigCache();
1656
1657    if (isOffloaded()) {
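        // Offloaded tracks are not restored here: DEAD_OBJECT is propagated so that the
        // upper layers recreate the track (see the EVENT_NEW_IAUDIOTRACK handling above).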
1658        return DEAD_OBJECT;
1659    }
1660
1661    // force a new output query from the audio policy manager
1662    mOutput = 0;
1663    audio_io_handle_t output = getOutput_l();
1664
1665    // If a new IAudioTrack is created, createTrack_l() will modify the
1666    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1667    // It will also drop the strong references on the previous IAudioTrack and IMemory.
1668
1669    // take into account, in the saved position, the frames that will be lost by track recreation
1670    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1671    mNewPosition = position + mUpdatePeriod;
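    // mNewPosition is rebased against the saved position so that the next EVENT_NEW_POS
    // fires one update period after playback resumes on the new track.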
1672    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1673    result = createTrack_l(mStreamType,
1674                           mSampleRate,
1675                           mFormat,
1676                           mReqFrameCount,  // so that frame count never goes down
1677                           mFlags,
1678                           mSharedBuffer,
1679                           output,
1680                           position /*epoch*/);
1681
1682    if (result == NO_ERROR) {
1683        // continue playback from last known position, but
1684        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1685        if (mStaticProxy != NULL) {
1686            mLoopPeriod = 0;
1687            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1688        }
1689        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1690        //       track destruction have been played? This is critical for the SoundPool implementation.
1691        //       This must be broken, and needs to be tested/debugged.
1692#if 0
1693        // restore write index and set other indexes to reflect empty buffer status
1694        if (!strcmp(from, "start")) {
1695            // Make sure that a client relying on callback events indicating underrun or
1696            // the actual number of audio frames played (e.g. SoundPool) receives them.
1697            if (mSharedBuffer == 0) {
1698                // restart playback even if buffer is not completely filled.
1699                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1700            }
1701        }
1702#endif
1703        if (mState == STATE_ACTIVE) {
1704            result = mAudioTrack->start();
1705        }
1706    }
1707    if (result != NO_ERROR) {
1708        // Use of direct and offloaded output streams is reference counted by the audio policy manager.
1709        // As getOutput() was called above and resulted in an output stream being opened,
1710        // we need to release it.
1711        AudioSystem::releaseOutput(output);
1712        ALOGW("restoreTrack_l() failed status %d", result);
1713        mState = STATE_STOPPED;
1714    }
1715
1716    return result;
1717}
1718
1719status_t AudioTrack::setParameters(const String8& keyValuePairs)
1720{
1721    AutoMutex lock(mLock);
1722    return mAudioTrack->setParameters(keyValuePairs);
1723}
1724
1725status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1726{
1727    AutoMutex lock(mLock);
1728    // FIXME not implemented for fast tracks; should use proxy and SSQ
1729    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1730        return INVALID_OPERATION;
1731    }
1732    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1733        return INVALID_OPERATION;
1734    }
1735    status_t status = mAudioTrack->getTimestamp(timestamp);
1736    if (status == NO_ERROR) {
1737        timestamp.mPosition += mProxy->getEpoch();
1738    }
1739    return status;
1740}
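// Illustrative client-side usage (a sketch, not part of this file):
//   AudioTimestamp ts;
//   if (track->getTimestamp(ts) == NO_ERROR) {
//       // ts.mPosition is the epoch-adjusted frame position computed above;
//       // ts.mTime is the CLOCK_MONOTONIC time at which that frame was presented.
//   }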
1741
1742String8 AudioTrack::getParameters(const String8& keys)
1743{
1744    if (mOutput) {
1745        return AudioSystem::getParameters(mOutput, keys);
1746    } else {
1747        return String8::empty();
1748    }
1749}
1750
1751status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
1752{
1753
1754    const size_t SIZE = 256;
1755    char buffer[SIZE];
1756    String8 result;
1757
1758    result.append(" AudioTrack::dump\n");
1759    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1760            mVolume[0], mVolume[1]);
1761    result.append(buffer);
1762    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
1763            mChannelCount, mFrameCount);
1764    result.append(buffer);
1765    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1766    result.append(buffer);
1767    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1768    result.append(buffer);
1769    ::write(fd, result.string(), result.size());
1770    return NO_ERROR;
1771}
1772
1773uint32_t AudioTrack::getUnderrunFrames() const
1774{
1775    AutoMutex lock(mLock);
1776    return mProxy->getUnderrunFrames();
1777}
1778
1779// =========================================================================
1780
1781void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
1782{
1783    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1784    if (audioTrack != 0) {
1785        AutoMutex lock(audioTrack->mLock);
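        // binderDied() on the proxy marks the shared control block invalid, so blocked and
        // subsequent client calls fail with DEAD_OBJECT and can recreate the IAudioTrack
        // via restoreTrack_l().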
1786        audioTrack->mProxy->binderDied();
1787    }
1788}
1789
1790// =========================================================================
1791
1792AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1793    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1794      mIgnoreNextPausedInt(false)
1795{
1796}
1797
1798AudioTrack::AudioTrackThread::~AudioTrackThread()
1799{
1800}
1801
1802bool AudioTrack::AudioTrackThread::threadLoop()
1803{
1804    {
1805        AutoMutex _l(mMyLock);
1806        if (mPaused) {
1807            mMyCond.wait(mMyLock);
1808            // caller will check for exitPending()
1809            return true;
1810        }
1811        if (mIgnoreNextPausedInt) {
1812            mIgnoreNextPausedInt = false;
1813            mPausedInt = false;
1814        }
1815        if (mPausedInt) {
1816            if (mPausedNs > 0) {
1817                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1818            } else {
1819                mMyCond.wait(mMyLock);
1820            }
1821            mPausedInt = false;
1822            return true;
1823        }
1824    }
1825    nsecs_t ns = mReceiver.processAudioBuffer(this);
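    // Return-value contract with processAudioBuffer():
    //   0           -> run the loop again immediately
    //   NS_INACTIVE -> pause until resume()
    //   NS_NEVER    -> exit the thread
    //   NS_WHENEVER -> poll again after a default interval (1 s below)
    //   > 0         -> sleep for that many nanoseconds via pauseInternal(ns)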
1826    switch (ns) {
1827    case 0:
1828        return true;
1829    case NS_INACTIVE:
1830        pauseInternal();
1831        return true;
1832    case NS_NEVER:
1833        return false;
1834    case NS_WHENEVER:
1835        // FIXME increase poll interval, or make event-driven
1836        ns = 1000000000LL;
1837        // fall through
1838    default:
1839        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
1840        pauseInternal(ns);
1841        return true;
1842    }
1843}
1844
1845void AudioTrack::AudioTrackThread::requestExit()
1846{
1847    // must be in this order to avoid a race condition
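    // Setting the exit flag before waking the thread ensures that the wake-up from resume()
    // cannot be consumed before exitPending() becomes true, which could otherwise leave the
    // thread blocked again in threadLoop()'s wait.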
1848    Thread::requestExit();
1849    resume();
1850}
1851
1852void AudioTrack::AudioTrackThread::pause()
1853{
1854    AutoMutex _l(mMyLock);
1855    mPaused = true;
1856}
1857
1858void AudioTrack::AudioTrackThread::resume()
1859{
1860    AutoMutex _l(mMyLock);
1861    mIgnoreNextPausedInt = true;
1862    if (mPaused || mPausedInt) {
1863        mPaused = false;
1864        mPausedInt = false;
1865        mMyCond.signal();
1866    }
1867}
1868
1869void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1870{
1871    AutoMutex _l(mMyLock);
1872    mPausedInt = true;
1873    mPausedNs = ns;
1874}
1875
1876} // namespace android
1877