AudioTrack.cpp revision 9cae217050aa1347d4ac5053c305754879e3f97f
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <sys/resource.h>
23#include <audio_utils/primitives.h>
24#include <binder/IPCThreadState.h>
25#include <media/AudioTrack.h>
26#include <utils/Log.h>
27#include <private/media/AudioTrackShared.h>
28#include <media/IAudioFlinger.h>
29
30#define WAIT_PERIOD_MS                  10
31#define WAIT_STREAM_END_TIMEOUT_SEC     120
32
33
34namespace android {
35// ---------------------------------------------------------------------------
36
37// static
38status_t AudioTrack::getMinFrameCount(
39        size_t* frameCount,
40        audio_stream_type_t streamType,
41        uint32_t sampleRate)
42{
43    if (frameCount == NULL) {
44        return BAD_VALUE;
45    }
46
47    // default to 0 in case of error
48    *frameCount = 0;
49
50    // FIXME merge with similar code in createTrack_l(), except we're missing
51    //       some information here that is available in createTrack_l():
52    //          audio_io_handle_t output
53    //          audio_format_t format
54    //          audio_channel_mask_t channelMask
55    //          audio_output_flags_t flags
56    uint32_t afSampleRate;
57    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
58        return NO_INIT;
59    }
60    size_t afFrameCount;
61    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
62        return NO_INIT;
63    }
64    uint32_t afLatency;
65    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
66        return NO_INIT;
67    }
68
69    // Ensure that buffer depth covers at least audio hardware latency
70    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
71    if (minBufCount < 2) {
72        minBufCount = 2;
73    }
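    // Illustrative arithmetic (hypothetical values, not from this file): with
    // afFrameCount = 960 and afSampleRate = 48000, one hardware buffer is 20 ms, so an
    // afLatency of 80 ms gives minBufCount = 80 / 20 = 4 (the clamp above only raises
    // values below 2).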
74
75    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
76            afFrameCount * minBufCount * sampleRate / afSampleRate;
77    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
78            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
79    return NO_ERROR;
80}
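// Illustrative only, not part of the original file: a minimal sketch of how a client
// might query the minimum buffer size before creating a track.  The stream type and
// sample rate below are placeholder values.
//
//     size_t minFrameCount;
//     if (AudioTrack::getMinFrameCount(&minFrameCount, AUDIO_STREAM_MUSIC, 44100) == NO_ERROR) {
//         // size the client-side buffer to at least minFrameCount frames
//     }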
81
82// ---------------------------------------------------------------------------
83
84AudioTrack::AudioTrack()
85    : mStatus(NO_INIT),
86      mIsTimed(false),
87      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
88      mPreviousSchedulingGroup(SP_DEFAULT)
89{
90}
91
92AudioTrack::AudioTrack(
93        audio_stream_type_t streamType,
94        uint32_t sampleRate,
95        audio_format_t format,
96        audio_channel_mask_t channelMask,
97        int frameCount,
98        audio_output_flags_t flags,
99        callback_t cbf,
100        void* user,
101        int notificationFrames,
102        int sessionId,
103        transfer_type transferType,
104        const audio_offload_info_t *offloadInfo,
105        int uid)
106    : mStatus(NO_INIT),
107      mIsTimed(false),
108      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
109      mPreviousSchedulingGroup(SP_DEFAULT)
110{
111    mStatus = set(streamType, sampleRate, format, channelMask,
112            frameCount, flags, cbf, user, notificationFrames,
113            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
114            offloadInfo, uid);
115}
116
117AudioTrack::AudioTrack(
118        audio_stream_type_t streamType,
119        uint32_t sampleRate,
120        audio_format_t format,
121        audio_channel_mask_t channelMask,
122        const sp<IMemory>& sharedBuffer,
123        audio_output_flags_t flags,
124        callback_t cbf,
125        void* user,
126        int notificationFrames,
127        int sessionId,
128        transfer_type transferType,
129        const audio_offload_info_t *offloadInfo,
130        int uid)
131    : mStatus(NO_INIT),
132      mIsTimed(false),
133      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
134      mPreviousSchedulingGroup(SP_DEFAULT)
135{
136    mStatus = set(streamType, sampleRate, format, channelMask,
137            0 /*frameCount*/, flags, cbf, user, notificationFrames,
138            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid);
139}
140
141AudioTrack::~AudioTrack()
142{
143    if (mStatus == NO_ERROR) {
144        // Make sure that callback function exits in the case where
145        // it is looping on buffer full condition in obtainBuffer().
146        // Otherwise the callback thread will never exit.
147        stop();
148        if (mAudioTrackThread != 0) {
149            mProxy->interrupt();
150            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
151            mAudioTrackThread->requestExitAndWait();
152            mAudioTrackThread.clear();
153        }
154        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
155        mAudioTrack.clear();
156        IPCThreadState::self()->flushCommands();
157        AudioSystem::releaseAudioSessionId(mSessionId);
158    }
159}
160
161status_t AudioTrack::set(
162        audio_stream_type_t streamType,
163        uint32_t sampleRate,
164        audio_format_t format,
165        audio_channel_mask_t channelMask,
166        int frameCountInt,
167        audio_output_flags_t flags,
168        callback_t cbf,
169        void* user,
170        int notificationFrames,
171        const sp<IMemory>& sharedBuffer,
172        bool threadCanCallJava,
173        int sessionId,
174        transfer_type transferType,
175        const audio_offload_info_t *offloadInfo,
176        int uid)
177{
178    switch (transferType) {
179    case TRANSFER_DEFAULT:
180        if (sharedBuffer != 0) {
181            transferType = TRANSFER_SHARED;
182        } else if (cbf == NULL || threadCanCallJava) {
183            transferType = TRANSFER_SYNC;
184        } else {
185            transferType = TRANSFER_CALLBACK;
186        }
187        break;
188    case TRANSFER_CALLBACK:
189        if (cbf == NULL || sharedBuffer != 0) {
190            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
191            return BAD_VALUE;
192        }
193        break;
194    case TRANSFER_OBTAIN:
195    case TRANSFER_SYNC:
196        if (sharedBuffer != 0) {
197            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
198            return BAD_VALUE;
199        }
200        break;
201    case TRANSFER_SHARED:
202        if (sharedBuffer == 0) {
203            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
204            return BAD_VALUE;
205        }
206        break;
207    default:
208        ALOGE("Invalid transfer type %d", transferType);
209        return BAD_VALUE;
210    }
211    mTransfer = transferType;
212
213    // FIXME "int" here is legacy and will be replaced by size_t later
214    if (frameCountInt < 0) {
215        ALOGE("Invalid frame count %d", frameCountInt);
216        return BAD_VALUE;
217    }
218    size_t frameCount = frameCountInt;
219
220    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
221            sharedBuffer->size());
222
223    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
224
225    AutoMutex lock(mLock);
226
227    // invariant that mAudioTrack != 0 is true only after set() returns successfully
228    if (mAudioTrack != 0) {
229        ALOGE("Track already in use");
230        return INVALID_OPERATION;
231    }
232
233    mOutput = 0;
234
235    // handle default values first.
236    if (streamType == AUDIO_STREAM_DEFAULT) {
237        streamType = AUDIO_STREAM_MUSIC;
238    }
239
240    if (sampleRate == 0) {
241        uint32_t afSampleRate;
242        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
243            return NO_INIT;
244        }
245        sampleRate = afSampleRate;
246    }
247    mSampleRate = sampleRate;
248
249    // these below should probably come from the audioFlinger too...
250    if (format == AUDIO_FORMAT_DEFAULT) {
251        format = AUDIO_FORMAT_PCM_16_BIT;
252    }
253    if (channelMask == 0) {
254        channelMask = AUDIO_CHANNEL_OUT_STEREO;
255    }
256
257    // validate parameters
258    if (!audio_is_valid_format(format)) {
259        ALOGE("Invalid format %d", format);
260        return BAD_VALUE;
261    }
262
263    // AudioFlinger does not currently support 8-bit data in shared memory
264    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
265        ALOGE("8-bit data in shared memory is not supported");
266        return BAD_VALUE;
267    }
268
269    // force direct flag if format is not linear PCM
270    // or offload was requested
271    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
272            || !audio_is_linear_pcm(format)) {
273        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
274                    ? "Offload request, forcing to Direct Output"
275                    : "Not linear PCM, forcing to Direct Output");
276        flags = (audio_output_flags_t)
277                // FIXME why can't we allow direct AND fast?
278                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
279    }
280    // only allow deep buffering for music stream type
281    if (streamType != AUDIO_STREAM_MUSIC) {
282        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
283    }
284
285    if (!audio_is_output_channel(channelMask)) {
286        ALOGE("Invalid channel mask %#x", channelMask);
287        return BAD_VALUE;
288    }
289    mChannelMask = channelMask;
290    uint32_t channelCount = popcount(channelMask);
291    mChannelCount = channelCount;
292
293    if (audio_is_linear_pcm(format)) {
294        mFrameSize = channelCount * audio_bytes_per_sample(format);
295        mFrameSizeAF = channelCount * sizeof(int16_t);
296    } else {
297        mFrameSize = sizeof(uint8_t);
298        mFrameSizeAF = sizeof(uint8_t);
299    }
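    // For example, 16-bit stereo PCM gives mFrameSize = 2 * sizeof(int16_t) = 4 bytes per
    // frame, while non-linear (compressed) formats are handled as byte streams with a
    // nominal frame size of 1 byte.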
300
301    audio_io_handle_t output = AudioSystem::getOutput(
302                                    streamType,
303                                    sampleRate, format, channelMask,
304                                    flags,
305                                    offloadInfo);
306
307    if (output == 0) {
308        ALOGE("Could not get audio output for stream type %d", streamType);
309        return BAD_VALUE;
310    }
311
312    mVolume[LEFT] = 1.0f;
313    mVolume[RIGHT] = 1.0f;
314    mSendLevel = 0.0f;
315    mFrameCount = frameCount;
316    mReqFrameCount = frameCount;
317    mNotificationFramesReq = notificationFrames;
318    mNotificationFramesAct = 0;
319    mSessionId = sessionId;
320    if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
321        mClientUid = IPCThreadState::self()->getCallingUid();
322    } else {
323        mClientUid = uid;
324    }
325    mAuxEffectId = 0;
326    mFlags = flags;
327    mCbf = cbf;
328
329    if (cbf != NULL) {
330        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
331        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
332    }
333
334    // create the IAudioTrack
335    status_t status = createTrack_l(streamType,
336                                  sampleRate,
337                                  format,
338                                  frameCount,
339                                  flags,
340                                  sharedBuffer,
341                                  output,
342                                  0 /*epoch*/);
343
344    if (status != NO_ERROR) {
345        if (mAudioTrackThread != 0) {
346            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
347            mAudioTrackThread->requestExitAndWait();
348            mAudioTrackThread.clear();
349        }
350        // Use of direct and offloaded output streams is ref counted by audio policy manager.
351        // As getOutput() was called above and resulted in an output stream being opened,
352        // we need to release it.
353        AudioSystem::releaseOutput(output);
354        return status;
355    }
356
357    mStatus = NO_ERROR;
358    mStreamType = streamType;
359    mFormat = format;
360    mSharedBuffer = sharedBuffer;
361    mState = STATE_STOPPED;
362    mUserData = user;
363    mLoopPeriod = 0;
364    mMarkerPosition = 0;
365    mMarkerReached = false;
366    mNewPosition = 0;
367    mUpdatePeriod = 0;
368    AudioSystem::acquireAudioSessionId(mSessionId);
369    mSequence = 1;
370    mObservedSequence = mSequence;
371    mInUnderrun = false;
372    mOutput = output;
373
374    return NO_ERROR;
375}
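// Illustrative only, not part of the original file: a sketch of how a native client might
// construct a streaming track with a callback.  The parameters and the callback/cookie
// names are placeholders, and default arguments are assumed for the remaining parameters.
//
//     sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
//             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
//             0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, myCallback, myCookie);
//     if (track->initCheck() == NO_ERROR) {
//         track->start();
//     }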
376
377// -------------------------------------------------------------------------
378
379status_t AudioTrack::start()
380{
381    AutoMutex lock(mLock);
382
383    if (mState == STATE_ACTIVE) {
384        return INVALID_OPERATION;
385    }
386
387    mInUnderrun = true;
388
389    State previousState = mState;
390    if (previousState == STATE_PAUSED_STOPPING) {
391        mState = STATE_STOPPING;
392    } else {
393        mState = STATE_ACTIVE;
394    }
395    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
396        // reset current position as seen by client to 0
397        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
398        // force refresh of remaining frames by processAudioBuffer() as last
399        // write before stop could be partial.
400        mRefreshRemaining = true;
401    }
402    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
403    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
404
405    sp<AudioTrackThread> t = mAudioTrackThread;
406    if (t != 0) {
407        if (previousState == STATE_STOPPING) {
408            mProxy->interrupt();
409        } else {
410            t->resume();
411        }
412    } else {
413        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
414        get_sched_policy(0, &mPreviousSchedulingGroup);
415        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
416    }
417
418    status_t status = NO_ERROR;
419    if (!(flags & CBLK_INVALID)) {
420        status = mAudioTrack->start();
421        if (status == DEAD_OBJECT) {
422            flags |= CBLK_INVALID;
423        }
424    }
425    if (flags & CBLK_INVALID) {
426        status = restoreTrack_l("start");
427    }
428
429    if (status != NO_ERROR) {
430        ALOGE("start() status %d", status);
431        mState = previousState;
432        if (t != 0) {
433            if (previousState != STATE_STOPPING) {
434                t->pause();
435            }
436        } else {
437            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
438            set_sched_policy(0, mPreviousSchedulingGroup);
439        }
440    }
441
442    return status;
443}
444
445void AudioTrack::stop()
446{
447    AutoMutex lock(mLock);
448    // FIXME pause then stop should not be a nop
449    if (mState != STATE_ACTIVE) {
450        return;
451    }
452
453    if (isOffloaded()) {
454        mState = STATE_STOPPING;
455    } else {
456        mState = STATE_STOPPED;
457    }
458
459    mProxy->interrupt();
460    mAudioTrack->stop();
461    // the playback head position will reset to 0, so if a marker is set, we need
462    // to activate it again
463    mMarkerReached = false;
464#if 0
465    // Force flush if a shared buffer is used otherwise audioflinger
466    // will not stop before end of buffer is reached.
467    // It may be needed to make sure that playback actually stops, most likely when looping is enabled.
468    if (mSharedBuffer != 0) {
469        flush_l();
470    }
471#endif
472
473    sp<AudioTrackThread> t = mAudioTrackThread;
474    if (t != 0) {
475        if (!isOffloaded()) {
476            t->pause();
477        }
478    } else {
479        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
480        set_sched_policy(0, mPreviousSchedulingGroup);
481    }
482}
483
484bool AudioTrack::stopped() const
485{
486    AutoMutex lock(mLock);
487    return mState != STATE_ACTIVE;
488}
489
490void AudioTrack::flush()
491{
492    if (mSharedBuffer != 0) {
493        return;
494    }
495    AutoMutex lock(mLock);
496    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
497        return;
498    }
499    flush_l();
500}
501
502void AudioTrack::flush_l()
503{
504    ALOG_ASSERT(mState != STATE_ACTIVE);
505
506    // clear playback marker and periodic update counter
507    mMarkerPosition = 0;
508    mMarkerReached = false;
509    mUpdatePeriod = 0;
510    mRefreshRemaining = true;
511
512    mState = STATE_FLUSHED;
513    if (isOffloaded()) {
514        mProxy->interrupt();
515    }
516    mProxy->flush();
517    mAudioTrack->flush();
518}
519
520void AudioTrack::pause()
521{
522    AutoMutex lock(mLock);
523    if (mState == STATE_ACTIVE) {
524        mState = STATE_PAUSED;
525    } else if (mState == STATE_STOPPING) {
526        mState = STATE_PAUSED_STOPPING;
527    } else {
528        return;
529    }
530    mProxy->interrupt();
531    mAudioTrack->pause();
532}
533
534status_t AudioTrack::setVolume(float left, float right)
535{
536    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
537        return BAD_VALUE;
538    }
539
540    AutoMutex lock(mLock);
541    mVolume[LEFT] = left;
542    mVolume[RIGHT] = right;
543
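    // The proxy expects both gains packed as 4.12 fixed point in one word: left in the low
    // 16 bits, right in the high 16 bits, with 1.0f mapping to 0x1000.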
544    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
545
546    if (isOffloaded()) {
547        mAudioTrack->signal();
548    }
549    return NO_ERROR;
550}
551
552status_t AudioTrack::setVolume(float volume)
553{
554    return setVolume(volume, volume);
555}
556
557status_t AudioTrack::setAuxEffectSendLevel(float level)
558{
559    if (level < 0.0f || level > 1.0f) {
560        return BAD_VALUE;
561    }
562
563    AutoMutex lock(mLock);
564    mSendLevel = level;
565    mProxy->setSendLevel(level);
566
567    return NO_ERROR;
568}
569
570void AudioTrack::getAuxEffectSendLevel(float* level) const
571{
572    if (level != NULL) {
573        *level = mSendLevel;
574    }
575}
576
577status_t AudioTrack::setSampleRate(uint32_t rate)
578{
579    if (mIsTimed || isOffloaded()) {
580        return INVALID_OPERATION;
581    }
582
583    uint32_t afSamplingRate;
584    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
585        return NO_INIT;
586    }
587    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
588    if (rate == 0 || rate > afSamplingRate*2 ) {
589        return BAD_VALUE;
590    }
591
592    AutoMutex lock(mLock);
593    mSampleRate = rate;
594    mProxy->setSampleRate(rate);
595
596    return NO_ERROR;
597}
598
599uint32_t AudioTrack::getSampleRate() const
600{
601    if (mIsTimed) {
602        return 0;
603    }
604
605    AutoMutex lock(mLock);
606    return mSampleRate;
607}
608
609status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
610{
611    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
612        return INVALID_OPERATION;
613    }
614
615    if (loopCount == 0) {
616        ;
617    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
618            loopEnd - loopStart >= MIN_LOOP) {
619        ;
620    } else {
621        return BAD_VALUE;
622    }
623
624    AutoMutex lock(mLock);
625    // See setPosition() regarding setting parameters such as loop points or position while active
626    if (mState == STATE_ACTIVE) {
627        return INVALID_OPERATION;
628    }
629    setLoop_l(loopStart, loopEnd, loopCount);
630    return NO_ERROR;
631}
632
633void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
634{
635    // FIXME If setting a loop also sets position to start of loop, then
636    //       this is correct.  Otherwise it should be removed.
637    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
638    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
639    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
640}
641
642status_t AudioTrack::setMarkerPosition(uint32_t marker)
643{
644    // The only purpose of setting marker position is to get a callback
645    if (mCbf == NULL || isOffloaded()) {
646        return INVALID_OPERATION;
647    }
648
649    AutoMutex lock(mLock);
650    mMarkerPosition = marker;
651    mMarkerReached = false;
652
653    return NO_ERROR;
654}
655
656status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
657{
658    if (isOffloaded()) {
659        return INVALID_OPERATION;
660    }
661    if (marker == NULL) {
662        return BAD_VALUE;
663    }
664
665    AutoMutex lock(mLock);
666    *marker = mMarkerPosition;
667
668    return NO_ERROR;
669}
670
671status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
672{
673    // The only purpose of setting position update period is to get a callback
674    if (mCbf == NULL || isOffloaded()) {
675        return INVALID_OPERATION;
676    }
677
678    AutoMutex lock(mLock);
679    mNewPosition = mProxy->getPosition() + updatePeriod;
680    mUpdatePeriod = updatePeriod;
681    return NO_ERROR;
682}
683
684status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
685{
686    if (isOffloaded()) {
687        return INVALID_OPERATION;
688    }
689    if (updatePeriod == NULL) {
690        return BAD_VALUE;
691    }
692
693    AutoMutex lock(mLock);
694    *updatePeriod = mUpdatePeriod;
695
696    return NO_ERROR;
697}
698
699status_t AudioTrack::setPosition(uint32_t position)
700{
701    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
702        return INVALID_OPERATION;
703    }
704    if (position > mFrameCount) {
705        return BAD_VALUE;
706    }
707
708    AutoMutex lock(mLock);
709    // Currently we require that the player is inactive before setting parameters such as position
710    // or loop points.  Otherwise, there could be a race condition: the application could read the
711    // current position, compute a new position or loop parameters, and then set that position or
712    // loop parameters but it would do the "wrong" thing since the position has continued to advance
713    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
714    // to specify how it wants to handle such scenarios.
715    if (mState == STATE_ACTIVE) {
716        return INVALID_OPERATION;
717    }
718    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
719    mLoopPeriod = 0;
720    // FIXME Check whether loops and setting position are incompatible in old code.
721    // If we use setLoop for both purposes we lose the capability to set the position while looping.
722    mStaticProxy->setLoop(position, mFrameCount, 0);
723
724    return NO_ERROR;
725}
726
727status_t AudioTrack::getPosition(uint32_t *position) const
728{
729    if (position == NULL) {
730        return BAD_VALUE;
731    }
732
733    AutoMutex lock(mLock);
734    if (isOffloaded()) {
735        uint32_t dspFrames = 0;
736
737        if (mOutput != 0) {
738            uint32_t halFrames;
739            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
740        }
741        *position = dspFrames;
742    } else {
743        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
744        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
745                mProxy->getPosition();
746    }
747    return NO_ERROR;
748}
749
750status_t AudioTrack::getBufferPosition(size_t *position)
751{
752    if (mSharedBuffer == 0 || mIsTimed) {
753        return INVALID_OPERATION;
754    }
755    if (position == NULL) {
756        return BAD_VALUE;
757    }
758
759    AutoMutex lock(mLock);
760    *position = mStaticProxy->getBufferPosition();
761    return NO_ERROR;
762}
763
764status_t AudioTrack::reload()
765{
766    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
767        return INVALID_OPERATION;
768    }
769
770    AutoMutex lock(mLock);
771    // See setPosition() regarding setting parameters such as loop points or position while active
772    if (mState == STATE_ACTIVE) {
773        return INVALID_OPERATION;
774    }
775    mNewPosition = mUpdatePeriod;
776    mLoopPeriod = 0;
777    // FIXME The new code cannot reload while keeping a loop specified.
778    // Need to check how the old code handled this, and whether it's a significant change.
779    mStaticProxy->setLoop(0, mFrameCount, 0);
780    return NO_ERROR;
781}
782
783audio_io_handle_t AudioTrack::getOutput()
784{
785    AutoMutex lock(mLock);
786    return mOutput;
787}
788
789// must be called with mLock held
790audio_io_handle_t AudioTrack::getOutput_l()
791{
792    if (mOutput) {
793        return mOutput;
794    } else {
795        return AudioSystem::getOutput(mStreamType,
796                                      mSampleRate, mFormat, mChannelMask, mFlags);
797    }
798}
799
800status_t AudioTrack::attachAuxEffect(int effectId)
801{
802    AutoMutex lock(mLock);
803    status_t status = mAudioTrack->attachAuxEffect(effectId);
804    if (status == NO_ERROR) {
805        mAuxEffectId = effectId;
806    }
807    return status;
808}
809
810// -------------------------------------------------------------------------
811
812// must be called with mLock held
813status_t AudioTrack::createTrack_l(
814        audio_stream_type_t streamType,
815        uint32_t sampleRate,
816        audio_format_t format,
817        size_t frameCount,
818        audio_output_flags_t flags,
819        const sp<IMemory>& sharedBuffer,
820        audio_io_handle_t output,
821        size_t epoch)
822{
823    status_t status;
824    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
825    if (audioFlinger == 0) {
826        ALOGE("Could not get audioflinger");
827        return NO_INIT;
828    }
829
830    // Not all of these values are needed under all conditions, but it is easier to get them all
831
832    uint32_t afLatency;
833    status = AudioSystem::getLatency(output, streamType, &afLatency);
834    if (status != NO_ERROR) {
835        ALOGE("getLatency(%d) failed status %d", output, status);
836        return NO_INIT;
837    }
838
839    size_t afFrameCount;
840    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
841    if (status != NO_ERROR) {
842        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
843        return NO_INIT;
844    }
845
846    uint32_t afSampleRate;
847    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
848    if (status != NO_ERROR) {
849        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
850        return NO_INIT;
851    }
852
853    // Client decides whether the track is TIMED (see below), but can only express a preference
854    // for FAST.  Server will perform additional tests.
855    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
856            // either of these use cases:
857            // use case 1: shared buffer
858            (sharedBuffer != 0) ||
859            // use case 2: callback handler
860            (mCbf != NULL))) {
861        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
862        // once denied, do not request again if IAudioTrack is re-created
863        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
864        mFlags = flags;
865    }
866    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
867
868    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
869    //  n = 1   fast track; nBuffering is ignored
870    //  n = 2   normal track, no sample rate conversion
871    //  n = 3   normal track, with sample rate conversion
872    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
873    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
874    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
875
876    mNotificationFramesAct = mNotificationFramesReq;
877
878    if (!audio_is_linear_pcm(format)) {
879
880        if (sharedBuffer != 0) {
881            // Same comment as below about ignoring frameCount parameter for set()
882            frameCount = sharedBuffer->size();
883        } else if (frameCount == 0) {
884            frameCount = afFrameCount;
885        }
886        if (mNotificationFramesAct != frameCount) {
887            mNotificationFramesAct = frameCount;
888        }
889    } else if (sharedBuffer != 0) {
890
891        // Ensure that buffer alignment matches channel count
892        // 8-bit data in shared memory is not currently supported by AudioFlinger
893        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
894        if (mChannelCount > 1) {
895            // More than 2 channels does not require stronger alignment than stereo
896            alignment <<= 1;
897        }
898        if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
899            ALOGE("Invalid buffer alignment: address %p, channel count %u",
900                    sharedBuffer->pointer(), mChannelCount);
901            return BAD_VALUE;
902        }
903
904        // When initializing a shared buffer AudioTrack via constructors,
905        // there's no frameCount parameter.
906        // But when initializing a shared buffer AudioTrack via set(),
907        // there _is_ a frameCount parameter.  We silently ignore it.
908        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
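        // For example, a hypothetical 32768-byte stereo shared buffer yields
        // 32768 / 2 / sizeof(int16_t) = 8192 frames.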
909
910    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
911
912        // FIXME move these calculations and associated checks to server
913
914        // Ensure that buffer depth covers at least audio hardware latency
915        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
916        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
917                afFrameCount, minBufCount, afSampleRate, afLatency);
918        if (minBufCount <= nBuffering) {
919            minBufCount = nBuffering;
920        }
921
922        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
923        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
924                ", afLatency=%d",
925                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
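        // Illustrative arithmetic with hypothetical values: afFrameCount = 960,
        // afSampleRate = 48000, minBufCount = 4 and a client sampleRate of 44100 give
        // minFrameCount = (960 * 44100 * 4) / 48000 = 3528 frames.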
926
927        if (frameCount == 0) {
928            frameCount = minFrameCount;
929        } else if (frameCount < minFrameCount) {
930            // not ALOGW because it happens all the time when playing key clicks over A2DP
931            ALOGV("Minimum buffer size corrected from %d to %d",
932                     frameCount, minFrameCount);
933            frameCount = minFrameCount;
934        }
935        // Make sure that application is notified with sufficient margin before underrun
936        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
937            mNotificationFramesAct = frameCount/nBuffering;
938        }
939
940    } else {
941        // For fast tracks, the frame count calculations and checks are done by server
942    }
943
944    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
945    if (mIsTimed) {
946        trackFlags |= IAudioFlinger::TRACK_TIMED;
947    }
948
949    pid_t tid = -1;
950    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
951        trackFlags |= IAudioFlinger::TRACK_FAST;
952        if (mAudioTrackThread != 0) {
953            tid = mAudioTrackThread->getTid();
954        }
955    }
956
957    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
958        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
959    }
960
961    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
962                                                      sampleRate,
963                                                      // AudioFlinger only sees 16-bit PCM
964                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
965                                                              AUDIO_FORMAT_PCM_16_BIT : format,
966                                                      mChannelMask,
967                                                      frameCount,
968                                                      &trackFlags,
969                                                      sharedBuffer,
970                                                      output,
971                                                      tid,
972                                                      &mSessionId,
973                                                      mName,
974                                                      mClientUid,
975                                                      &status);
976
977    if (track == 0) {
978        ALOGE("AudioFlinger could not create track, status: %d", status);
979        return status;
980    }
981    sp<IMemory> iMem = track->getCblk();
982    if (iMem == 0) {
983        ALOGE("Could not get control block");
984        return NO_INIT;
985    }
986    // invariant that mAudioTrack != 0 is true only after set() returns successfully
987    if (mAudioTrack != 0) {
988        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
989        mDeathNotifier.clear();
990    }
991    mAudioTrack = track;
992    mCblkMemory = iMem;
993    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
994    mCblk = cblk;
995    size_t temp = cblk->frameCount_;
996    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
997        // In current design, AudioTrack client checks and ensures frame count validity before
998        // passing it to AudioFlinger so AudioFlinger should not return a different value except
999        // for fast track as it uses a special method of assigning frame count.
1000        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
1001    }
1002    frameCount = temp;
1003    mAwaitBoost = false;
1004    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1005        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1006            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
1007            mAwaitBoost = true;
1008            if (sharedBuffer == 0) {
1009                // double-buffering is not required for fast tracks, due to tighter scheduling
1010                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount) {
1011                    mNotificationFramesAct = frameCount;
1012                }
1013            }
1014        } else {
1015            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1016            // once denied, do not request again if IAudioTrack is re-created
1017            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
1018            mFlags = flags;
1019            if (sharedBuffer == 0) {
1020                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1021                    mNotificationFramesAct = frameCount/nBuffering;
1022                }
1023            }
1024        }
1025    }
1026    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1027        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1028            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1029        } else {
1030            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1031            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1032            mFlags = flags;
1033            return NO_INIT;
1034        }
1035    }
1036
1037    mRefreshRemaining = true;
1038
1039    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1040    // is the value of pointer() for the shared buffer, otherwise buffers points
1041    // immediately after the control block.  This address is for the mapping within client
1042    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1043    void* buffers;
1044    if (sharedBuffer == 0) {
1045        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1046    } else {
1047        buffers = sharedBuffer->pointer();
1048    }
1049
1050    mAudioTrack->attachAuxEffect(mAuxEffectId);
1051    // FIXME don't believe this lie
1052    mLatency = afLatency + (1000*frameCount) / sampleRate;
1053    mFrameCount = frameCount;
1054    // If IAudioTrack is re-created, don't let the requested frameCount
1055    // decrease.  This can confuse clients that cache frameCount().
1056    if (frameCount > mReqFrameCount) {
1057        mReqFrameCount = frameCount;
1058    }
1059
1060    // update proxy
1061    if (sharedBuffer == 0) {
1062        mStaticProxy.clear();
1063        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1064    } else {
1065        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1066        mProxy = mStaticProxy;
1067    }
1068    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
1069            uint16_t(mVolume[LEFT] * 0x1000));
1070    mProxy->setSendLevel(mSendLevel);
1071    mProxy->setSampleRate(mSampleRate);
1072    mProxy->setEpoch(epoch);
1073    mProxy->setMinimum(mNotificationFramesAct);
1074
1075    mDeathNotifier = new DeathNotifier(this);
1076    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1077
1078    return NO_ERROR;
1079}
1080
1081status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1082{
1083    if (audioBuffer == NULL) {
1084        return BAD_VALUE;
1085    }
1086    if (mTransfer != TRANSFER_OBTAIN) {
1087        audioBuffer->frameCount = 0;
1088        audioBuffer->size = 0;
1089        audioBuffer->raw = NULL;
1090        return INVALID_OPERATION;
1091    }
1092
1093    const struct timespec *requested;
1094    if (waitCount == -1) {
1095        requested = &ClientProxy::kForever;
1096    } else if (waitCount == 0) {
1097        requested = &ClientProxy::kNonBlocking;
1098    } else if (waitCount > 0) {
1099        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1100        struct timespec timeout;
1101        timeout.tv_sec = ms / 1000;
1102        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1103        requested = &timeout;
1104    } else {
1105        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1106        requested = NULL;
1107    }
1108    return obtainBuffer(audioBuffer, requested);
1109}
1110
1111status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1112        struct timespec *elapsed, size_t *nonContig)
1113{
1114    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1115    uint32_t oldSequence = 0;
1116    uint32_t newSequence;
1117
1118    Proxy::Buffer buffer;
1119    status_t status = NO_ERROR;
1120
1121    static const int32_t kMaxTries = 5;
1122    int32_t tryCounter = kMaxTries;
1123
1124    do {
1125        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1126        // keep them from going away if another thread re-creates the track during obtainBuffer()
1127        sp<AudioTrackClientProxy> proxy;
1128        sp<IMemory> iMem;
1129
1130        {   // start of lock scope
1131            AutoMutex lock(mLock);
1132
1133            newSequence = mSequence;
1134            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1135            if (status == DEAD_OBJECT) {
1136                // re-create track, unless someone else has already done so
1137                if (newSequence == oldSequence) {
1138                    status = restoreTrack_l("obtainBuffer");
1139                    if (status != NO_ERROR) {
1140                        buffer.mFrameCount = 0;
1141                        buffer.mRaw = NULL;
1142                        buffer.mNonContig = 0;
1143                        break;
1144                    }
1145                }
1146            }
1147            oldSequence = newSequence;
1148
1149            // Keep the extra references
1150            proxy = mProxy;
1151            iMem = mCblkMemory;
1152
1153            if (mState == STATE_STOPPING) {
1154                status = -EINTR;
1155                buffer.mFrameCount = 0;
1156                buffer.mRaw = NULL;
1157                buffer.mNonContig = 0;
1158                break;
1159            }
1160
1161            // Non-blocking if track is stopped or paused
1162            if (mState != STATE_ACTIVE) {
1163                requested = &ClientProxy::kNonBlocking;
1164            }
1165
1166        }   // end of lock scope
1167
1168        buffer.mFrameCount = audioBuffer->frameCount;
1169        // FIXME starts the requested timeout and elapsed over from scratch
1170        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1171
1172    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1173
1174    audioBuffer->frameCount = buffer.mFrameCount;
1175    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1176    audioBuffer->raw = buffer.mRaw;
1177    if (nonContig != NULL) {
1178        *nonContig = buffer.mNonContig;
1179    }
1180    return status;
1181}
1182
1183void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1184{
1185    if (mTransfer == TRANSFER_SHARED) {
1186        return;
1187    }
1188
1189    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1190    if (stepCount == 0) {
1191        return;
1192    }
1193
1194    Proxy::Buffer buffer;
1195    buffer.mFrameCount = stepCount;
1196    buffer.mRaw = audioBuffer->raw;
1197
1198    AutoMutex lock(mLock);
1199    mInUnderrun = false;
1200    mProxy->releaseBuffer(&buffer);
1201
1202    // restart track if it was disabled by audioflinger due to previous underrun
1203    if (mState == STATE_ACTIVE) {
1204        audio_track_cblk_t* cblk = mCblk;
1205        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1206            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
1207                    this, mName.string());
1208            // FIXME ignoring status
1209            mAudioTrack->start();
1210        }
1211    }
1212}
1213
1214// -------------------------------------------------------------------------
1215
1216ssize_t AudioTrack::write(const void* buffer, size_t userSize)
1217{
1218    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1219        return INVALID_OPERATION;
1220    }
1221
1222    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1223        // Sanity check: the user is most likely passing an error code, and it would
1224        // make the return value ambiguous (actualSize vs error).
1225        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d))", buffer, userSize, userSize);
1226        return BAD_VALUE;
1227    }
1228
1229    size_t written = 0;
1230    Buffer audioBuffer;
1231
1232    while (userSize >= mFrameSize) {
1233        audioBuffer.frameCount = userSize / mFrameSize;
1234
1235        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
1236        if (err < 0) {
1237            if (written > 0) {
1238                break;
1239            }
1240            return ssize_t(err);
1241        }
1242
1243        size_t toWrite;
1244        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1245            // Divide capacity by 2 to take expansion into account
1246            toWrite = audioBuffer.size >> 1;
1247            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1248        } else {
1249            toWrite = audioBuffer.size;
1250            memcpy(audioBuffer.i8, buffer, toWrite);
1251        }
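        // In the 8-bit case above, for example, a 1024-byte (512-sample) server buffer
        // accepts 512 bytes of 8-bit source data, which memcpy_to_i16_from_u8() expands
        // into the full 1024 bytes of 16-bit PCM.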
1252        buffer = ((const char *) buffer) + toWrite;
1253        userSize -= toWrite;
1254        written += toWrite;
1255
1256        releaseBuffer(&audioBuffer);
1257    }
1258
1259    return written;
1260}
1261
1262// -------------------------------------------------------------------------
1263
1264TimedAudioTrack::TimedAudioTrack() {
1265    mIsTimed = true;
1266}
1267
1268status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1269{
1270    AutoMutex lock(mLock);
1271    status_t result = UNKNOWN_ERROR;
1272
1273#if 1
1274    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1275    // while we are accessing the cblk
1276    sp<IAudioTrack> audioTrack = mAudioTrack;
1277    sp<IMemory> iMem = mCblkMemory;
1278#endif
1279
1280    // If the track is not invalid already, try to allocate a buffer.  If the allocation
1281    // fails, indicating that the server is dead, flag the track as invalid so
1282    // we can attempt to restore it in just a bit.
1283    audio_track_cblk_t* cblk = mCblk;
1284    if (!(cblk->mFlags & CBLK_INVALID)) {
1285        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1286        if (result == DEAD_OBJECT) {
1287            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1288        }
1289    }
1290
1291    // If the track is invalid at this point, attempt to restore it and try the
1292    // allocation one more time.
1293    if (cblk->mFlags & CBLK_INVALID) {
1294        result = restoreTrack_l("allocateTimedBuffer");
1295
1296        if (result == NO_ERROR) {
1297            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1298        }
1299    }
1300
1301    return result;
1302}
1303
1304status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1305                                           int64_t pts)
1306{
1307    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1308    {
1309        AutoMutex lock(mLock);
1310        audio_track_cblk_t* cblk = mCblk;
1311        // restart track if it was disabled by audioflinger due to previous underrun
1312        if (buffer->size() != 0 && status == NO_ERROR &&
1313                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1314            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1315            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1316            // FIXME ignoring status
1317            mAudioTrack->start();
1318        }
1319    }
1320    return status;
1321}
1322
1323status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1324                                                TargetTimeline target)
1325{
1326    return mAudioTrack->setMediaTimeTransform(xform, target);
1327}
1328
1329// -------------------------------------------------------------------------
1330
1331nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
1332{
1333    // Currently the AudioTrack thread is not created if there are no callbacks.
1334    // Would it ever make sense to run the thread, even without callbacks?
1335    // If so, then replace this by checks at each use for mCbf != NULL.
1336    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1337
1338    mLock.lock();
1339    if (mAwaitBoost) {
1340        mAwaitBoost = false;
1341        mLock.unlock();
1342        static const int32_t kMaxTries = 5;
1343        int32_t tryCounter = kMaxTries;
1344        uint32_t pollUs = 10000;
1345        do {
1346            int policy = sched_getscheduler(0);
1347            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1348                break;
1349            }
1350            usleep(pollUs);
1351            pollUs <<= 1;
1352        } while (tryCounter-- > 0);
1353        if (tryCounter < 0) {
1354            ALOGE("did not receive expected priority boost on time");
1355        }
1356        // Run again immediately
1357        return 0;
1358    }
1359
1360    // Can only reference mCblk while locked
1361    int32_t flags = android_atomic_and(
1362        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1363
1364    // Check for track invalidation
1365    if (flags & CBLK_INVALID) {
1366        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1367        // AudioSystem cache. We should not exit here but after calling the callback so
1368        // that the upper layers can recreate the track
1369        if (!isOffloaded() || (mSequence == mObservedSequence)) {
1370            status_t status = restoreTrack_l("processAudioBuffer");
1371            mLock.unlock();
1372            // Run again immediately, but with a new IAudioTrack
1373            return 0;
1374        }
1375    }
1376
1377    bool waitStreamEnd = mState == STATE_STOPPING;
1378    bool active = mState == STATE_ACTIVE;
1379
1380    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1381    bool newUnderrun = false;
1382    if (flags & CBLK_UNDERRUN) {
1383#if 0
1384        // Currently in shared buffer mode, when the server reaches the end of buffer,
1385        // the track stays active in continuous underrun state.  It's up to the application
1386        // to pause or stop the track, or set the position to a new offset within buffer.
1387        // This was some experimental code to auto-pause on underrun.   Keeping it here
1388        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1389        if (mTransfer == TRANSFER_SHARED) {
1390            mState = STATE_PAUSED;
1391            active = false;
1392        }
1393#endif
1394        if (!mInUnderrun) {
1395            mInUnderrun = true;
1396            newUnderrun = true;
1397        }
1398    }
1399
1400    // Get current position of server
1401    size_t position = mProxy->getPosition();
1402
1403    // Manage marker callback
1404    bool markerReached = false;
1405    size_t markerPosition = mMarkerPosition;
1406    // FIXME fails for wraparound, need 64 bits
1407    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1408        mMarkerReached = markerReached = true;
1409    }
1410
1411    // Determine number of new position callback(s) that will be needed, while locked
1412    size_t newPosCount = 0;
1413    size_t newPosition = mNewPosition;
1414    size_t updatePeriod = mUpdatePeriod;
1415    // FIXME fails for wraparound, need 64 bits
1416    if (updatePeriod > 0 && position >= newPosition) {
1417        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1418        mNewPosition += updatePeriod * newPosCount;
1419    }
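    // For example, with updatePeriod = 1000 frames, newPosition = 5000 and position = 7200,
    // three callbacks are due (for 5000, 6000 and 7000): newPosCount = ((7200 - 5000) / 1000) + 1 = 3,
    // and mNewPosition advances to 8000.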
1420
1421    // Cache other fields that will be needed soon
1422    uint32_t loopPeriod = mLoopPeriod;
1423    uint32_t sampleRate = mSampleRate;
1424    size_t notificationFrames = mNotificationFramesAct;
1425    if (mRefreshRemaining) {
1426        mRefreshRemaining = false;
1427        mRemainingFrames = notificationFrames;
1428        mRetryOnPartialBuffer = false;
1429    }
1430    size_t misalignment = mProxy->getMisalignment();
1431    uint32_t sequence = mSequence;
1432
1433    // These fields don't need to be cached, because they are assigned only by set():
1434    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1435    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1436
1437    mLock.unlock();
1438
1439    if (waitStreamEnd) {
1440        AutoMutex lock(mLock);
1441
1442        sp<AudioTrackClientProxy> proxy = mProxy;
1443        sp<IMemory> iMem = mCblkMemory;
1444
1445        struct timespec timeout;
1446        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1447        timeout.tv_nsec = 0;
1448
1449        mLock.unlock();
1450        status_t status = mProxy->waitStreamEndDone(&timeout);
1451        mLock.lock();
1452        switch (status) {
1453        case NO_ERROR:
1454        case DEAD_OBJECT:
1455        case TIMED_OUT:
1456            mLock.unlock();
1457            mCbf(EVENT_STREAM_END, mUserData, NULL);
1458            mLock.lock();
1459            if (mState == STATE_STOPPING) {
1460                mState = STATE_STOPPED;
1461                if (status != DEAD_OBJECT) {
1462                    return NS_INACTIVE;
1463                }
1464            }
1465            return 0;
1466        default:
1467            return 0;
1468        }
1469    }
1470
1471    // perform callbacks while unlocked
1472    if (newUnderrun) {
1473        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1474    }
1475    // FIXME we will miss loops if loop cycle was signaled several times since last call
1476    //       to processAudioBuffer()
1477    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1478        mCbf(EVENT_LOOP_END, mUserData, NULL);
1479    }
1480    if (flags & CBLK_BUFFER_END) {
1481        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1482    }
1483    if (markerReached) {
1484        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1485    }
1486    while (newPosCount > 0) {
1487        size_t temp = newPosition;
1488        mCbf(EVENT_NEW_POS, mUserData, &temp);
1489        newPosition += updatePeriod;
1490        newPosCount--;
1491    }
1492
1493    if (mObservedSequence != sequence) {
1494        mObservedSequence = sequence;
1495        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1496        // for offloaded tracks, just wait for the upper layers to recreate the track
1497        if (isOffloaded()) {
1498            return NS_INACTIVE;
1499        }
1500    }
1501
1502    // if inactive, then don't run me again until re-started
1503    if (!active) {
1504        return NS_INACTIVE;
1505    }
1506
1507    // Compute the estimated time until the next timed event (position, markers, loops)
1508    // FIXME only for non-compressed audio
1509    uint32_t minFrames = ~0;
1510    if (!markerReached && position < markerPosition) {
1511        minFrames = markerPosition - position;
1512    }
1513    if (loopPeriod > 0 && loopPeriod < minFrames) {
1514        minFrames = loopPeriod;
1515    }
1516    if (updatePeriod > 0 && updatePeriod < minFrames) {
1517        minFrames = updatePeriod;
1518    }
1519
1520    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1521    static const uint32_t kPoll = 0;
1522    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1523        minFrames = kPoll * notificationFrames;
1524    }
1525
1526    // Convert frame units to time units
1527    nsecs_t ns = NS_WHENEVER;
1528    if (minFrames != (uint32_t) ~0) {
1529        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1530        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1531        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1532    }
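    // For example, minFrames = 2048 at sampleRate = 48000 is about 42.7 ms, so ns comes to
    // roughly 52.7 ms once the 10 ms fudge factor is added.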
1533
1534    // If not supplying data by EVENT_MORE_DATA, then we're done
1535    if (mTransfer != TRANSFER_CALLBACK) {
1536        return ns;
1537    }
1538
1539    struct timespec timeout;
1540    const struct timespec *requested = &ClientProxy::kForever;
1541    if (ns != NS_WHENEVER) {
1542        timeout.tv_sec = ns / 1000000000LL;
1543        timeout.tv_nsec = ns % 1000000000LL;
1544        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1545        requested = &timeout;
1546    }
1547
1548    while (mRemainingFrames > 0) {
1549
1550        Buffer audioBuffer;
1551        audioBuffer.frameCount = mRemainingFrames;
1552        size_t nonContig;
1553        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1554        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1555                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1556        requested = &ClientProxy::kNonBlocking;
1557        size_t avail = audioBuffer.frameCount + nonContig;
1558        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1559                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1560        if (err != NO_ERROR) {
1561            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1562                    (isOffloaded() && (err == DEAD_OBJECT))) {
1563                return 0;
1564            }
1565            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1566            return NS_NEVER;
1567        }
1568
1569        if (mRetryOnPartialBuffer && !isOffloaded()) {
1570            mRetryOnPartialBuffer = false;
1571            if (avail < mRemainingFrames) {
1572                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1573                if (ns < 0 || myns < ns) {
1574                    ns = myns;
1575                }
1576                return ns;
1577            }
1578        }
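        // Note: the 1100000000LL factor above is 1.1 s per second of audio,
        // i.e. a ~10% margin over the nominal frame duration, presumably to
        // give the server time to make the remaining frames available.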
1579
1580        // Divide buffer size by 2 to take into account the expansion
1581        // due to 8 to 16 bit conversion: the callback must fill only half
1582        // of the destination buffer
1583        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1584            audioBuffer.size >>= 1;
1585        }
1586
1587        size_t reqSize = audioBuffer.size;
1588        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1589        size_t writtenSize = audioBuffer.size;
1590        size_t writtenFrames = writtenSize / mFrameSize;
1591
1592        // Sanity check on returned size
1593        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1594            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1595                    reqSize, (int) writtenSize);
1596            return NS_NEVER;
1597        }
1598
1599        if (writtenSize == 0) {
1600            // The callback is done filling buffers for now.
1601            // Keep this thread alive to handle timed events and to retry
1602            // fetching more data at WAIT_PERIOD_MS intervals, but wait here
1603            // instead of spinning and hogging the CPU.
1604            return WAIT_PERIOD_MS * 1000000LL;
1605        }
1606
1607        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1608            // 8 to 16 bit conversion, note that source and destination are the same address
1609            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1610            audioBuffer.size <<= 1;
1611        }
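        // (memcpy_to_i16_from_u8() walks the buffer from its end toward its
        // start, which is what makes this in-place expansion safe.)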
1612
1613        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1614        audioBuffer.frameCount = releasedFrames;
1615        mRemainingFrames -= releasedFrames;
1616        if (misalignment >= releasedFrames) {
1617            misalignment -= releasedFrames;
1618        } else {
1619            misalignment = 0;
1620        }
1621
1622        releaseBuffer(&audioBuffer);
1623
1624        // FIXME here is where we would repeat EVENT_MORE_DATA on the same advanced buffer
1625        // if the callback does not accept the full chunk
1626        if (writtenSize < reqSize) {
1627            continue;
1628        }
1629
1630        // There could be enough non-contiguous frames available to satisfy the remaining request
1631        if (mRemainingFrames <= nonContig) {
1632            continue;
1633        }
1634
1635#if 0
1636        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1637        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1638        // that total to a sum == notificationFrames.
1639        if (0 < misalignment && misalignment <= mRemainingFrames) {
1640            mRemainingFrames = misalignment;
1641            return (mRemainingFrames * 1100000000LL) / sampleRate;
1642        }
1643#endif
1644
1645    }
1646    mRemainingFrames = notificationFrames;
1647    mRetryOnPartialBuffer = true;
1648
1649    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1650    return 0;
1651}
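
// Illustrative sketch only (not part of the original file): the kind of client
// EVENT_MORE_DATA callback that the TRANSFER_CALLBACK loop in
// processAudioBuffer() above drives.  The function name is hypothetical; the
// event constant and Buffer fields are the public AudioTrack API.
#if 0
static void exampleMoreDataCallback(int event, void* user, void* info)
{
    if (event != AudioTrack::EVENT_MORE_DATA) {
        return;     // marker, new-position and other events carry no buffer
    }
    AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
    // Fill the requested region; here, zeros (16-bit silence, assuming a
    // 16-bit PCM track).  Leaving buffer->size at the requested value tells
    // the loop above that the whole chunk was consumed, while setting it to 0
    // makes processAudioBuffer() back off for WAIT_PERIOD_MS before retrying.
    memset(buffer->raw, 0, buffer->size);
}
#endif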
1652
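// Recreate the IAudioTrack after the existing one has been invalidated
// (typically because the mediaserver process died), preserving the playback
// position as the new track's epoch and restarting playback if it was active.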
1653status_t AudioTrack::restoreTrack_l(const char *from)
1654{
1655    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1656          isOffloaded() ? "Offloaded" : "PCM", from);
1657    ++mSequence;
1658    status_t result;
1659
1660    // refresh the audio configuration cache in this process to make sure we get new
1661    // output parameters in getOutput_l() and createTrack_l()
1662    AudioSystem::clearAudioConfigCache();
1663
1664    if (isOffloaded()) {
1665        return DEAD_OBJECT;
1666    }
1667
1668    // force a new output query from the audio policy manager
1669    mOutput = 0;
1670    audio_io_handle_t output = getOutput_l();
1671
1672    // If a new IAudioTrack is created, createTrack_l() will modify the
1673    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1674    // It will also release the strong references to the previous IAudioTrack and IMemory.
1675
1676    // take the frames that will be lost by track recreation into account in the saved position
1677    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1678    mNewPosition = position + mUpdatePeriod;
1679    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1680    result = createTrack_l(mStreamType,
1681                           mSampleRate,
1682                           mFormat,
1683                           mReqFrameCount,  // so that frame count never goes down
1684                           mFlags,
1685                           mSharedBuffer,
1686                           output,
1687                           position /*epoch*/);
1688
1689    if (result == NO_ERROR) {
1690        // continue playback from last known position, but
1691        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1692        if (mStaticProxy != NULL) {
1693            mLoopPeriod = 0;
1694            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1695        }
1696        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1697        //       track destruction have been played? This is critical for the SoundPool implementation.
1698        //       This must be broken, and needs to be tested/debugged.
1699#if 0
1700        // restore write index and set other indexes to reflect empty buffer status
1701        if (!strcmp(from, "start")) {
1702            // Make sure that a client relying on callback events indicating underrun or
1703            // the actual number of audio frames played (e.g. SoundPool) receives them.
1704            if (mSharedBuffer == 0) {
1705                // restart playback even if buffer is not completely filled.
1706                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1707            }
1708        }
1709#endif
1710        if (mState == STATE_ACTIVE) {
1711            result = mAudioTrack->start();
1712        }
1713    }
1714    if (result != NO_ERROR) {
1715        // Use of direct and offloaded output streams is ref counted by the audio policy manager.
1716        // As getOutput() was called above and resulted in an output stream being opened,
1717        // we need to release it.
1718        AudioSystem::releaseOutput(output);
1719        ALOGW("restoreTrack_l() failed status %d", result);
1720        mState = STATE_STOPPED;
1721    }
1722
1723    return result;
1724}
1725
1726status_t AudioTrack::setParameters(const String8& keyValuePairs)
1727{
1728    AutoMutex lock(mLock);
1729    return mAudioTrack->setParameters(keyValuePairs);
1730}
1731
1732status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1733{
1734    AutoMutex lock(mLock);
1735    // FIXME not implemented for fast tracks; should use proxy and SSQ
1736    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1737        return INVALID_OPERATION;
1738    }
1739    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1740        return INVALID_OPERATION;
1741    }
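    // The server-side timestamp covers only the current IAudioTrack; adding
    // the proxy epoch below accounts for frames attributed to any previous
    // track instance (see restoreTrack_l()), keeping the reported position
    // continuous across track invalidation.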
1742    status_t status = mAudioTrack->getTimestamp(timestamp);
1743    if (status == NO_ERROR) {
1744        timestamp.mPosition += mProxy->getEpoch();
1745    }
1746    return status;
1747}
1748
1749String8 AudioTrack::getParameters(const String8& keys)
1750{
1751    if (mOutput) {
1752        return AudioSystem::getParameters(mOutput, keys);
1753    } else {
1754        return String8::empty();
1755    }
1756}
1757
1758status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
1759{
1760
1761    const size_t SIZE = 256;
1762    char buffer[SIZE];
1763    String8 result;
1764
1765    result.append(" AudioTrack::dump\n");
1766    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1767            mVolume[0], mVolume[1]);
1768    result.append(buffer);
1769    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
1770            mChannelCount, mFrameCount);
1771    result.append(buffer);
1772    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1773    result.append(buffer);
1774    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1775    result.append(buffer);
1776    ::write(fd, result.string(), result.size());
1777    return NO_ERROR;
1778}
1779
1780uint32_t AudioTrack::getUnderrunFrames() const
1781{
1782    AutoMutex lock(mLock);
1783    return mProxy->getUnderrunFrames();
1784}
1785
1786// =========================================================================
1787
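// Invoked when the IAudioTrack binder (the server side in mediaserver) dies;
// waking the proxy lets any thread blocked in obtainBuffer() notice the dead
// object and attempt recovery via restoreTrack_l().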
1788void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
1789{
1790    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1791    if (audioTrack != 0) {
1792        AutoMutex lock(audioTrack->mLock);
1793        audioTrack->mProxy->binderDied();
1794    }
1795}
1796
1797// =========================================================================
1798
1799AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1800    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1801      mIgnoreNextPausedInt(false)
1802{
1803}
1804
1805AudioTrack::AudioTrackThread::~AudioTrackThread()
1806{
1807}
1808
1809bool AudioTrack::AudioTrackThread::threadLoop()
1810{
1811    {
1812        AutoMutex _l(mMyLock);
1813        if (mPaused) {
1814            mMyCond.wait(mMyLock);
1815            // caller will check for exitPending()
1816            return true;
1817        }
1818        if (mIgnoreNextPausedInt) {
1819            mIgnoreNextPausedInt = false;
1820            mPausedInt = false;
1821        }
1822        if (mPausedInt) {
1823            if (mPausedNs > 0) {
1824                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1825            } else {
1826                mMyCond.wait(mMyLock);
1827            }
1828            mPausedInt = false;
1829            return true;
1830        }
1831    }
1832    nsecs_t ns = mReceiver.processAudioBuffer(this);
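    // processAudioBuffer() returns a sleep request:
    //   0            run again immediately
    //   NS_INACTIVE  pause until explicitly resumed
    //   NS_NEVER     exit this thread
    //   NS_WHENEVER  no deadline; poll again after a default interval
    //   > 0          sleep for that many nanoseconds, then run again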
1833    switch (ns) {
1834    case 0:
1835        return true;
1836    case NS_INACTIVE:
1837        pauseInternal();
1838        return true;
1839    case NS_NEVER:
1840        return false;
1841    case NS_WHENEVER:
1842        // FIXME increase poll interval, or make event-driven
1843        ns = 1000000000LL;
1844        // fall through
1845    default:
1846        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
1847        pauseInternal(ns);
1848        return true;
1849    }
1850}
1851
1852void AudioTrack::AudioTrackThread::requestExit()
1853{
1854    // must be in this order so the wakeup from resume() observes the pending exit request
1855    Thread::requestExit();
1856    resume();
1857}
1858
1859void AudioTrack::AudioTrackThread::pause()
1860{
1861    AutoMutex _l(mMyLock);
1862    mPaused = true;
1863}
1864
1865void AudioTrack::AudioTrackThread::resume()
1866{
1867    AutoMutex _l(mMyLock);
1868    mIgnoreNextPausedInt = true;
1869    if (mPaused || mPausedInt) {
1870        mPaused = false;
1871        mPausedInt = false;
1872        mMyCond.signal();
1873    }
1874}
1875
1876void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1877{
1878    AutoMutex _l(mMyLock);
1879    mPausedInt = true;
1880    mPausedNs = ns;
1881}
1882
1883} // namespace android
1884