AudioTrack.cpp revision cc21e4f1e41dfa17e7e2bef995fcd22c45f6bcd0
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <sys/resource.h>
23#include <audio_utils/primitives.h>
24#include <binder/IPCThreadState.h>
25#include <media/AudioTrack.h>
26#include <utils/Log.h>
27#include <private/media/AudioTrackShared.h>
28#include <media/IAudioFlinger.h>
29
30#define WAIT_PERIOD_MS                  10
31#define WAIT_STREAM_END_TIMEOUT_SEC     120
32
33
34namespace android {
35// ---------------------------------------------------------------------------
36
37// static
38status_t AudioTrack::getMinFrameCount(
39        size_t* frameCount,
40        audio_stream_type_t streamType,
41        uint32_t sampleRate)
42{
43    if (frameCount == NULL) {
44        return BAD_VALUE;
45    }
46
47    // default to 0 in case of error
48    *frameCount = 0;
49
50    // FIXME merge with similar code in createTrack_l(), except we're missing
51    //       some information here that is available in createTrack_l():
52    //          audio_io_handle_t output
53    //          audio_format_t format
54    //          audio_channel_mask_t channelMask
55    //          audio_output_flags_t flags
56    uint32_t afSampleRate;
57    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
58        return NO_INIT;
59    }
60    size_t afFrameCount;
61    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
62        return NO_INIT;
63    }
64    uint32_t afLatency;
65    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
66        return NO_INIT;
67    }
68
69    // Ensure that buffer depth covers at least audio hardware latency
70    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
71    if (minBufCount < 2) {
72        minBufCount = 2;
73    }
74
75    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
76            afFrameCount * minBufCount * sampleRate / afSampleRate;
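    // Worked example (illustrative values only): with afFrameCount = 1024, afSampleRate = 44100
    // and afLatency = 100 ms, one HAL buffer lasts (1000 * 1024) / 44100 ~= 23 ms, so
    // minBufCount = 100 / 23 = 4.  A client also running at 44100 Hz then gets
    // *frameCount = 1024 * 4 = 4096 frames; other client rates scale by sampleRate / afSampleRate.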
77    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
78            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
79    return NO_ERROR;
80}
81
82// ---------------------------------------------------------------------------
83
84AudioTrack::AudioTrack()
85    : mStatus(NO_INIT),
86      mIsTimed(false),
87      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
88      mPreviousSchedulingGroup(SP_DEFAULT)
89{
90}
91
92AudioTrack::AudioTrack(
93        audio_stream_type_t streamType,
94        uint32_t sampleRate,
95        audio_format_t format,
96        audio_channel_mask_t channelMask,
97        int frameCount,
98        audio_output_flags_t flags,
99        callback_t cbf,
100        void* user,
101        int notificationFrames,
102        int sessionId,
103        transfer_type transferType,
104        const audio_offload_info_t *offloadInfo)
105    : mStatus(NO_INIT),
106      mIsTimed(false),
107      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
108      mPreviousSchedulingGroup(SP_DEFAULT)
109{
110    mStatus = set(streamType, sampleRate, format, channelMask,
111            frameCount, flags, cbf, user, notificationFrames,
112            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
113}
114
115AudioTrack::AudioTrack(
116        audio_stream_type_t streamType,
117        uint32_t sampleRate,
118        audio_format_t format,
119        audio_channel_mask_t channelMask,
120        const sp<IMemory>& sharedBuffer,
121        audio_output_flags_t flags,
122        callback_t cbf,
123        void* user,
124        int notificationFrames,
125        int sessionId,
126        transfer_type transferType,
127        const audio_offload_info_t *offloadInfo)
128    : mStatus(NO_INIT),
129      mIsTimed(false),
130      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
131      mPreviousSchedulingGroup(SP_DEFAULT)
132{
133    mStatus = set(streamType, sampleRate, format, channelMask,
134            0 /*frameCount*/, flags, cbf, user, notificationFrames,
135            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
136}
137
138AudioTrack::~AudioTrack()
139{
140    if (mStatus == NO_ERROR) {
141        // Make sure that the callback function exits in the case where
142        // it is looping on a buffer-full condition in obtainBuffer();
143        // otherwise the callback thread will never exit.
144        stop();
145        if (mAudioTrackThread != 0) {
146            mProxy->interrupt();
147            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
148            mAudioTrackThread->requestExitAndWait();
149            mAudioTrackThread.clear();
150        }
151        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
152        mAudioTrack.clear();
153        IPCThreadState::self()->flushCommands();
154        AudioSystem::releaseAudioSessionId(mSessionId);
155    }
156}
157
158status_t AudioTrack::set(
159        audio_stream_type_t streamType,
160        uint32_t sampleRate,
161        audio_format_t format,
162        audio_channel_mask_t channelMask,
163        int frameCountInt,
164        audio_output_flags_t flags,
165        callback_t cbf,
166        void* user,
167        int notificationFrames,
168        const sp<IMemory>& sharedBuffer,
169        bool threadCanCallJava,
170        int sessionId,
171        transfer_type transferType,
172        const audio_offload_info_t *offloadInfo)
173{
174    switch (transferType) {
175    case TRANSFER_DEFAULT:
176        if (sharedBuffer != 0) {
177            transferType = TRANSFER_SHARED;
178        } else if (cbf == NULL || threadCanCallJava) {
179            transferType = TRANSFER_SYNC;
180        } else {
181            transferType = TRANSFER_CALLBACK;
182        }
183        break;
184    case TRANSFER_CALLBACK:
185        if (cbf == NULL || sharedBuffer != 0) {
186            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
187            return BAD_VALUE;
188        }
189        break;
190    case TRANSFER_OBTAIN:
191    case TRANSFER_SYNC:
192        if (sharedBuffer != 0) {
193            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
194            return BAD_VALUE;
195        }
196        break;
197    case TRANSFER_SHARED:
198        if (sharedBuffer == 0) {
199            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
200            return BAD_VALUE;
201        }
202        break;
203    default:
204        ALOGE("Invalid transfer type %d", transferType);
205        return BAD_VALUE;
206    }
207    mTransfer = transferType;
208
209    // FIXME "int" here is legacy and will be replaced by size_t later
210    if (frameCountInt < 0) {
211        ALOGE("Invalid frame count %d", frameCountInt);
212        return BAD_VALUE;
213    }
214    size_t frameCount = frameCountInt;
215
216    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
217            sharedBuffer->size());
218
219    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
220
221    AutoMutex lock(mLock);
222
223    // invariant that mAudioTrack != 0 is true only after set() returns successfully
224    if (mAudioTrack != 0) {
225        ALOGE("Track already in use");
226        return INVALID_OPERATION;
227    }
228
229    mOutput = 0;
230
231    // handle default values first.
232    if (streamType == AUDIO_STREAM_DEFAULT) {
233        streamType = AUDIO_STREAM_MUSIC;
234    }
235
236    if (sampleRate == 0) {
237        uint32_t afSampleRate;
238        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
239            return NO_INIT;
240        }
241        sampleRate = afSampleRate;
242    }
243    mSampleRate = sampleRate;
244
245    // these below should probably come from the audioFlinger too...
246    if (format == AUDIO_FORMAT_DEFAULT) {
247        format = AUDIO_FORMAT_PCM_16_BIT;
248    }
249    if (channelMask == 0) {
250        channelMask = AUDIO_CHANNEL_OUT_STEREO;
251    }
252
253    // validate parameters
254    if (!audio_is_valid_format(format)) {
255        ALOGE("Invalid format %d", format);
256        return BAD_VALUE;
257    }
258
259    // AudioFlinger does not currently support 8-bit data in shared memory
260    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
261        ALOGE("8-bit data in shared memory is not supported");
262        return BAD_VALUE;
263    }
264
265    // force direct flag if format is not linear PCM
266    // or offload was requested
267    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
268            || !audio_is_linear_pcm(format)) {
269        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
270                    ? "Offload request, forcing to Direct Output"
271                    : "Not linear PCM, forcing to Direct Output");
272        flags = (audio_output_flags_t)
273                // FIXME why can't we allow direct AND fast?
274                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
275    }
276    // only allow deep buffering for music stream type
277    if (streamType != AUDIO_STREAM_MUSIC) {
278        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
279    }
280
281    if (!audio_is_output_channel(channelMask)) {
282        ALOGE("Invalid channel mask %#x", channelMask);
283        return BAD_VALUE;
284    }
285    mChannelMask = channelMask;
286    uint32_t channelCount = popcount(channelMask);
287    mChannelCount = channelCount;
288
289    if (audio_is_linear_pcm(format)) {
290        mFrameSize = channelCount * audio_bytes_per_sample(format);
291        mFrameSizeAF = channelCount * sizeof(int16_t);
292    } else {
293        mFrameSize = sizeof(uint8_t);
294        mFrameSizeAF = sizeof(uint8_t);
295    }
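    // For example, 16-bit stereo PCM gives mFrameSize = 2 * 2 = 4 bytes, and mFrameSizeAF is the
    // same.  For 8-bit stereo PCM, mFrameSize = 2 bytes but mFrameSizeAF remains 4 bytes because
    // AudioFlinger only sees 16-bit PCM (see createTrack_l() below).  Compressed (non linear PCM)
    // data is counted in bytes, so both frame sizes are 1.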
296
297    audio_io_handle_t output = AudioSystem::getOutput(
298                                    streamType,
299                                    sampleRate, format, channelMask,
300                                    flags,
301                                    offloadInfo);
302
303    if (output == 0) {
304        ALOGE("Could not get audio output for stream type %d", streamType);
305        return BAD_VALUE;
306    }
307
308    mVolume[LEFT] = 1.0f;
309    mVolume[RIGHT] = 1.0f;
310    mSendLevel = 0.0f;
311    mFrameCount = frameCount;
312    mReqFrameCount = frameCount;
313    mNotificationFramesReq = notificationFrames;
314    mNotificationFramesAct = 0;
315    mSessionId = sessionId;
316    mAuxEffectId = 0;
317    mFlags = flags;
318    mCbf = cbf;
319
320    if (cbf != NULL) {
321        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
322        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
323    }
324
325    // create the IAudioTrack
326    status_t status = createTrack_l(streamType,
327                                  sampleRate,
328                                  format,
329                                  frameCount,
330                                  flags,
331                                  sharedBuffer,
332                                  output,
333                                  0 /*epoch*/);
334
335    if (status != NO_ERROR) {
336        if (mAudioTrackThread != 0) {
337            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
338            mAudioTrackThread->requestExitAndWait();
339            mAudioTrackThread.clear();
340        }
341        // Use of direct and offloaded output streams is reference counted by the audio policy
342        // manager. As getOutput() was called above and resulted in an output stream being
343        // opened, we need to release it.
344        AudioSystem::releaseOutput(output);
345        return status;
346    }
347
348    mStatus = NO_ERROR;
349    mStreamType = streamType;
350    mFormat = format;
351    mSharedBuffer = sharedBuffer;
352    mState = STATE_STOPPED;
353    mUserData = user;
354    mLoopPeriod = 0;
355    mMarkerPosition = 0;
356    mMarkerReached = false;
357    mNewPosition = 0;
358    mUpdatePeriod = 0;
359    AudioSystem::acquireAudioSessionId(mSessionId);
360    mSequence = 1;
361    mObservedSequence = mSequence;
362    mInUnderrun = false;
363    mOutput = output;
364
365    return NO_ERROR;
366}
367
368// -------------------------------------------------------------------------
369
370status_t AudioTrack::start()
371{
372    AutoMutex lock(mLock);
373
374    if (mState == STATE_ACTIVE) {
375        return INVALID_OPERATION;
376    }
377
378    mInUnderrun = true;
379
380    State previousState = mState;
381    if (previousState == STATE_PAUSED_STOPPING) {
382        mState = STATE_STOPPING;
383    } else {
384        mState = STATE_ACTIVE;
385    }
386    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
387        // reset current position as seen by client to 0
388        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
389        // force refresh of remaining frames by processAudioBuffer() as last
390        // write before stop could be partial.
391        mRefreshRemaining = true;
392    }
393    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
394    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
395
396    sp<AudioTrackThread> t = mAudioTrackThread;
397    if (t != 0) {
398        if (previousState == STATE_STOPPING) {
399            mProxy->interrupt();
400        } else {
401            t->resume();
402        }
403    } else {
404        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
405        get_sched_policy(0, &mPreviousSchedulingGroup);
406        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
407    }
408
409    status_t status = NO_ERROR;
410    if (!(flags & CBLK_INVALID)) {
411        status = mAudioTrack->start();
412        if (status == DEAD_OBJECT) {
413            flags |= CBLK_INVALID;
414        }
415    }
416    if (flags & CBLK_INVALID) {
417        status = restoreTrack_l("start");
418    }
419
420    if (status != NO_ERROR) {
421        ALOGE("start() status %d", status);
422        mState = previousState;
423        if (t != 0) {
424            if (previousState != STATE_STOPPING) {
425                t->pause();
426            }
427        } else {
428            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
429            set_sched_policy(0, mPreviousSchedulingGroup);
430        }
431    }
432
433    return status;
434}
435
436void AudioTrack::stop()
437{
438    AutoMutex lock(mLock);
439    // FIXME pause then stop should not be a nop
440    if (mState != STATE_ACTIVE) {
441        return;
442    }
443
444    if (isOffloaded()) {
445        mState = STATE_STOPPING;
446    } else {
447        mState = STATE_STOPPED;
448    }
449
450    mProxy->interrupt();
451    mAudioTrack->stop();
452    // the playback head position will reset to 0, so if a marker is set, we need
453    // to activate it again
454    mMarkerReached = false;
455#if 0
456    // Force a flush if a shared buffer is used, otherwise audioflinger
457    // will not stop before the end of the buffer is reached.
458    // This may be needed to make sure we actually stop playback, most likely when looping is on.
459    if (mSharedBuffer != 0) {
460        flush_l();
461    }
462#endif
463
464    sp<AudioTrackThread> t = mAudioTrackThread;
465    if (t != 0) {
466        if (!isOffloaded()) {
467            t->pause();
468        }
469    } else {
470        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
471        set_sched_policy(0, mPreviousSchedulingGroup);
472    }
473}
474
475bool AudioTrack::stopped() const
476{
477    AutoMutex lock(mLock);
478    return mState != STATE_ACTIVE;
479}
480
481void AudioTrack::flush()
482{
483    if (mSharedBuffer != 0) {
484        return;
485    }
486    AutoMutex lock(mLock);
487    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
488        return;
489    }
490    flush_l();
491}
492
493void AudioTrack::flush_l()
494{
495    ALOG_ASSERT(mState != STATE_ACTIVE);
496
497    // clear playback marker and periodic update counter
498    mMarkerPosition = 0;
499    mMarkerReached = false;
500    mUpdatePeriod = 0;
501    mRefreshRemaining = true;
502
503    mState = STATE_FLUSHED;
504    if (isOffloaded()) {
505        mProxy->interrupt();
506    }
507    mProxy->flush();
508    mAudioTrack->flush();
509}
510
511void AudioTrack::pause()
512{
513    AutoMutex lock(mLock);
514    if (mState == STATE_ACTIVE) {
515        mState = STATE_PAUSED;
516    } else if (mState == STATE_STOPPING) {
517        mState = STATE_PAUSED_STOPPING;
518    } else {
519        return;
520    }
521    mProxy->interrupt();
522    mAudioTrack->pause();
523}
524
525status_t AudioTrack::setVolume(float left, float right)
526{
527    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
528        return BAD_VALUE;
529    }
530
531    AutoMutex lock(mLock);
532    mVolume[LEFT] = left;
533    mVolume[RIGHT] = right;
534
535    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
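    // The proxy stores each channel gain in unsigned 4.12 fixed point, where 0x1000 represents 1.0:
    // the right gain goes in the high 16 bits and the left gain in the low 16 bits.  For example
    // (illustrative values only), left = 0.5 and right = 1.0 pack to 0x10000800.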
536
537    if (isOffloaded()) {
538        mAudioTrack->signal();
539    }
540    return NO_ERROR;
541}
542
543status_t AudioTrack::setVolume(float volume)
544{
545    return setVolume(volume, volume);
546}
547
548status_t AudioTrack::setAuxEffectSendLevel(float level)
549{
550    if (level < 0.0f || level > 1.0f) {
551        return BAD_VALUE;
552    }
553
554    AutoMutex lock(mLock);
555    mSendLevel = level;
556    mProxy->setSendLevel(level);
557
558    return NO_ERROR;
559}
560
561void AudioTrack::getAuxEffectSendLevel(float* level) const
562{
563    if (level != NULL) {
564        *level = mSendLevel;
565    }
566}
567
568status_t AudioTrack::setSampleRate(uint32_t rate)
569{
570    if (mIsTimed || isOffloaded()) {
571        return INVALID_OPERATION;
572    }
573
574    uint32_t afSamplingRate;
575    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
576        return NO_INIT;
577    }
578    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
579    if (rate == 0 || rate > afSamplingRate*2 ) {
580        return BAD_VALUE;
581    }
582
583    AutoMutex lock(mLock);
584    mSampleRate = rate;
585    mProxy->setSampleRate(rate);
586
587    return NO_ERROR;
588}
589
590uint32_t AudioTrack::getSampleRate() const
591{
592    if (mIsTimed) {
593        return 0;
594    }
595
596    AutoMutex lock(mLock);
597    return mSampleRate;
598}
599
600status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
601{
602    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
603        return INVALID_OPERATION;
604    }
605
606    if (loopCount == 0) {
607        ;
608    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
609            loopEnd - loopStart >= MIN_LOOP) {
610        ;
611    } else {
612        return BAD_VALUE;
613    }
614
615    AutoMutex lock(mLock);
616    // See setPosition() regarding setting parameters such as loop points or position while active
617    if (mState == STATE_ACTIVE) {
618        return INVALID_OPERATION;
619    }
620    setLoop_l(loopStart, loopEnd, loopCount);
621    return NO_ERROR;
622}
623
624void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
625{
626    // FIXME If setting a loop also sets position to start of loop, then
627    //       this is correct.  Otherwise it should be removed.
628    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
629    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
630    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
631}
632
633status_t AudioTrack::setMarkerPosition(uint32_t marker)
634{
635    // The only purpose of setting marker position is to get a callback
636    if (mCbf == NULL || isOffloaded()) {
637        return INVALID_OPERATION;
638    }
639
640    AutoMutex lock(mLock);
641    mMarkerPosition = marker;
642    mMarkerReached = false;
643
644    return NO_ERROR;
645}
646
647status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
648{
649    if (isOffloaded()) {
650        return INVALID_OPERATION;
651    }
652    if (marker == NULL) {
653        return BAD_VALUE;
654    }
655
656    AutoMutex lock(mLock);
657    *marker = mMarkerPosition;
658
659    return NO_ERROR;
660}
661
662status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
663{
664    // The only purpose of setting position update period is to get a callback
665    if (mCbf == NULL || isOffloaded()) {
666        return INVALID_OPERATION;
667    }
668
669    AutoMutex lock(mLock);
670    mNewPosition = mProxy->getPosition() + updatePeriod;
671    mUpdatePeriod = updatePeriod;
672    return NO_ERROR;
673}
674
675status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
676{
677    if (isOffloaded()) {
678        return INVALID_OPERATION;
679    }
680    if (updatePeriod == NULL) {
681        return BAD_VALUE;
682    }
683
684    AutoMutex lock(mLock);
685    *updatePeriod = mUpdatePeriod;
686
687    return NO_ERROR;
688}
689
690status_t AudioTrack::setPosition(uint32_t position)
691{
692    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
693        return INVALID_OPERATION;
694    }
695    if (position > mFrameCount) {
696        return BAD_VALUE;
697    }
698
699    AutoMutex lock(mLock);
700    // Currently we require that the player is inactive before setting parameters such as position
701    // or loop points.  Otherwise, there could be a race condition: the application could read the
702    // current position, compute a new position or loop parameters, and then set that position or
703    // loop parameters but it would do the "wrong" thing since the position has continued to advance
704    // loop parameters, but it would do the "wrong" thing since the position has continued to advance
705    // in the meantime.  If we ever provide a sequencer in the server, we could allow a way for the app
706    if (mState == STATE_ACTIVE) {
707        return INVALID_OPERATION;
708    }
709    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
710    mLoopPeriod = 0;
711    // FIXME Check whether loops and setting position are incompatible in old code.
712    // If we use setLoop for both purposes we lose the capability to set the position while looping.
713    mStaticProxy->setLoop(position, mFrameCount, 0);
714
715    return NO_ERROR;
716}
717
718status_t AudioTrack::getPosition(uint32_t *position) const
719{
720    if (position == NULL) {
721        return BAD_VALUE;
722    }
723
724    AutoMutex lock(mLock);
725    if (isOffloaded()) {
726        uint32_t dspFrames = 0;
727
728        if (mOutput != 0) {
729            uint32_t halFrames;
730            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
731        }
732        *position = dspFrames;
733    } else {
734        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
735        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
736                mProxy->getPosition();
737    }
738    return NO_ERROR;
739}
740
741status_t AudioTrack::getBufferPosition(size_t *position)
742{
743    if (mSharedBuffer == 0 || mIsTimed) {
744        return INVALID_OPERATION;
745    }
746    if (position == NULL) {
747        return BAD_VALUE;
748    }
749
750    AutoMutex lock(mLock);
751    *position = mStaticProxy->getBufferPosition();
752    return NO_ERROR;
753}
754
755status_t AudioTrack::reload()
756{
757    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
758        return INVALID_OPERATION;
759    }
760
761    AutoMutex lock(mLock);
762    // See setPosition() regarding setting parameters such as loop points or position while active
763    if (mState == STATE_ACTIVE) {
764        return INVALID_OPERATION;
765    }
766    mNewPosition = mUpdatePeriod;
767    mLoopPeriod = 0;
768    // FIXME The new code cannot reload while keeping a loop specified.
769    // Need to check how the old code handled this, and whether it's a significant change.
770    mStaticProxy->setLoop(0, mFrameCount, 0);
771    return NO_ERROR;
772}
773
774audio_io_handle_t AudioTrack::getOutput()
775{
776    AutoMutex lock(mLock);
777    return mOutput;
778}
779
780// must be called with mLock held
781audio_io_handle_t AudioTrack::getOutput_l()
782{
783    if (mOutput) {
784        return mOutput;
785    } else {
786        return AudioSystem::getOutput(mStreamType,
787                                      mSampleRate, mFormat, mChannelMask, mFlags);
788    }
789}
790
791status_t AudioTrack::attachAuxEffect(int effectId)
792{
793    AutoMutex lock(mLock);
794    status_t status = mAudioTrack->attachAuxEffect(effectId);
795    if (status == NO_ERROR) {
796        mAuxEffectId = effectId;
797    }
798    return status;
799}
800
801// -------------------------------------------------------------------------
802
803// must be called with mLock held
804status_t AudioTrack::createTrack_l(
805        audio_stream_type_t streamType,
806        uint32_t sampleRate,
807        audio_format_t format,
808        size_t frameCount,
809        audio_output_flags_t flags,
810        const sp<IMemory>& sharedBuffer,
811        audio_io_handle_t output,
812        size_t epoch)
813{
814    status_t status;
815    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
816    if (audioFlinger == 0) {
817        ALOGE("Could not get audioflinger");
818        return NO_INIT;
819    }
820
821    // Not all of these values are needed under all conditions, but it is easier to get them all
822
823    uint32_t afLatency;
824    status = AudioSystem::getLatency(output, streamType, &afLatency);
825    if (status != NO_ERROR) {
826        ALOGE("getLatency(%d) failed status %d", output, status);
827        return NO_INIT;
828    }
829
830    size_t afFrameCount;
831    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
832    if (status != NO_ERROR) {
833        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
834        return NO_INIT;
835    }
836
837    uint32_t afSampleRate;
838    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
839    if (status != NO_ERROR) {
840        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
841        return NO_INIT;
842    }
843
844    // Client decides whether the track is TIMED (see below), but can only express a preference
845    // for FAST.  Server will perform additional tests.
846    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
847            // either of these use cases:
848            // use case 1: shared buffer
849            (sharedBuffer != 0) ||
850            // use case 2: callback handler
851            (mCbf != NULL))) {
852        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
853        // once denied, do not request again if IAudioTrack is re-created
854        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
855        mFlags = flags;
856    }
857    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
858
859    // The client's AudioTrack buffer is divided into n parts for the purpose of wakeup by the server, where
860    //  n = 1   fast track; nBuffering is ignored
861    //  n = 2   normal track, no sample rate conversion
862    //  n = 3   normal track, with sample rate conversion
863    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
864    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
865    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
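    // For example, a 44100 Hz client track on a 48000 Hz output requires sample rate conversion,
    // so nBuffering = 3; a client running at the output rate gets plain double-buffering (n = 2).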
866
867    mNotificationFramesAct = mNotificationFramesReq;
868
869    if (!audio_is_linear_pcm(format)) {
870
871        if (sharedBuffer != 0) {
872            // Same comment as below about ignoring frameCount parameter for set()
873            frameCount = sharedBuffer->size();
874        } else if (frameCount == 0) {
875            frameCount = afFrameCount;
876        }
877        if (mNotificationFramesAct != frameCount) {
878            mNotificationFramesAct = frameCount;
879        }
880    } else if (sharedBuffer != 0) {
881
882        // Ensure that buffer alignment matches channel count
883        // 8-bit data in shared memory is not currently supported by AudioFlinger
884        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
885        if (mChannelCount > 1) {
886            // More than 2 channels does not require stronger alignment than stereo
887            alignment <<= 1;
888        }
889        if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
890            ALOGE("Invalid buffer alignment: address %p, channel count %u",
891                    sharedBuffer->pointer(), mChannelCount);
892            return BAD_VALUE;
893        }
894
895        // When initializing a shared buffer AudioTrack via constructors,
896        // there's no frameCount parameter.
897        // But when initializing a shared buffer AudioTrack via set(),
898        // there _is_ a frameCount parameter.  We silently ignore it.
899        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
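        // Example (illustrative values only): a 64 KiB shared buffer holding stereo 16-bit data
        // yields frameCount = 65536 / 2 / 2 = 16384 frames.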
900
901    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
902
903        // FIXME move these calculations and associated checks to server
904
905        // Ensure that buffer depth covers at least audio hardware latency
906        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
907        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
908                afFrameCount, minBufCount, afSampleRate, afLatency);
909        if (minBufCount <= nBuffering) {
910            minBufCount = nBuffering;
911        }
912
913        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
914        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
915                ", afLatency=%d",
916                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
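        // Worked example (illustrative values only): afFrameCount = 1024, afSampleRate = 44100 and
        // afLatency = 100 ms give minBufCount = 100 / 23 = 4 (already >= nBuffering); a 48000 Hz
        // client track then needs minFrameCount = (1024 * 48000 * 4) / 44100 ~= 4458 frames.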
917
918        if (frameCount == 0) {
919            frameCount = minFrameCount;
920        } else if (frameCount < minFrameCount) {
921            // not ALOGW because it happens all the time when playing key clicks over A2DP
922            ALOGV("Minimum buffer size corrected from %d to %d",
923                     frameCount, minFrameCount);
924            frameCount = minFrameCount;
925        }
926        // Make sure that application is notified with sufficient margin before underrun
927        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
928            mNotificationFramesAct = frameCount/nBuffering;
929        }
930
931    } else {
932        // For fast tracks, the frame count calculations and checks are done by server
933    }
934
935    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
936    if (mIsTimed) {
937        trackFlags |= IAudioFlinger::TRACK_TIMED;
938    }
939
940    pid_t tid = -1;
941    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
942        trackFlags |= IAudioFlinger::TRACK_FAST;
943        if (mAudioTrackThread != 0) {
944            tid = mAudioTrackThread->getTid();
945        }
946    }
947
948    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
949        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
950    }
951
952    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
953                                                      sampleRate,
954                                                      // AudioFlinger only sees 16-bit PCM
955                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
956                                                              AUDIO_FORMAT_PCM_16_BIT : format,
957                                                      mChannelMask,
958                                                      frameCount,
959                                                      &trackFlags,
960                                                      sharedBuffer,
961                                                      output,
962                                                      tid,
963                                                      &mSessionId,
964                                                      mName,
965                                                      &status);
966
967    if (track == 0) {
968        ALOGE("AudioFlinger could not create track, status: %d", status);
969        return status;
970    }
971    sp<IMemory> iMem = track->getCblk();
972    if (iMem == 0) {
973        ALOGE("Could not get control block");
974        return NO_INIT;
975    }
976    // invariant that mAudioTrack != 0 is true only after set() returns successfully
977    if (mAudioTrack != 0) {
978        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
979        mDeathNotifier.clear();
980    }
981    mAudioTrack = track;
982    mCblkMemory = iMem;
983    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
984    mCblk = cblk;
985    size_t temp = cblk->frameCount_;
986    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
987        // In the current design, the AudioTrack client checks and ensures frame count validity
988        // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
989        // except for a fast track, which uses a special method of assigning its frame count.
990        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
991    }
992    frameCount = temp;
993    mAwaitBoost = false;
994    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
995        if (trackFlags & IAudioFlinger::TRACK_FAST) {
996            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
997            mAwaitBoost = true;
998            if (sharedBuffer == 0) {
999                // double-buffering is not required for fast tracks, due to tighter scheduling
1000                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount) {
1001                    mNotificationFramesAct = frameCount;
1002                }
1003            }
1004        } else {
1005            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1006            // once denied, do not request again if IAudioTrack is re-created
1007            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
1008            mFlags = flags;
1009            if (sharedBuffer == 0) {
1010                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1011                    mNotificationFramesAct = frameCount/nBuffering;
1012                }
1013            }
1014        }
1015    }
1016    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1017        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1018            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1019        } else {
1020            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1021            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1022            mFlags = flags;
1023            return NO_INIT;
1024        }
1025    }
1026
1027    mRefreshRemaining = true;
1028
1029    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1030    // is the value of pointer() for the shared buffer, otherwise buffers points
1031    // immediately after the control block.  This address is for the mapping within client
1032    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1033    void* buffers;
1034    if (sharedBuffer == 0) {
1035        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1036    } else {
1037        buffers = sharedBuffer->pointer();
1038    }
1039
1040    mAudioTrack->attachAuxEffect(mAuxEffectId);
1041    // FIXME don't believe this lie
1042    mLatency = afLatency + (1000*frameCount) / sampleRate;
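    // Example (illustrative values only): afLatency = 50 ms plus frameCount = 4096 at 44100 Hz,
    // i.e. (1000 * 4096) / 44100 ~= 92 ms of client buffering, gives mLatency ~= 142 ms.
    // As the FIXME above notes, this is only a rough estimate.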
1043    mFrameCount = frameCount;
1044    // If IAudioTrack is re-created, don't let the requested frameCount
1045    // decrease.  This can confuse clients that cache frameCount().
1046    if (frameCount > mReqFrameCount) {
1047        mReqFrameCount = frameCount;
1048    }
1049
1050    // update proxy
1051    if (sharedBuffer == 0) {
1052        mStaticProxy.clear();
1053        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1054    } else {
1055        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1056        mProxy = mStaticProxy;
1057    }
1058    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
1059            uint16_t(mVolume[LEFT] * 0x1000));
1060    mProxy->setSendLevel(mSendLevel);
1061    mProxy->setSampleRate(mSampleRate);
1062    mProxy->setEpoch(epoch);
1063    mProxy->setMinimum(mNotificationFramesAct);
1064
1065    mDeathNotifier = new DeathNotifier(this);
1066    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1067
1068    return NO_ERROR;
1069}
1070
1071status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1072{
1073    if (audioBuffer == NULL) {
1074        return BAD_VALUE;
1075    }
1076    if (mTransfer != TRANSFER_OBTAIN) {
1077        audioBuffer->frameCount = 0;
1078        audioBuffer->size = 0;
1079        audioBuffer->raw = NULL;
1080        return INVALID_OPERATION;
1081    }
1082
1083    const struct timespec *requested;
1084    if (waitCount == -1) {
1085        requested = &ClientProxy::kForever;
1086    } else if (waitCount == 0) {
1087        requested = &ClientProxy::kNonBlocking;
1088    } else if (waitCount > 0) {
1089        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1090        struct timespec timeout;
1091        timeout.tv_sec = ms / 1000;
1092        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1093        requested = &timeout;
1094    } else {
1095        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1096        requested = NULL;
1097    }
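    // Example (illustrative values only): waitCount = 100 waits up to 100 * WAIT_PERIOD_MS = 1000 ms,
    // i.e. timeout = { tv_sec = 1, tv_nsec = 0 }.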
1098    return obtainBuffer(audioBuffer, requested);
1099}
1100
1101status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1102        struct timespec *elapsed, size_t *nonContig)
1103{
1104    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1105    uint32_t oldSequence = 0;
1106    uint32_t newSequence;
1107
1108    Proxy::Buffer buffer;
1109    status_t status = NO_ERROR;
1110
1111    static const int32_t kMaxTries = 5;
1112    int32_t tryCounter = kMaxTries;
1113
1114    do {
1115        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1116        // keep them from going away if another thread re-creates the track during obtainBuffer()
1117        sp<AudioTrackClientProxy> proxy;
1118        sp<IMemory> iMem;
1119
1120        {   // start of lock scope
1121            AutoMutex lock(mLock);
1122
1123            newSequence = mSequence;
1124            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1125            if (status == DEAD_OBJECT) {
1126                // re-create track, unless someone else has already done so
1127                if (newSequence == oldSequence) {
1128                    status = restoreTrack_l("obtainBuffer");
1129                    if (status != NO_ERROR) {
1130                        buffer.mFrameCount = 0;
1131                        buffer.mRaw = NULL;
1132                        buffer.mNonContig = 0;
1133                        break;
1134                    }
1135                }
1136            }
1137            oldSequence = newSequence;
1138
1139            // Keep the extra references
1140            proxy = mProxy;
1141            iMem = mCblkMemory;
1142
1143            if (mState == STATE_STOPPING) {
1144                status = -EINTR;
1145                buffer.mFrameCount = 0;
1146                buffer.mRaw = NULL;
1147                buffer.mNonContig = 0;
1148                break;
1149            }
1150
1151            // Non-blocking if track is stopped or paused
1152            if (mState != STATE_ACTIVE) {
1153                requested = &ClientProxy::kNonBlocking;
1154            }
1155
1156        }   // end of lock scope
1157
1158        buffer.mFrameCount = audioBuffer->frameCount;
1159        // FIXME the requested timeout and elapsed time start over from scratch here
1160        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1161
1162    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1163
1164    audioBuffer->frameCount = buffer.mFrameCount;
1165    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1166    audioBuffer->raw = buffer.mRaw;
1167    if (nonContig != NULL) {
1168        *nonContig = buffer.mNonContig;
1169    }
1170    return status;
1171}
1172
1173void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1174{
1175    if (mTransfer == TRANSFER_SHARED) {
1176        return;
1177    }
1178
1179    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1180    if (stepCount == 0) {
1181        return;
1182    }
1183
1184    Proxy::Buffer buffer;
1185    buffer.mFrameCount = stepCount;
1186    buffer.mRaw = audioBuffer->raw;
1187
1188    AutoMutex lock(mLock);
1189    mInUnderrun = false;
1190    mProxy->releaseBuffer(&buffer);
1191
1192    // restart track if it was disabled by audioflinger due to previous underrun
1193    if (mState == STATE_ACTIVE) {
1194        audio_track_cblk_t* cblk = mCblk;
1195        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1196            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
1197                    this, mName.string());
1198            // FIXME ignoring status
1199            mAudioTrack->start();
1200        }
1201    }
1202}
1203
1204// -------------------------------------------------------------------------
1205
1206ssize_t AudioTrack::write(const void* buffer, size_t userSize)
1207{
1208    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1209        return INVALID_OPERATION;
1210    }
1211
1212    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1213        // Sanity-check: the user is most likely passing an error code, and it would
1214        // make the return value ambiguous (actualSize vs error).
1215        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d))", buffer, userSize, userSize);
1216        return BAD_VALUE;
1217    }
1218
1219    size_t written = 0;
1220    Buffer audioBuffer;
1221
1222    while (userSize >= mFrameSize) {
1223        audioBuffer.frameCount = userSize / mFrameSize;
1224
1225        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
1226        if (err < 0) {
1227            if (written > 0) {
1228                break;
1229            }
1230            return ssize_t(err);
1231        }
1232
1233        size_t toWrite;
1234        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1235            // Divide capacity by 2 to take expansion into account
1236            toWrite = audioBuffer.size >> 1;
1237            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1238        } else {
1239            toWrite = audioBuffer.size;
1240            memcpy(audioBuffer.i8, buffer, toWrite);
1241        }
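        // Example (illustrative values only): if obtainBuffer() returns audioBuffer.size = 4096
        // bytes for an 8-bit source, only toWrite = 2048 source bytes are consumed on this pass;
        // each uint8_t sample expands to an int16_t, filling the whole 4096-byte server buffer.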
1242        buffer = ((const char *) buffer) + toWrite;
1243        userSize -= toWrite;
1244        written += toWrite;
1245
1246        releaseBuffer(&audioBuffer);
1247    }
1248
1249    return written;
1250}
1251
1252// -------------------------------------------------------------------------
1253
1254TimedAudioTrack::TimedAudioTrack() {
1255    mIsTimed = true;
1256}
1257
1258status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1259{
1260    AutoMutex lock(mLock);
1261    status_t result = UNKNOWN_ERROR;
1262
1263#if 1
1264    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1265    // while we are accessing the cblk
1266    sp<IAudioTrack> audioTrack = mAudioTrack;
1267    sp<IMemory> iMem = mCblkMemory;
1268#endif
1269
1270    // If the track is not already invalid, try to allocate a buffer.  If the
1271    // allocation fails, indicating that the server is dead, flag the track as
1272    // invalid so we can attempt to restore it in just a bit.
1273    audio_track_cblk_t* cblk = mCblk;
1274    if (!(cblk->mFlags & CBLK_INVALID)) {
1275        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1276        if (result == DEAD_OBJECT) {
1277            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1278        }
1279    }
1280
1281    // If the track is invalid at this point, attempt to restore it, and try the
1282    // allocation one more time.
1283    if (cblk->mFlags & CBLK_INVALID) {
1284        result = restoreTrack_l("allocateTimedBuffer");
1285
1286        if (result == NO_ERROR) {
1287            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1288        }
1289    }
1290
1291    return result;
1292}
1293
1294status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1295                                           int64_t pts)
1296{
1297    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1298    {
1299        AutoMutex lock(mLock);
1300        audio_track_cblk_t* cblk = mCblk;
1301        // restart track if it was disabled by audioflinger due to previous underrun
1302        if (buffer->size() != 0 && status == NO_ERROR &&
1303                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1304            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1305            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1306            // FIXME ignoring status
1307            mAudioTrack->start();
1308        }
1309    }
1310    return status;
1311}
1312
1313status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1314                                                TargetTimeline target)
1315{
1316    return mAudioTrack->setMediaTimeTransform(xform, target);
1317}
1318
1319// -------------------------------------------------------------------------
1320
1321nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
1322{
1323    // Currently the AudioTrack thread is not created if there are no callbacks.
1324    // Would it ever make sense to run the thread, even without callbacks?
1325    // If so, then replace this by checks at each use for mCbf != NULL.
1326    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1327
1328    mLock.lock();
1329    if (mAwaitBoost) {
1330        mAwaitBoost = false;
1331        mLock.unlock();
1332        static const int32_t kMaxTries = 5;
1333        int32_t tryCounter = kMaxTries;
1334        uint32_t pollUs = 10000;
1335        do {
1336            int policy = sched_getscheduler(0);
1337            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1338                break;
1339            }
1340            usleep(pollUs);
1341            pollUs <<= 1;
1342        } while (tryCounter-- > 0);
1343        if (tryCounter < 0) {
1344            ALOGE("did not receive expected priority boost on time");
1345        }
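        // The poll interval doubles on each pass (10, 20, 40, ... ms), so with kMaxTries = 5 the
        // loop sleeps for at most 10 + 20 + 40 + 80 + 160 + 320 = 630 ms in total before the error
        // above is logged.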
1346        // Run again immediately
1347        return 0;
1348    }
1349
1350    // Can only reference mCblk while locked
1351    int32_t flags = android_atomic_and(
1352        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1353
1354    // Check for track invalidation
1355    if (flags & CBLK_INVALID) {
1356        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear the
1357        // AudioSystem cache.  We should not exit here but after calling the callback, so
1358        // that the upper layers can recreate the track.
1359        if (!isOffloaded() || (mSequence == mObservedSequence)) {
1360            status_t status = restoreTrack_l("processAudioBuffer");
1361            mLock.unlock();
1362            // Run again immediately, but with a new IAudioTrack
1363            return 0;
1364        }
1365    }
1366
1367    bool waitStreamEnd = mState == STATE_STOPPING;
1368    bool active = mState == STATE_ACTIVE;
1369
1370    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1371    bool newUnderrun = false;
1372    if (flags & CBLK_UNDERRUN) {
1373#if 0
1374        // Currently in shared buffer mode, when the server reaches the end of buffer,
1375        // the track stays active in continuous underrun state.  It's up to the application
1376        // to pause or stop the track, or set the position to a new offset within buffer.
1377        // This was some experimental code to auto-pause on underrun.   Keeping it here
1378        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1379        if (mTransfer == TRANSFER_SHARED) {
1380            mState = STATE_PAUSED;
1381            active = false;
1382        }
1383#endif
1384        if (!mInUnderrun) {
1385            mInUnderrun = true;
1386            newUnderrun = true;
1387        }
1388    }
1389
1390    // Get current position of server
1391    size_t position = mProxy->getPosition();
1392
1393    // Manage marker callback
1394    bool markerReached = false;
1395    size_t markerPosition = mMarkerPosition;
1396    // FIXME fails for wraparound, need 64 bits
1397    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1398        mMarkerReached = markerReached = true;
1399    }
1400
1401    // Determine number of new position callback(s) that will be needed, while locked
1402    size_t newPosCount = 0;
1403    size_t newPosition = mNewPosition;
1404    size_t updatePeriod = mUpdatePeriod;
1405    // FIXME fails for wraparound, need 64 bits
1406    if (updatePeriod > 0 && position >= newPosition) {
1407        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1408        mNewPosition += updatePeriod * newPosCount;
1409    }
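    // Example (illustrative values only): with updatePeriod = 1000 frames, newPosition = 5000 and
    // position = 7300, newPosCount = ((7300 - 5000) / 1000) + 1 = 3 EVENT_NEW_POS callbacks are due
    // and mNewPosition advances to 8000.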
1410
1411    // Cache other fields that will be needed soon
1412    uint32_t loopPeriod = mLoopPeriod;
1413    uint32_t sampleRate = mSampleRate;
1414    size_t notificationFrames = mNotificationFramesAct;
1415    if (mRefreshRemaining) {
1416        mRefreshRemaining = false;
1417        mRemainingFrames = notificationFrames;
1418        mRetryOnPartialBuffer = false;
1419    }
1420    size_t misalignment = mProxy->getMisalignment();
1421    uint32_t sequence = mSequence;
1422
1423    // These fields don't need to be cached, because they are assigned only by set():
1424    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1425    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1426
1427    mLock.unlock();
1428
1429    if (waitStreamEnd) {
1430        AutoMutex lock(mLock);
1431
1432        sp<AudioTrackClientProxy> proxy = mProxy;
1433        sp<IMemory> iMem = mCblkMemory;
1434
1435        struct timespec timeout;
1436        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1437        timeout.tv_nsec = 0;
1438
1439        mLock.unlock();
1440        status_t status = mProxy->waitStreamEndDone(&timeout);
1441        mLock.lock();
1442        switch (status) {
1443        case NO_ERROR:
1444        case DEAD_OBJECT:
1445        case TIMED_OUT:
1446            mLock.unlock();
1447            mCbf(EVENT_STREAM_END, mUserData, NULL);
1448            mLock.lock();
1449            if (mState == STATE_STOPPING) {
1450                mState = STATE_STOPPED;
1451                if (status != DEAD_OBJECT) {
1452                   return NS_INACTIVE;
1453                }
1454            }
1455            return 0;
1456        default:
1457            return 0;
1458        }
1459    }
1460
1461    // perform callbacks while unlocked
1462    if (newUnderrun) {
1463        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1464    }
1465    // FIXME we will miss loops if loop cycle was signaled several times since last call
1466    //       to processAudioBuffer()
1467    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1468        mCbf(EVENT_LOOP_END, mUserData, NULL);
1469    }
1470    if (flags & CBLK_BUFFER_END) {
1471        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1472    }
1473    if (markerReached) {
1474        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1475    }
1476    while (newPosCount > 0) {
1477        size_t temp = newPosition;
1478        mCbf(EVENT_NEW_POS, mUserData, &temp);
1479        newPosition += updatePeriod;
1480        newPosCount--;
1481    }
1482
1483    if (mObservedSequence != sequence) {
1484        mObservedSequence = sequence;
1485        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1486        // for offloaded tracks, just wait for the upper layers to recreate the track
1487        if (isOffloaded()) {
1488            return NS_INACTIVE;
1489        }
1490    }
1491
1492    // if inactive, then don't run me again until re-started
1493    if (!active) {
1494        return NS_INACTIVE;
1495    }
1496
1497    // Compute the estimated time until the next timed event (position, markers, loops)
1498    // FIXME only for non-compressed audio
1499    uint32_t minFrames = ~0;
1500    if (!markerReached && position < markerPosition) {
1501        minFrames = markerPosition - position;
1502    }
1503    if (loopPeriod > 0 && loopPeriod < minFrames) {
1504        minFrames = loopPeriod;
1505    }
1506    if (updatePeriod > 0 && updatePeriod < minFrames) {
1507        minFrames = updatePeriod;
1508    }
1509
1510    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1511    static const uint32_t kPoll = 0;
1512    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1513        minFrames = kPoll * notificationFrames;
1514    }
1515
1516    // Convert frame units to time units
1517    nsecs_t ns = NS_WHENEVER;
1518    if (minFrames != (uint32_t) ~0) {
1519        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1520        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1521        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1522    }
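    // Example (illustrative values only): minFrames = 2205 at sampleRate = 44100 Hz is 50 ms of
    // audio, so ns ~= 50 ms + 10 ms fudge = 60 ms until the next wakeup.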
1523
1524    // If not supplying data by EVENT_MORE_DATA, then we're done
1525    if (mTransfer != TRANSFER_CALLBACK) {
1526        return ns;
1527    }
1528
1529    struct timespec timeout;
1530    const struct timespec *requested = &ClientProxy::kForever;
1531    if (ns != NS_WHENEVER) {
1532        timeout.tv_sec = ns / 1000000000LL;
1533        timeout.tv_nsec = ns % 1000000000LL;
1534        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1535        requested = &timeout;
1536    }
1537
1538    while (mRemainingFrames > 0) {
1539
1540        Buffer audioBuffer;
1541        audioBuffer.frameCount = mRemainingFrames;
1542        size_t nonContig;
1543        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1544        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1545                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1546        requested = &ClientProxy::kNonBlocking;
1547        size_t avail = audioBuffer.frameCount + nonContig;
1548        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1549                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1550        if (err != NO_ERROR) {
1551            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1552                    (isOffloaded() && (err == DEAD_OBJECT))) {
1553                return 0;
1554            }
1555            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1556            return NS_NEVER;
1557        }
1558
1559        if (mRetryOnPartialBuffer && !isOffloaded()) {
1560            mRetryOnPartialBuffer = false;
1561            if (avail < mRemainingFrames) {
1562                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1563                if (ns < 0 || myns < ns) {
1564                    ns = myns;
1565                }
1566                return ns;
1567            }
1568        }
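        // Note on the constant above: 1100000000LL converts the missing frames to nanoseconds at
        // 1.1e9 ns per second of audio, i.e. a ~10% margin over their nominal duration, presumably
        // so the server has had time to free up the remaining frames by the next wakeup.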
1569
1570        // Divide buffer size by 2 to take into account the expansion
1571        // due to 8 to 16 bit conversion: the callback must fill only half
1572        // of the destination buffer
1573        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1574            audioBuffer.size >>= 1;
1575        }
1576
1577        size_t reqSize = audioBuffer.size;
1578        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1579        size_t writtenSize = audioBuffer.size;
1580        size_t writtenFrames = writtenSize / mFrameSize;
1581
1582        // Sanity check on returned size
1583        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1584            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1585                    reqSize, (int) writtenSize);
1586            return NS_NEVER;
1587        }
1588
1589        if (writtenSize == 0) {
1590            // The callback is done filling buffers.
1591            // Keep this thread going to handle timed events and
1592            // still try to get more data at intervals of WAIT_PERIOD_MS,
1593            // but don't busy-loop and hog the CPU, so wait.
1594            return WAIT_PERIOD_MS * 1000000LL;
1595        }
1596
1597        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1598            // 8 to 16 bit conversion, note that source and destination are the same address
1599            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1600            audioBuffer.size <<= 1;
1601        }
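        // Worked example (hypothetical sizes): for a 1024-byte 16-bit destination buffer, the size
        // is first halved to 512 so the callback writes 512 bytes of 8-bit samples; the in-place
        // expansion above then yields 1024 bytes of 16-bit samples (the conversion is defined so
        // that sharing the source and destination address is safe), and audioBuffer.size is
        // doubled back to 1024 before the buffer is released.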
1602
1603        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1604        audioBuffer.frameCount = releasedFrames;
1605        mRemainingFrames -= releasedFrames;
1606        if (misalignment >= releasedFrames) {
1607            misalignment -= releasedFrames;
1608        } else {
1609            misalignment = 0;
1610        }
1611
1612        releaseBuffer(&audioBuffer);
1613
1614        // FIXME this is where we would repeat EVENT_MORE_DATA on the same advanced buffer
1615        // if the callback does not accept the full chunk
1616        if (writtenSize < reqSize) {
1617            continue;
1618        }
1619
1620        // There could be enough non-contiguous frames available to satisfy the remaining request
1621        if (mRemainingFrames <= nonContig) {
1622            continue;
1623        }
1624
1625#if 0
1626        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1627        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1628        // that total to a sum == notificationFrames.
1629        if (0 < misalignment && misalignment <= mRemainingFrames) {
1630            mRemainingFrames = misalignment;
1631            return (mRemainingFrames * 1100000000LL) / sampleRate;
1632        }
1633#endif
1634
1635    }
1636    mRemainingFrames = notificationFrames;
1637    mRetryOnPartialBuffer = true;
1638
1639    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1640    return 0;
1641}
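
// Note: the value returned by processAudioBuffer() is interpreted by AudioTrackThread::threadLoop()
// below: 0 means run again immediately, NS_INACTIVE pauses the thread until resume(), NS_NEVER ends
// the loop, NS_WHENEVER is treated as a ~1 s poll, and any other positive value is the number of
// nanoseconds to sleep before the next call.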
1642
1643status_t AudioTrack::restoreTrack_l(const char *from)
1644{
1645    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1646          isOffloaded() ? "Offloaded" : "PCM", from);
1647    ++mSequence;
1648    status_t result;
1649
1650    // refresh the audio configuration cache in this process to make sure we get new
1651    // output parameters in getOutput_l() and createTrack_l()
1652    AudioSystem::clearAudioConfigCache();
1653
1654    if (isOffloaded()) {
1655        return DEAD_OBJECT;
1656    }
1657
1658    // force a new output query from the audio policy manager
1659    mOutput = 0;
1660    audio_io_handle_t output = getOutput_l();
1661
1662    // if a new IAudioTrack is created, createTrack_l() will modify the
1663    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1664    // It will also release the strong references held on the previous IAudioTrack and IMemory.
1665
1666    // account in the saved position for the frames that will be lost by track recreation
1667    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1668    mNewPosition = position + mUpdatePeriod;
1669    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1670    result = createTrack_l(mStreamType,
1671                           mSampleRate,
1672                           mFormat,
1673                           mReqFrameCount,  // so that frame count never goes down
1674                           mFlags,
1675                           mSharedBuffer,
1676                           output,
1677                           position /*epoch*/);
1678
1679    if (result == NO_ERROR) {
1680        // continue playback from last known position, but
1681        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1682        if (mStaticProxy != NULL) {
1683            mLoopPeriod = 0;
1684            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1685        }
1686        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1687        //       track destruction have been played? This is critical for the SoundPool
1688        //       implementation. This is likely broken, and needs to be tested/debugged.
1689#if 0
1690        // restore write index and set other indexes to reflect empty buffer status
1691        if (!strcmp(from, "start")) {
1692            // Make sure that a client relying on callback events indicating underrun or
1693            // the actual number of audio frames played (e.g. SoundPool) receives them.
1694            if (mSharedBuffer == 0) {
1695                // restart playback even if buffer is not completely filled.
1696                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1697            }
1698        }
1699#endif
1700        if (mState == STATE_ACTIVE) {
1701            result = mAudioTrack->start();
1702        }
1703    }
1704    if (result != NO_ERROR) {
1705        // Use of direct and offloaded output streams is reference-counted by the audio policy
1706        // manager. As getOutput() was called above and resulted in an output stream being opened,
1707        // we need to release it here.
1708        AudioSystem::releaseOutput(output);
1709        ALOGW("restoreTrack_l() failed status %d", result);
1710        mState = STATE_STOPPED;
1711    }
1712
1713    return result;
1714}
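
// Usage sketch (illustrative only; not copied from the actual call sites elsewhere in this file):
// methods that observe a dead server connection typically retry through restoreTrack_l() while
// holding mLock, along the lines of:
//
//     status_t status = mAudioTrack->start();
//     if (status == DEAD_OBJECT) {
//         status = restoreTrack_l("start");
//     }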
1715
1716status_t AudioTrack::setParameters(const String8& keyValuePairs)
1717{
1718    AutoMutex lock(mLock);
1719    return mAudioTrack->setParameters(keyValuePairs);
1720}
1721
1722status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1723{
1724    AutoMutex lock(mLock);
1725    // FIXME not implemented for fast tracks; should use proxy and SSQ
1726    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1727        return INVALID_OPERATION;
1728    }
1729    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1730        return INVALID_OPERATION;
1731    }
1732    status_t status = mAudioTrack->getTimestamp(timestamp);
1733    if (status == NO_ERROR) {
1734        timestamp.mPosition += mProxy->getEpoch();
1735    }
1736    return status;
1737}
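
// Usage sketch (illustrative only; "track" and "writtenFrames" are hypothetical client-side names):
//
//     AudioTimestamp ts;
//     if (track->getTimestamp(ts) == NO_ERROR) {
//         // ts.mPosition frames had been presented as of the CLOCK_MONOTONIC time in ts.mTime;
//         // frames written beyond that are still queued downstream.
//         uint32_t queuedFrames = writtenFrames - ts.mPosition;
//     }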
1738
1739String8 AudioTrack::getParameters(const String8& keys)
1740{
1741    if (mOutput) {
1742        return AudioSystem::getParameters(mOutput, keys);
1743    } else {
1744        return String8::empty();
1745    }
1746}
1747
1748status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
1749{
1750
1751    const size_t SIZE = 256;
1752    char buffer[SIZE];
1753    String8 result;
1754
1755    result.append(" AudioTrack::dump\n");
1756    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1757            mVolume[0], mVolume[1]);
1758    result.append(buffer);
1759    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
1760            mChannelCount, mFrameCount);
1761    result.append(buffer);
1762    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1763    result.append(buffer);
1764    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1765    result.append(buffer);
1766    ::write(fd, result.string(), result.size());
1767    return NO_ERROR;
1768}
1769
1770uint32_t AudioTrack::getUnderrunFrames() const
1771{
1772    AutoMutex lock(mLock);
1773    return mProxy->getUnderrunFrames();
1774}
1775
1776// =========================================================================
1777
1778void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
1779{
1780    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1781    if (audioTrack != 0) {
1782        AutoMutex lock(audioTrack->mLock);
1783        audioTrack->mProxy->binderDied();
1784    }
1785}
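
// Note (inferred behavior): mProxy->binderDied() is expected to mark the shared control block as
// invalid and wake any waiters, so the next client call that reaches the server sees DEAD_OBJECT
// and recovers through restoreTrack_l() above.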
1786
1787// =========================================================================
1788
1789AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1790    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL)
1791{
1792}
1793
1794AudioTrack::AudioTrackThread::~AudioTrackThread()
1795{
1796}
1797
1798bool AudioTrack::AudioTrackThread::threadLoop()
1799{
1800    {
1801        AutoMutex _l(mMyLock);
1802        if (mPaused) {
1803            mMyCond.wait(mMyLock);
1804            // caller will check for exitPending()
1805            return true;
1806        }
1807        if (mPausedInt) {
1808            if (mPausedNs > 0) {
1809                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1810            } else {
1811                mMyCond.wait(mMyLock);
1812            }
1813            mPausedInt = false;
1814            return true;
1815        }
1816    }
1817    nsecs_t ns = mReceiver.processAudioBuffer(this);
1818    switch (ns) {
1819    case 0:
1820        return true;
1821    case NS_INACTIVE:
1822        pauseInternal();
1823        return true;
1824    case NS_NEVER:
1825        return false;
1826    case NS_WHENEVER:
1827        // FIXME increase poll interval, or make event-driven
1828        ns = 1000000000LL;
1829        // fall through
1830    default:
1831        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
1832        pauseInternal(ns);
1833        return true;
1834    }
1835}
1836
1837void AudioTrack::AudioTrackThread::requestExit()
1838{
1839    // must be in this order to avoid a race condition
1840    Thread::requestExit();
1841    AutoMutex _l(mMyLock);
1842    if (mPaused || mPausedInt) {
1843        mPaused = false;
1844        mPausedInt = false;
1845        mMyCond.signal();
1846    }
1847}
1848
1849void AudioTrack::AudioTrackThread::pause()
1850{
1851    AutoMutex _l(mMyLock);
1852    mPaused = true;
1853}
1854
1855void AudioTrack::AudioTrackThread::resume()
1856{
1857    AutoMutex _l(mMyLock);
1858    if (mPaused || mPausedInt) {
1859        mPaused = false;
1860        mPausedInt = false;
1861        mMyCond.signal();
1862    }
1863}
1864
1865void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1866{
1867    AutoMutex _l(mMyLock);
1868    mPausedInt = true;
1869    mPausedNs = ns;
1870}
1871
1872} // namespace android
1873