AudioTrack.cpp revision d054c32443a493513ab63529b0c8b1aca290278c
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <sys/resource.h>
23#include <audio_utils/primitives.h>
24#include <binder/IPCThreadState.h>
25#include <media/AudioTrack.h>
26#include <utils/Log.h>
27#include <private/media/AudioTrackShared.h>
28#include <media/IAudioFlinger.h>
29
30#define WAIT_PERIOD_MS                  10
31#define WAIT_STREAM_END_TIMEOUT_SEC     120
32
33
34namespace android {
35// ---------------------------------------------------------------------------
36
37// static
38status_t AudioTrack::getMinFrameCount(
39        size_t* frameCount,
40        audio_stream_type_t streamType,
41        uint32_t sampleRate)
42{
43    if (frameCount == NULL) {
44        return BAD_VALUE;
45    }
46
47    // default to 0 in case of error
48    *frameCount = 0;
49
50    // FIXME merge with similar code in createTrack_l(), except we're missing
51    //       some information here that is available in createTrack_l():
52    //          audio_io_handle_t output
53    //          audio_format_t format
54    //          audio_channel_mask_t channelMask
55    //          audio_output_flags_t flags
56    uint32_t afSampleRate;
57    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
58        return NO_INIT;
59    }
60    size_t afFrameCount;
61    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
62        return NO_INIT;
63    }
64    uint32_t afLatency;
65    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
66        return NO_INIT;
67    }
68
69    // Ensure that buffer depth covers at least audio hardware latency
70    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
71    if (minBufCount < 2) {
72        minBufCount = 2;
73    }
74
75    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
76            afFrameCount * minBufCount * sampleRate / afSampleRate;
77    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
78            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
79    return NO_ERROR;
80}
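// Illustrative sketch (not part of the original file): how a client might call getMinFrameCount(),
// with hypothetical hardware values to show the math above.  With afLatency = 50 ms,
// afFrameCount = 1024 and afSampleRate = 44100, one hardware buffer lasts
// (1000 * 1024) / 44100 ~= 23 ms, so minBufCount = 50 / 23 = 2, and for a client sampleRate of
// 44100 the result is 1024 * 2 = 2048 frames.
//
//     size_t minFrames = 0;
//     if (AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 44100) == NO_ERROR) {
//         // allocate the track with at least minFrames frames
//     }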
81
82// ---------------------------------------------------------------------------
83
84AudioTrack::AudioTrack()
85    : mStatus(NO_INIT),
86      mIsTimed(false),
87      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
88      mPreviousSchedulingGroup(SP_DEFAULT)
89{
90}
91
92AudioTrack::AudioTrack(
93        audio_stream_type_t streamType,
94        uint32_t sampleRate,
95        audio_format_t format,
96        audio_channel_mask_t channelMask,
97        int frameCount,
98        audio_output_flags_t flags,
99        callback_t cbf,
100        void* user,
101        int notificationFrames,
102        int sessionId,
103        transfer_type transferType,
104        const audio_offload_info_t *offloadInfo)
105    : mStatus(NO_INIT),
106      mIsTimed(false),
107      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
108      mPreviousSchedulingGroup(SP_DEFAULT)
109{
110    mStatus = set(streamType, sampleRate, format, channelMask,
111            frameCount, flags, cbf, user, notificationFrames,
112            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
113}
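// Illustrative sketch (hypothetical client code, not part of the original file): typical streaming
// construction with a callback, assuming the default trailing arguments and the initCheck()
// accessor declared in AudioTrack.h.
//
//     static void audioCallback(int event, void* user, void* info) {
//         // handle EVENT_MORE_DATA, EVENT_UNDERRUN, EVENT_MARKER, ...
//     }
//
//     sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 44100, AUDIO_FORMAT_PCM_16_BIT,
//             AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE,
//             audioCallback, NULL /*user*/);
//     if (track->initCheck() == NO_ERROR) {
//         track->start();
//     }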
114
115AudioTrack::AudioTrack(
116        audio_stream_type_t streamType,
117        uint32_t sampleRate,
118        audio_format_t format,
119        audio_channel_mask_t channelMask,
120        const sp<IMemory>& sharedBuffer,
121        audio_output_flags_t flags,
122        callback_t cbf,
123        void* user,
124        int notificationFrames,
125        int sessionId,
126        transfer_type transferType,
127        const audio_offload_info_t *offloadInfo)
128    : mStatus(NO_INIT),
129      mIsTimed(false),
130      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
131      mPreviousSchedulingGroup(SP_DEFAULT)
132{
133    mStatus = set(streamType, sampleRate, format, channelMask,
134            0 /*frameCount*/, flags, cbf, user, notificationFrames,
135            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
136}
137
138AudioTrack::~AudioTrack()
139{
140    if (mStatus == NO_ERROR) {
141        // Make sure that the callback function exits in the case where
142        // it is looping on a buffer-full condition in obtainBuffer().
143        // Otherwise the callback thread will never exit.
144        stop();
145        if (mAudioTrackThread != 0) {
146            mProxy->interrupt();
147            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
148            mAudioTrackThread->requestExitAndWait();
149            mAudioTrackThread.clear();
150        }
151        if (mAudioTrack != 0) {
152            mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
153            mAudioTrack.clear();
154        }
155        IPCThreadState::self()->flushCommands();
156        AudioSystem::releaseAudioSessionId(mSessionId);
157    }
158}
159
160status_t AudioTrack::set(
161        audio_stream_type_t streamType,
162        uint32_t sampleRate,
163        audio_format_t format,
164        audio_channel_mask_t channelMask,
165        int frameCountInt,
166        audio_output_flags_t flags,
167        callback_t cbf,
168        void* user,
169        int notificationFrames,
170        const sp<IMemory>& sharedBuffer,
171        bool threadCanCallJava,
172        int sessionId,
173        transfer_type transferType,
174        const audio_offload_info_t *offloadInfo)
175{
176    switch (transferType) {
177    case TRANSFER_DEFAULT:
178        if (sharedBuffer != 0) {
179            transferType = TRANSFER_SHARED;
180        } else if (cbf == NULL || threadCanCallJava) {
181            transferType = TRANSFER_SYNC;
182        } else {
183            transferType = TRANSFER_CALLBACK;
184        }
185        break;
186    case TRANSFER_CALLBACK:
187        if (cbf == NULL || sharedBuffer != 0) {
188            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
189            return BAD_VALUE;
190        }
191        break;
192    case TRANSFER_OBTAIN:
193    case TRANSFER_SYNC:
194        if (sharedBuffer != 0) {
195            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
196            return BAD_VALUE;
197        }
198        break;
199    case TRANSFER_SHARED:
200        if (sharedBuffer == 0) {
201            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
202            return BAD_VALUE;
203        }
204        break;
205    default:
206        ALOGE("Invalid transfer type %d", transferType);
207        return BAD_VALUE;
208    }
209    mTransfer = transferType;
210
211    // FIXME "int" here is legacy and will be replaced by size_t later
212    if (frameCountInt < 0) {
213        ALOGE("Invalid frame count %d", frameCountInt);
214        return BAD_VALUE;
215    }
216    size_t frameCount = frameCountInt;
217
218    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
219            sharedBuffer->size());
220
221    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
222
223    AutoMutex lock(mLock);
224
225    if (mAudioTrack != 0) {
226        ALOGE("Track already in use");
227        return INVALID_OPERATION;
228    }
229
230    mOutput = 0;
231
232    // handle default values first.
233    if (streamType == AUDIO_STREAM_DEFAULT) {
234        streamType = AUDIO_STREAM_MUSIC;
235    }
236
237    if (sampleRate == 0) {
238        uint32_t afSampleRate;
239        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
240            return NO_INIT;
241        }
242        sampleRate = afSampleRate;
243    }
244    mSampleRate = sampleRate;
245
246    // these below should probably come from the audioFlinger too...
247    if (format == AUDIO_FORMAT_DEFAULT) {
248        format = AUDIO_FORMAT_PCM_16_BIT;
249    }
250    if (channelMask == 0) {
251        channelMask = AUDIO_CHANNEL_OUT_STEREO;
252    }
253
254    // validate parameters
255    if (!audio_is_valid_format(format)) {
256        ALOGE("Invalid format %d", format);
257        return BAD_VALUE;
258    }
259
260    // AudioFlinger does not currently support 8-bit data in shared memory
261    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
262        ALOGE("8-bit data in shared memory is not supported");
263        return BAD_VALUE;
264    }
265
266    // force direct flag if format is not linear PCM
267    // or offload was requested
268    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
269            || !audio_is_linear_pcm(format)) {
270        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
271                    ? "Offload request, forcing to Direct Output"
272                    : "Not linear PCM, forcing to Direct Output");
273        flags = (audio_output_flags_t)
274                // FIXME why can't we allow direct AND fast?
275                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
276    }
277    // only allow deep buffering for music stream type
278    if (streamType != AUDIO_STREAM_MUSIC) {
279        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
280    }
281
282    if (!audio_is_output_channel(channelMask)) {
283        ALOGE("Invalid channel mask %#x", channelMask);
284        return BAD_VALUE;
285    }
286    mChannelMask = channelMask;
287    uint32_t channelCount = popcount(channelMask);
288    mChannelCount = channelCount;
289
290    if (audio_is_linear_pcm(format)) {
291        mFrameSize = channelCount * audio_bytes_per_sample(format);
292        mFrameSizeAF = channelCount * sizeof(int16_t);
293    } else {
294        mFrameSize = sizeof(uint8_t);
295        mFrameSizeAF = sizeof(uint8_t);
296    }
297
298    audio_io_handle_t output = AudioSystem::getOutput(
299                                    streamType,
300                                    sampleRate, format, channelMask,
301                                    flags,
302                                    offloadInfo);
303
304    if (output == 0) {
305        ALOGE("Could not get audio output for stream type %d", streamType);
306        return BAD_VALUE;
307    }
308
309    mVolume[LEFT] = 1.0f;
310    mVolume[RIGHT] = 1.0f;
311    mSendLevel = 0.0f;
312    mFrameCount = frameCount;
313    mReqFrameCount = frameCount;
314    mNotificationFramesReq = notificationFrames;
315    mNotificationFramesAct = 0;
316    mSessionId = sessionId;
317    mAuxEffectId = 0;
318    mFlags = flags;
319    mCbf = cbf;
320
321    if (cbf != NULL) {
322        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
323        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
324    }
325
326    // create the IAudioTrack
327    status_t status = createTrack_l(streamType,
328                                  sampleRate,
329                                  format,
330                                  frameCount,
331                                  flags,
332                                  sharedBuffer,
333                                  output,
334                                  0 /*epoch*/);
335
336    if (status != NO_ERROR) {
337        if (mAudioTrackThread != 0) {
338            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
339            mAudioTrackThread->requestExitAndWait();
340            mAudioTrackThread.clear();
341        }
342        // Use of direct and offloaded output streams is reference-counted by the audio policy
343        // manager. As getOutput() was called above and resulted in an output stream being
344        // opened, we need to release it here.
345        AudioSystem::releaseOutput(output);
346        return status;
347    }
348
349    mStatus = NO_ERROR;
350    mStreamType = streamType;
351    mFormat = format;
352    mSharedBuffer = sharedBuffer;
353    mState = STATE_STOPPED;
354    mUserData = user;
355    mLoopPeriod = 0;
356    mMarkerPosition = 0;
357    mMarkerReached = false;
358    mNewPosition = 0;
359    mUpdatePeriod = 0;
360    AudioSystem::acquireAudioSessionId(mSessionId);
361    mSequence = 1;
362    mObservedSequence = mSequence;
363    mInUnderrun = false;
364    mOutput = output;
365
366    return NO_ERROR;
367}
368
369// -------------------------------------------------------------------------
370
371status_t AudioTrack::start()
372{
373    AutoMutex lock(mLock);
374
375    if (mState == STATE_ACTIVE) {
376        return INVALID_OPERATION;
377    }
378
379    mInUnderrun = true;
380
381    State previousState = mState;
382    if (previousState == STATE_PAUSED_STOPPING) {
383        mState = STATE_STOPPING;
384    } else {
385        mState = STATE_ACTIVE;
386    }
387    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
388        // reset current position as seen by client to 0
389        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
390    }
391    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
392    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
393
394    sp<AudioTrackThread> t = mAudioTrackThread;
395    if (t != 0) {
396        if (previousState == STATE_STOPPING) {
397            mProxy->interrupt();
398        } else {
399            t->resume();
400        }
401    } else {
402        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
403        get_sched_policy(0, &mPreviousSchedulingGroup);
404        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
405    }
406
407    status_t status = NO_ERROR;
408    if (!(flags & CBLK_INVALID)) {
409        status = mAudioTrack->start();
410        if (status == DEAD_OBJECT) {
411            flags |= CBLK_INVALID;
412        }
413    }
414    if (flags & CBLK_INVALID) {
415        status = restoreTrack_l("start");
416    }
417
418    if (status != NO_ERROR) {
419        ALOGE("start() status %d", status);
420        mState = previousState;
421        if (t != 0) {
422            if (previousState != STATE_STOPPING) {
423                t->pause();
424            }
425        } else {
426            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
427            set_sched_policy(0, mPreviousSchedulingGroup);
428        }
429    }
430
431    return status;
432}
433
434void AudioTrack::stop()
435{
436    AutoMutex lock(mLock);
437    // FIXME pause then stop should not be a nop
438    if (mState != STATE_ACTIVE) {
439        return;
440    }
441
442    if (isOffloaded()) {
443        mState = STATE_STOPPING;
444    } else {
445        mState = STATE_STOPPED;
446    }
447
448    mProxy->interrupt();
449    mAudioTrack->stop();
450    // the playback head position will reset to 0, so if a marker is set, we need
451    // to activate it again
452    mMarkerReached = false;
453#if 0
454    // Force a flush if a shared buffer is used, otherwise audioflinger
455    // will not stop before the end of the buffer is reached.
456    // This may be needed to make sure that playback stops, most likely when looping is on.
457    if (mSharedBuffer != 0) {
458        flush_l();
459    }
460#endif
461
462    sp<AudioTrackThread> t = mAudioTrackThread;
463    if (t != 0) {
464        if (!isOffloaded()) {
465            t->pause();
466        }
467    } else {
468        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
469        set_sched_policy(0, mPreviousSchedulingGroup);
470    }
471}
472
473bool AudioTrack::stopped() const
474{
475    AutoMutex lock(mLock);
476    return mState != STATE_ACTIVE;
477}
478
479void AudioTrack::flush()
480{
481    if (mSharedBuffer != 0) {
482        return;
483    }
484    AutoMutex lock(mLock);
485    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
486        return;
487    }
488    flush_l();
489}
490
491void AudioTrack::flush_l()
492{
493    ALOG_ASSERT(mState != STATE_ACTIVE);
494
495    // clear playback marker and periodic update counter
496    mMarkerPosition = 0;
497    mMarkerReached = false;
498    mUpdatePeriod = 0;
499    mRefreshRemaining = true;
500
501    mState = STATE_FLUSHED;
502    if (isOffloaded()) {
503        mProxy->interrupt();
504    }
505    mProxy->flush();
506    mAudioTrack->flush();
507}
508
509void AudioTrack::pause()
510{
511    AutoMutex lock(mLock);
512    if (mState == STATE_ACTIVE) {
513        mState = STATE_PAUSED;
514    } else if (mState == STATE_STOPPING) {
515        mState = STATE_PAUSED_STOPPING;
516    } else {
517        return;
518    }
519    mProxy->interrupt();
520    mAudioTrack->pause();
521}
522
523status_t AudioTrack::setVolume(float left, float right)
524{
525    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
526        return BAD_VALUE;
527    }
528
529    AutoMutex lock(mLock);
530    mVolume[LEFT] = left;
531    mVolume[RIGHT] = right;
532
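    // Note (explanatory, not part of the original file): both gains are packed into one 32-bit
    // word as 4.12 fixed point, right in the high 16 bits and left in the low 16 bits, with 1.0f
    // mapping to 0x1000 (4096).  For example, setVolume(1.0f, 0.5f) stores (0x0800 << 16) | 0x1000.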
533    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
534
535    return NO_ERROR;
536}
537
538status_t AudioTrack::setVolume(float volume)
539{
540    return setVolume(volume, volume);
541}
542
543status_t AudioTrack::setAuxEffectSendLevel(float level)
544{
545    if (level < 0.0f || level > 1.0f) {
546        return BAD_VALUE;
547    }
548
549    AutoMutex lock(mLock);
550    mSendLevel = level;
551    mProxy->setSendLevel(level);
552
553    return NO_ERROR;
554}
555
556void AudioTrack::getAuxEffectSendLevel(float* level) const
557{
558    if (level != NULL) {
559        *level = mSendLevel;
560    }
561}
562
563status_t AudioTrack::setSampleRate(uint32_t rate)
564{
565    if (mIsTimed || isOffloaded()) {
566        return INVALID_OPERATION;
567    }
568
569    uint32_t afSamplingRate;
570    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
571        return NO_INIT;
572    }
573    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
574    if (rate == 0 || rate > afSamplingRate*2 ) {
575        return BAD_VALUE;
576    }
577
578    AutoMutex lock(mLock);
579    mSampleRate = rate;
580    mProxy->setSampleRate(rate);
581
582    return NO_ERROR;
583}
584
585uint32_t AudioTrack::getSampleRate() const
586{
587    if (mIsTimed) {
588        return 0;
589    }
590
591    AutoMutex lock(mLock);
592    return mSampleRate;
593}
594
595status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
596{
597    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
598        return INVALID_OPERATION;
599    }
600
601    if (loopCount == 0) {
602        ;
603    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
604            loopEnd - loopStart >= MIN_LOOP) {
605        ;
606    } else {
607        return BAD_VALUE;
608    }
609
610    AutoMutex lock(mLock);
611    // See setPosition() regarding setting parameters such as loop points or position while active
612    if (mState == STATE_ACTIVE) {
613        return INVALID_OPERATION;
614    }
615    setLoop_l(loopStart, loopEnd, loopCount);
616    return NO_ERROR;
617}
618
619void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
620{
621    // FIXME If setting a loop also sets position to start of loop, then
622    //       this is correct.  Otherwise it should be removed.
623    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
624    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
625    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
626}
627
628status_t AudioTrack::setMarkerPosition(uint32_t marker)
629{
630    // The only purpose of setting marker position is to get a callback
631    if (mCbf == NULL || isOffloaded()) {
632        return INVALID_OPERATION;
633    }
634
635    AutoMutex lock(mLock);
636    mMarkerPosition = marker;
637    mMarkerReached = false;
638
639    return NO_ERROR;
640}
641
642status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
643{
644    if (isOffloaded()) {
645        return INVALID_OPERATION;
646    }
647    if (marker == NULL) {
648        return BAD_VALUE;
649    }
650
651    AutoMutex lock(mLock);
652    *marker = mMarkerPosition;
653
654    return NO_ERROR;
655}
656
657status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
658{
659    // The only purpose of setting position update period is to get a callback
660    if (mCbf == NULL || isOffloaded()) {
661        return INVALID_OPERATION;
662    }
663
664    AutoMutex lock(mLock);
665    mNewPosition = mProxy->getPosition() + updatePeriod;
666    mUpdatePeriod = updatePeriod;
667    return NO_ERROR;
668}
669
670status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
671{
672    if (isOffloaded()) {
673        return INVALID_OPERATION;
674    }
675    if (updatePeriod == NULL) {
676        return BAD_VALUE;
677    }
678
679    AutoMutex lock(mLock);
680    *updatePeriod = mUpdatePeriod;
681
682    return NO_ERROR;
683}
684
685status_t AudioTrack::setPosition(uint32_t position)
686{
687    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
688        return INVALID_OPERATION;
689    }
690    if (position > mFrameCount) {
691        return BAD_VALUE;
692    }
693
694    AutoMutex lock(mLock);
695    // Currently we require that the player is inactive before setting parameters such as position
696    // or loop points.  Otherwise, there could be a race condition: the application could read the
697    // current position, compute a new position or loop parameters, and then set that position or
698    // loop parameters, but it would do the "wrong" thing because the position has continued to
699    // advance in the meantime.  If we ever provide a sequencer in the server, we could allow a way
700    // for the app to specify how it wants to handle such scenarios.
701    if (mState == STATE_ACTIVE) {
702        return INVALID_OPERATION;
703    }
704    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
705    mLoopPeriod = 0;
706    // FIXME Check whether loops and setting position are incompatible in old code.
707    // If we use setLoop for both purposes we lose the capability to set the position while looping.
708    mStaticProxy->setLoop(position, mFrameCount, 0);
709
710    return NO_ERROR;
711}
712
713status_t AudioTrack::getPosition(uint32_t *position) const
714{
715    if (position == NULL) {
716        return BAD_VALUE;
717    }
718
719    AutoMutex lock(mLock);
720    if (isOffloaded()) {
721        uint32_t dspFrames = 0;
722
723        if (mOutput != 0) {
724            uint32_t halFrames;
725            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
726        }
727        *position = dspFrames;
728    } else {
729        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
730        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
731                mProxy->getPosition();
732    }
733    return NO_ERROR;
734}
735
736status_t AudioTrack::getBufferPosition(size_t *position)
737{
738    if (mSharedBuffer == 0 || mIsTimed) {
739        return INVALID_OPERATION;
740    }
741    if (position == NULL) {
742        return BAD_VALUE;
743    }
744
745    AutoMutex lock(mLock);
746    *position = mStaticProxy->getBufferPosition();
747    return NO_ERROR;
748}
749
750status_t AudioTrack::reload()
751{
752    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
753        return INVALID_OPERATION;
754    }
755
756    AutoMutex lock(mLock);
757    // See setPosition() regarding setting parameters such as loop points or position while active
758    if (mState == STATE_ACTIVE) {
759        return INVALID_OPERATION;
760    }
761    mNewPosition = mUpdatePeriod;
762    mLoopPeriod = 0;
763    // FIXME The new code cannot reload while keeping a loop specified.
764    // Need to check how the old code handled this, and whether it's a significant change.
765    mStaticProxy->setLoop(0, mFrameCount, 0);
766    return NO_ERROR;
767}
768
769audio_io_handle_t AudioTrack::getOutput()
770{
771    AutoMutex lock(mLock);
772    return mOutput;
773}
774
775// must be called with mLock held
776audio_io_handle_t AudioTrack::getOutput_l()
777{
778    if (mOutput) {
779        return mOutput;
780    } else {
781        return AudioSystem::getOutput(mStreamType,
782                                      mSampleRate, mFormat, mChannelMask, mFlags);
783    }
784}
785
786status_t AudioTrack::attachAuxEffect(int effectId)
787{
788    AutoMutex lock(mLock);
789    status_t status = mAudioTrack->attachAuxEffect(effectId);
790    if (status == NO_ERROR) {
791        mAuxEffectId = effectId;
792    }
793    return status;
794}
795
796// -------------------------------------------------------------------------
797
798// must be called with mLock held
799status_t AudioTrack::createTrack_l(
800        audio_stream_type_t streamType,
801        uint32_t sampleRate,
802        audio_format_t format,
803        size_t frameCount,
804        audio_output_flags_t flags,
805        const sp<IMemory>& sharedBuffer,
806        audio_io_handle_t output,
807        size_t epoch)
808{
809    status_t status;
810    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
811    if (audioFlinger == 0) {
812        ALOGE("Could not get audioflinger");
813        return NO_INIT;
814    }
815
816    uint32_t afLatency;
817    if ((status = AudioSystem::getLatency(output, streamType, &afLatency)) != NO_ERROR) {
818        ALOGE("getLatency(%d) failed status %d", output, status);
819        return NO_INIT;
820    }
821
822    // Client decides whether the track is TIMED (see below), but can only express a preference
823    // for FAST.  Server will perform additional tests.
824    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
825            // either of these use cases:
826            // use case 1: shared buffer
827            (sharedBuffer != 0) ||
828            // use case 2: callback handler
829            (mCbf != NULL))) {
830        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
831        // once denied, do not request again if IAudioTrack is re-created
832        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
833        mFlags = flags;
834    }
835    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
836
837    mNotificationFramesAct = mNotificationFramesReq;
838
839    if (!audio_is_linear_pcm(format)) {
840
841        if (sharedBuffer != 0) {
842            // Same comment as below about ignoring frameCount parameter for set()
843            frameCount = sharedBuffer->size();
844        } else if (frameCount == 0) {
845            size_t afFrameCount;
846            status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
847            if (status != NO_ERROR) {
848                ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType,
849                        status);
850                return NO_INIT;
851            }
852            frameCount = afFrameCount;
853        }
854        if (mNotificationFramesAct != frameCount) {
855            mNotificationFramesAct = frameCount;
856        }
857    } else if (sharedBuffer != 0) {
858
859        // Ensure that buffer alignment matches channel count
860        // 8-bit data in shared memory is not currently supported by AudioFlinger
861        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
862        if (mChannelCount > 1) {
863            // More than 2 channels does not require stronger alignment than stereo
864            alignment <<= 1;
865        }
866        if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
867            ALOGE("Invalid buffer alignment: address %p, channel count %u",
868                    sharedBuffer->pointer(), mChannelCount);
869            return BAD_VALUE;
870        }
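        // Note (illustrative, not part of the original file): alignment starts at 2 bytes for
        // 16-bit samples and is doubled to 4 bytes when mChannelCount > 1, so a stereo shared
        // buffer whose pointer() is not 4-byte aligned is rejected above.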
871
872        // When initializing a shared buffer AudioTrack via constructors,
873        // there's no frameCount parameter.
874        // But when initializing a shared buffer AudioTrack via set(),
875        // there _is_ a frameCount parameter.  We silently ignore it.
876        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
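        // Worked example (hypothetical numbers, not part of the original file): a 64 KiB shared
        // buffer of stereo 16-bit PCM yields frameCount = 65536 / 2 / 2 = 16384 frames.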
877
878    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
879
880        // FIXME move these calculations and associated checks to server
881        uint32_t afSampleRate;
882        status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
883        if (status != NO_ERROR) {
884            ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType,
885                    status);
886            return NO_INIT;
887        }
888        size_t afFrameCount;
889        status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
890        if (status != NO_ERROR) {
891            ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
892            return NO_INIT;
893        }
894
895        // Ensure that buffer depth covers at least audio hardware latency
896        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
897        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
898                afFrameCount, minBufCount, afSampleRate, afLatency);
899        if (minBufCount <= 2) {
900            minBufCount = sampleRate == afSampleRate ? 2 : 3;
901        }
902
903        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
904        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
905                ", afLatency=%d",
906                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
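        // Worked example (hypothetical numbers, not part of the original file): with
        // afFrameCount = 1024, afSampleRate = 44100 and afLatency = 50 ms, one mixer buffer lasts
        // (1000 * 1024) / 44100 ~= 23 ms, so minBufCount stays at 2; for a client sampleRate of
        // 44100 this gives minFrameCount = (1024 * 44100 * 2) / 44100 = 2048 frames.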
907
908        if (frameCount == 0) {
909            frameCount = minFrameCount;
910        }
911        // Make sure that application is notified with sufficient margin
912        // before underrun
913        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
914            mNotificationFramesAct = frameCount/2;
915        }
916        if (frameCount < minFrameCount) {
917            // not ALOGW because it happens all the time when playing key clicks over A2DP
918            ALOGV("Minimum buffer size corrected from %d to %d",
919                     frameCount, minFrameCount);
920            frameCount = minFrameCount;
921        }
922
923    } else {
924        // For fast tracks, the frame count calculations and checks are done by server
925    }
926
927    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
928    if (mIsTimed) {
929        trackFlags |= IAudioFlinger::TRACK_TIMED;
930    }
931
932    pid_t tid = -1;
933    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
934        trackFlags |= IAudioFlinger::TRACK_FAST;
935        if (mAudioTrackThread != 0) {
936            tid = mAudioTrackThread->getTid();
937        }
938    }
939
940    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
941        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
942    }
943
944    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
945                                                      sampleRate,
946                                                      // AudioFlinger only sees 16-bit PCM
947                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
948                                                              AUDIO_FORMAT_PCM_16_BIT : format,
949                                                      mChannelMask,
950                                                      frameCount,
951                                                      &trackFlags,
952                                                      sharedBuffer,
953                                                      output,
954                                                      tid,
955                                                      &mSessionId,
956                                                      mName,
957                                                      &status);
958
959    if (track == 0) {
960        ALOGE("AudioFlinger could not create track, status: %d", status);
961        return status;
962    }
963    sp<IMemory> iMem = track->getCblk();
964    if (iMem == 0) {
965        ALOGE("Could not get control block");
966        return NO_INIT;
967    }
968    if (mAudioTrack != 0) {
969        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
970        mDeathNotifier.clear();
971    }
972    mAudioTrack = track;
973    mCblkMemory = iMem;
974    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
975    mCblk = cblk;
976    size_t temp = cblk->frameCount_;
977    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
978        // In the current design, the AudioTrack client checks and ensures frame count validity
979        // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
980        // except for a fast track, which uses a special method of assigning the frame count.
981        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
982    }
983    frameCount = temp;
984    mAwaitBoost = false;
985    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
986        if (trackFlags & IAudioFlinger::TRACK_FAST) {
987            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
988            mAwaitBoost = true;
989            if (sharedBuffer == 0) {
990                // double-buffering is not required for fast tracks, due to tighter scheduling
991                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount) {
992                    mNotificationFramesAct = frameCount;
993                }
994            }
995        } else {
996            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
997            // once denied, do not request again if IAudioTrack is re-created
998            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
999            mFlags = flags;
1000            if (sharedBuffer == 0) {
1001                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
1002                    mNotificationFramesAct = frameCount/2;
1003                }
1004            }
1005        }
1006    }
1007    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1008        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1009            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1010        } else {
1011            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1012            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1013            mFlags = flags;
1014            return NO_INIT;
1015        }
1016    }
1017
1018    mRefreshRemaining = true;
1019
1020    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1021    // is the value of pointer() for the shared buffer, otherwise buffers points
1022    // immediately after the control block.  This address is for the mapping within client
1023    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1024    void* buffers;
1025    if (sharedBuffer == 0) {
1026        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1027    } else {
1028        buffers = sharedBuffer->pointer();
1029    }
1030
1031    mAudioTrack->attachAuxEffect(mAuxEffectId);
1032    // FIXME don't believe this lie
1033    mLatency = afLatency + (1000*frameCount) / sampleRate;
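    // Worked example (hypothetical numbers, not part of the original file): with afLatency = 50 ms,
    // frameCount = 2048 and sampleRate = 44100, mLatency = 50 + (1000 * 2048) / 44100 = 96 ms.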
1034    mFrameCount = frameCount;
1035    // If IAudioTrack is re-created, don't let the requested frameCount
1036    // decrease.  This can confuse clients that cache frameCount().
1037    if (frameCount > mReqFrameCount) {
1038        mReqFrameCount = frameCount;
1039    }
1040
1041    // update proxy
1042    if (sharedBuffer == 0) {
1043        mStaticProxy.clear();
1044        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1045    } else {
1046        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1047        mProxy = mStaticProxy;
1048    }
1049    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
1050            uint16_t(mVolume[LEFT] * 0x1000));
1051    mProxy->setSendLevel(mSendLevel);
1052    mProxy->setSampleRate(mSampleRate);
1053    mProxy->setEpoch(epoch);
1054    mProxy->setMinimum(mNotificationFramesAct);
1055
1056    mDeathNotifier = new DeathNotifier(this);
1057    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1058
1059    return NO_ERROR;
1060}
1061
1062status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1063{
1064    if (audioBuffer == NULL) {
1065        return BAD_VALUE;
1066    }
1067    if (mTransfer != TRANSFER_OBTAIN) {
1068        audioBuffer->frameCount = 0;
1069        audioBuffer->size = 0;
1070        audioBuffer->raw = NULL;
1071        return INVALID_OPERATION;
1072    }
1073
1074    const struct timespec *requested;
1075    if (waitCount == -1) {
1076        requested = &ClientProxy::kForever;
1077    } else if (waitCount == 0) {
1078        requested = &ClientProxy::kNonBlocking;
1079    } else if (waitCount > 0) {
1080        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1081        struct timespec timeout;
1082        timeout.tv_sec = ms / 1000;
1083        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1084        requested = &timeout;
1085    } else {
1086        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1087        requested = NULL;
1088    }
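    // Worked example (not part of the original file): with WAIT_PERIOD_MS = 10, a waitCount of 250
    // yields a 2500 ms timeout, i.e. timeout.tv_sec = 2 and timeout.tv_nsec = 500 * 1000000.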
1089    return obtainBuffer(audioBuffer, requested);
1090}
1091
1092status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1093        struct timespec *elapsed, size_t *nonContig)
1094{
1095    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1096    uint32_t oldSequence = 0;
1097    uint32_t newSequence;
1098
1099    Proxy::Buffer buffer;
1100    status_t status = NO_ERROR;
1101
1102    static const int32_t kMaxTries = 5;
1103    int32_t tryCounter = kMaxTries;
1104
1105    do {
1106        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1107        // keep them from going away if another thread re-creates the track during obtainBuffer()
1108        sp<AudioTrackClientProxy> proxy;
1109        sp<IMemory> iMem;
1110
1111        {   // start of lock scope
1112            AutoMutex lock(mLock);
1113
1114            newSequence = mSequence;
1115            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1116            if (status == DEAD_OBJECT) {
1117                // re-create track, unless someone else has already done so
1118                if (newSequence == oldSequence) {
1119                    status = restoreTrack_l("obtainBuffer");
1120                    if (status != NO_ERROR) {
1121                        buffer.mFrameCount = 0;
1122                        buffer.mRaw = NULL;
1123                        buffer.mNonContig = 0;
1124                        break;
1125                    }
1126                }
1127            }
1128            oldSequence = newSequence;
1129
1130            // Keep the extra references
1131            proxy = mProxy;
1132            iMem = mCblkMemory;
1133
1134            if (mState == STATE_STOPPING) {
1135                status = -EINTR;
1136                buffer.mFrameCount = 0;
1137                buffer.mRaw = NULL;
1138                buffer.mNonContig = 0;
1139                break;
1140            }
1141
1142            // Non-blocking if track is stopped or paused
1143            if (mState != STATE_ACTIVE) {
1144                requested = &ClientProxy::kNonBlocking;
1145            }
1146
1147        }   // end of lock scope
1148
1149        buffer.mFrameCount = audioBuffer->frameCount;
1150        // FIXME starts the requested timeout and elapsed over from scratch
1151        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1152
1153    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1154
1155    audioBuffer->frameCount = buffer.mFrameCount;
1156    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1157    audioBuffer->raw = buffer.mRaw;
1158    if (nonContig != NULL) {
1159        *nonContig = buffer.mNonContig;
1160    }
1161    return status;
1162}
1163
1164void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1165{
1166    if (mTransfer == TRANSFER_SHARED) {
1167        return;
1168    }
1169
1170    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1171    if (stepCount == 0) {
1172        return;
1173    }
1174
1175    Proxy::Buffer buffer;
1176    buffer.mFrameCount = stepCount;
1177    buffer.mRaw = audioBuffer->raw;
1178
1179    AutoMutex lock(mLock);
1180    mInUnderrun = false;
1181    mProxy->releaseBuffer(&buffer);
1182
1183    // restart track if it was disabled by audioflinger due to previous underrun
1184    if (mState == STATE_ACTIVE) {
1185        audio_track_cblk_t* cblk = mCblk;
1186        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1187            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
1188                    this, mName.string());
1189            // FIXME ignoring status
1190            mAudioTrack->start();
1191        }
1192    }
1193}
1194
1195// -------------------------------------------------------------------------
1196
1197ssize_t AudioTrack::write(const void* buffer, size_t userSize)
1198{
1199    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1200        return INVALID_OPERATION;
1201    }
1202
1203    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1204        // Sanity check: the user is most likely passing an error code, and it would
1205        // make the return value ambiguous (actualSize vs. error).
1206        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d))", buffer, userSize, userSize);
1207        return BAD_VALUE;
1208    }
1209
1210    size_t written = 0;
1211    Buffer audioBuffer;
1212
1213    while (userSize >= mFrameSize) {
1214        audioBuffer.frameCount = userSize / mFrameSize;
1215
1216        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
1217        if (err < 0) {
1218            if (written > 0) {
1219                break;
1220            }
1221            return ssize_t(err);
1222        }
1223
1224        size_t toWrite;
1225        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1226            // Divide capacity by 2 to take expansion into account
1227            toWrite = audioBuffer.size >> 1;
1228            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1229        } else {
1230            toWrite = audioBuffer.size;
1231            memcpy(audioBuffer.i8, buffer, toWrite);
1232        }
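        // Worked example (not part of the original file): for 8-bit source data on a mixed
        // (non-direct) output, a 1024-byte obtained buffer accepts only 512 source bytes, which
        // memcpy_to_i16_from_u8() expands to 1024 bytes of 16-bit PCM in the track buffer.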
1233        buffer = ((const char *) buffer) + toWrite;
1234        userSize -= toWrite;
1235        written += toWrite;
1236
1237        releaseBuffer(&audioBuffer);
1238    }
1239
1240    return written;
1241}
1242
1243// -------------------------------------------------------------------------
1244
1245TimedAudioTrack::TimedAudioTrack() {
1246    mIsTimed = true;
1247}
1248
1249status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1250{
1251    AutoMutex lock(mLock);
1252    status_t result = UNKNOWN_ERROR;
1253
1254#if 1
1255    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1256    // while we are accessing the cblk
1257    sp<IAudioTrack> audioTrack = mAudioTrack;
1258    sp<IMemory> iMem = mCblkMemory;
1259#endif
1260
1261    // If the track is not already invalid, try to allocate a buffer.  If the allocation
1262    // fails, indicating that the server is dead, flag the track as invalid so
1263    // we can attempt to restore it in just a bit.
1264    audio_track_cblk_t* cblk = mCblk;
1265    if (!(cblk->mFlags & CBLK_INVALID)) {
1266        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1267        if (result == DEAD_OBJECT) {
1268            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1269        }
1270    }
1271
1272    // If the track is invalid at this point, attempt to restore it, and try the
1273    // allocation one more time.
1274    if (cblk->mFlags & CBLK_INVALID) {
1275        result = restoreTrack_l("allocateTimedBuffer");
1276
1277        if (result == NO_ERROR) {
1278            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1279        }
1280    }
1281
1282    return result;
1283}
1284
1285status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1286                                           int64_t pts)
1287{
1288    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1289    {
1290        AutoMutex lock(mLock);
1291        audio_track_cblk_t* cblk = mCblk;
1292        // restart track if it was disabled by audioflinger due to previous underrun
1293        if (buffer->size() != 0 && status == NO_ERROR &&
1294                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1295            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1296            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1297            // FIXME ignoring status
1298            mAudioTrack->start();
1299        }
1300    }
1301    return status;
1302}
1303
1304status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1305                                                TargetTimeline target)
1306{
1307    return mAudioTrack->setMediaTimeTransform(xform, target);
1308}
1309
1310// -------------------------------------------------------------------------
1311
1312nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
1313{
1314    // Currently the AudioTrack thread is not created if there are no callbacks.
1315    // Would it ever make sense to run the thread, even without callbacks?
1316    // If so, then replace this by checks at each use for mCbf != NULL.
1317    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1318
1319    mLock.lock();
1320    if (mAwaitBoost) {
1321        mAwaitBoost = false;
1322        mLock.unlock();
1323        static const int32_t kMaxTries = 5;
1324        int32_t tryCounter = kMaxTries;
1325        uint32_t pollUs = 10000;
1326        do {
1327            int policy = sched_getscheduler(0);
1328            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1329                break;
1330            }
1331            usleep(pollUs);
1332            pollUs <<= 1;
1333        } while (tryCounter-- > 0);
1334        if (tryCounter < 0) {
1335            ALOGE("did not receive expected priority boost on time");
1336        }
1337        // Run again immediately
1338        return 0;
1339    }
1340
1341    // Can only reference mCblk while locked
1342    int32_t flags = android_atomic_and(
1343        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1344
1345    // Check for track invalidation
1346    if (flags & CBLK_INVALID) {
1347        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear the
1348        // AudioSystem cache. We should not exit here, but only after calling the callback, so
1349        // that the upper layers can recreate the track.
1350        if (!isOffloaded() || (mSequence == mObservedSequence)) {
1351            status_t status = restoreTrack_l("processAudioBuffer");
1352            mLock.unlock();
1353            // Run again immediately, but with a new IAudioTrack
1354            return 0;
1355        }
1356    }
1357
1358    bool waitStreamEnd = mState == STATE_STOPPING;
1359    bool active = mState == STATE_ACTIVE;
1360
1361    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1362    bool newUnderrun = false;
1363    if (flags & CBLK_UNDERRUN) {
1364#if 0
1365        // Currently in shared buffer mode, when the server reaches the end of the buffer,
1366        // the track stays active in a continuous underrun state.  It's up to the application
1367        // to pause or stop the track, or set the position to a new offset within the buffer.
1368        // This was some experimental code to auto-pause on underrun.  Keeping it here
1369        // in "if 0" so we can revisit this if we add a real sequencer for shared memory content.
1370        if (mTransfer == TRANSFER_SHARED) {
1371            mState = STATE_PAUSED;
1372            active = false;
1373        }
1374#endif
1375        if (!mInUnderrun) {
1376            mInUnderrun = true;
1377            newUnderrun = true;
1378        }
1379    }
1380
1381    // Get current position of server
1382    size_t position = mProxy->getPosition();
1383
1384    // Manage marker callback
1385    bool markerReached = false;
1386    size_t markerPosition = mMarkerPosition;
1387    // FIXME fails for wraparound, need 64 bits
1388    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1389        mMarkerReached = markerReached = true;
1390    }
1391
1392    // Determine number of new position callback(s) that will be needed, while locked
1393    size_t newPosCount = 0;
1394    size_t newPosition = mNewPosition;
1395    size_t updatePeriod = mUpdatePeriod;
1396    // FIXME fails for wraparound, need 64 bits
1397    if (updatePeriod > 0 && position >= newPosition) {
1398        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1399        mNewPosition += updatePeriod * newPosCount;
1400    }
1401
1402    // Cache other fields that will be needed soon
1403    uint32_t loopPeriod = mLoopPeriod;
1404    uint32_t sampleRate = mSampleRate;
1405    size_t notificationFrames = mNotificationFramesAct;
1406    if (mRefreshRemaining) {
1407        mRefreshRemaining = false;
1408        mRemainingFrames = notificationFrames;
1409        mRetryOnPartialBuffer = false;
1410    }
1411    size_t misalignment = mProxy->getMisalignment();
1412    uint32_t sequence = mSequence;
1413
1414    // These fields don't need to be cached, because they are assigned only by set():
1415    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1416    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1417
1418    mLock.unlock();
1419
1420    if (waitStreamEnd) {
1421        AutoMutex lock(mLock);
1422
1423        sp<AudioTrackClientProxy> proxy = mProxy;
1424        sp<IMemory> iMem = mCblkMemory;
1425
1426        struct timespec timeout;
1427        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1428        timeout.tv_nsec = 0;
1429
1430        mLock.unlock();
1431        status_t status = mProxy->waitStreamEndDone(&timeout);
1432        mLock.lock();
1433        switch (status) {
1434        case NO_ERROR:
1435        case DEAD_OBJECT:
1436        case TIMED_OUT:
1437            mLock.unlock();
1438            mCbf(EVENT_STREAM_END, mUserData, NULL);
1439            mLock.lock();
1440            if (mState == STATE_STOPPING) {
1441                mState = STATE_STOPPED;
1442                if (status != DEAD_OBJECT) {
1443                   return NS_INACTIVE;
1444                }
1445            }
1446            return 0;
1447        default:
1448            return 0;
1449        }
1450    }
1451
1452    // perform callbacks while unlocked
1453    if (newUnderrun) {
1454        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1455    }
1456    // FIXME we will miss loops if loop cycle was signaled several times since last call
1457    //       to processAudioBuffer()
1458    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1459        mCbf(EVENT_LOOP_END, mUserData, NULL);
1460    }
1461    if (flags & CBLK_BUFFER_END) {
1462        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1463    }
1464    if (markerReached) {
1465        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1466    }
1467    while (newPosCount > 0) {
1468        size_t temp = newPosition;
1469        mCbf(EVENT_NEW_POS, mUserData, &temp);
1470        newPosition += updatePeriod;
1471        newPosCount--;
1472    }
1473
1474    if (mObservedSequence != sequence) {
1475        mObservedSequence = sequence;
1476        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1477        // for offloaded tracks, just wait for the upper layers to recreate the track
1478        if (isOffloaded()) {
1479            return NS_INACTIVE;
1480        }
1481    }
1482
1483    // if inactive, then don't run me again until re-started
1484    if (!active) {
1485        return NS_INACTIVE;
1486    }
1487
1488    // Compute the estimated time until the next timed event (position, markers, loops)
1489    // FIXME only for non-compressed audio
1490    uint32_t minFrames = ~0;
1491    if (!markerReached && position < markerPosition) {
1492        minFrames = markerPosition - position;
1493    }
1494    if (loopPeriod > 0 && loopPeriod < minFrames) {
1495        minFrames = loopPeriod;
1496    }
1497    if (updatePeriod > 0 && updatePeriod < minFrames) {
1498        minFrames = updatePeriod;
1499    }
1500
1501    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1502    static const uint32_t kPoll = 0;
1503    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1504        minFrames = kPoll * notificationFrames;
1505    }
1506
1507    // Convert frame units to time units
1508    nsecs_t ns = NS_WHENEVER;
1509    if (minFrames != (uint32_t) ~0) {
1510        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1511        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1512        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1513    }
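    // Worked example (not part of the original file): if the next event is 480 frames away at
    // 48000 Hz, ns = (480 * 1000000000LL) / 48000 + 10 ms fudge = 10 ms + 10 ms = 20 ms.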
1514
1515    // If not supplying data by EVENT_MORE_DATA, then we're done
1516    if (mTransfer != TRANSFER_CALLBACK) {
1517        return ns;
1518    }
1519
1520    struct timespec timeout;
1521    const struct timespec *requested = &ClientProxy::kForever;
1522    if (ns != NS_WHENEVER) {
1523        timeout.tv_sec = ns / 1000000000LL;
1524        timeout.tv_nsec = ns % 1000000000LL;
1525        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1526        requested = &timeout;
1527    }
1528
1529    while (mRemainingFrames > 0) {
1530
1531        Buffer audioBuffer;
1532        audioBuffer.frameCount = mRemainingFrames;
1533        size_t nonContig;
1534        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1535        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1536                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1537        requested = &ClientProxy::kNonBlocking;
1538        size_t avail = audioBuffer.frameCount + nonContig;
1539        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1540                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1541        if (err != NO_ERROR) {
1542            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1543                    (isOffloaded() && (err == DEAD_OBJECT))) {
1544                return 0;
1545            }
1546            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1547            return NS_NEVER;
1548        }
1549
1550        if (mRetryOnPartialBuffer) {
1551            mRetryOnPartialBuffer = false;
1552            if (avail < mRemainingFrames) {
1553                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1554                if (ns < 0 || myns < ns) {
1555                    ns = myns;
1556                }
1557                return ns;
1558            }
1559        }
1560
1561        // Divide buffer size by 2 to take into account the expansion
1562        // due to 8 to 16 bit conversion: the callback must fill only half
1563        // of the destination buffer
1564        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1565            audioBuffer.size >>= 1;
1566        }
1567
1568        size_t reqSize = audioBuffer.size;
1569        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1570        size_t writtenSize = audioBuffer.size;
1571        size_t writtenFrames = writtenSize / mFrameSize;
1572
1573        // Sanity check on returned size
1574        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1575            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1576                    reqSize, (int) writtenSize);
1577            return NS_NEVER;
1578        }
1579
1580        if (writtenSize == 0) {
1581            // The callback is done filling buffers
1582            // Keep this thread going to handle timed events and
1583            // still try to get more data in intervals of WAIT_PERIOD_MS
1584            // but don't just spin here and hog the CPU, so wait
1585            return WAIT_PERIOD_MS * 1000000LL;
1586        }
1587
1588        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1589            // 8 to 16 bit conversion, note that source and destination are the same address
1590            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1591            audioBuffer.size <<= 1;
1592        }
1593
1594        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1595        audioBuffer.frameCount = releasedFrames;
1596        mRemainingFrames -= releasedFrames;
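        // misalignment tracks the distance to the next notification boundary; in this revision
        // it is only consumed by the disabled coalescing heuristic below.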
1597        if (misalignment >= releasedFrames) {
1598            misalignment -= releasedFrames;
1599        } else {
1600            misalignment = 0;
1601        }
1602
1603        releaseBuffer(&audioBuffer);
1604
1605        // FIXME this is where we would repeat EVENT_MORE_DATA on the same, advanced buffer
1606        // if the callback did not accept the full chunk
1607        if (writtenSize < reqSize) {
1608            continue;
1609        }
1610
1611        // There could be enough non-contiguous frames available to satisfy the remaining request
1612        if (mRemainingFrames <= nonContig) {
1613            continue;
1614        }
1615
1616#if 0
1617        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1618        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1619        // that total to a sum == notificationFrames.
1620        if (0 < misalignment && misalignment <= mRemainingFrames) {
1621            mRemainingFrames = misalignment;
1622            return (mRemainingFrames * 1100000000LL) / sampleRate;
1623        }
1624#endif
1625
1626    }
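    // The entire notification quantum was delivered; arm the next quantum and again allow
    // one blocking retry on a partial buffer.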
1627    mRemainingFrames = notificationFrames;
1628    mRetryOnPartialBuffer = true;
1629
1630    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1631    return 0;
1632}
1633
1634status_t AudioTrack::restoreTrack_l(const char *from)
1635{
1636    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1637          isOffloaded() ? "Offloaded" : "PCM", from);
1638    ++mSequence;
1639    status_t result;
1640
1641    // refresh the audio configuration cache in this process to make sure we get new
1642    // output parameters in getOutput_l() and createTrack_l()
1643    AudioSystem::clearAudioConfigCache();
1644
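    // Offloaded tracks cannot be restored transparently; report the death to the caller instead.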
1645    if (isOffloaded()) {
1646        return DEAD_OBJECT;
1647    }
1648
1649    // force a new output query from the audio policy manager
1650    mOutput = 0;
1651    audio_io_handle_t output = getOutput_l();
1652
1653    // If a new IAudioTrack is created, createTrack_l() will modify the
1654    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1655    // It will also drop the strong references on the previous IAudioTrack and IMemory
1656    size_t position = mProxy->getPosition();
1657    mNewPosition = position + mUpdatePeriod;
1658    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1659    result = createTrack_l(mStreamType,
1660                           mSampleRate,
1661                           mFormat,
1662                           mReqFrameCount,  // so that frame count never goes down
1663                           mFlags,
1664                           mSharedBuffer,
1665                           output,
1666                           position /*epoch*/);
1667
1668    if (result == NO_ERROR) {
1669        // continue playback from last known position, but
1670        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1671        if (mStaticProxy != NULL) {
1672            mLoopPeriod = 0;
1673            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1674        }
1675        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1676        //       track destruction have been played? This is critical for the SoundPool implementation.
1677        //       This must be broken, and needs to be tested/debugged.
1678#if 0
1679        // restore write index and set other indexes to reflect empty buffer status
1680        if (!strcmp(from, "start")) {
1681            // Make sure that a client relying on callback events indicating underrun or
1682            // the actual number of audio frames played (e.g. SoundPool) receives them.
1683            if (mSharedBuffer == 0) {
1684                // restart playback even if buffer is not completely filled.
1685                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1686            }
1687        }
1688#endif
1689        if (mState == STATE_ACTIVE) {
1690            result = mAudioTrack->start();
1691        }
1692    }
1693    if (result != NO_ERROR) {
1694        // Use of direct and offloaded output streams is reference-counted by the audio policy manager.
1695        // As getOutput() was called above and resulted in an output stream being opened,
1696        // we need to release it.
1697        AudioSystem::releaseOutput(output);
1698        ALOGW("restoreTrack_l() failed status %d", result);
1699        mState = STATE_STOPPED;
1700    }
1701
1702    return result;
1703}
1704
1705status_t AudioTrack::setParameters(const String8& keyValuePairs)
1706{
1707    AutoMutex lock(mLock);
1708    if (mAudioTrack != 0) {
1709        return mAudioTrack->setParameters(keyValuePairs);
1710    } else {
1711        return NO_INIT;
1712    }
1713}
1714
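// Note that parameters are queried from the output stream via AudioSystem, not through IAudioTrack.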
1715String8 AudioTrack::getParameters(const String8& keys)
1716{
1717    if (mOutput) {
1718        return AudioSystem::getParameters(mOutput, keys);
1719    } else {
1720        return String8::empty();
1721    }
1722}
1723
1724status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
1725{
1726
1727    const size_t SIZE = 256;
1728    char buffer[SIZE];
1729    String8 result;
1730
1731    result.append(" AudioTrack::dump\n");
1732    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1733            mVolume[0], mVolume[1]);
1734    result.append(buffer);
1735    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
1736            mChannelCount, mFrameCount);
1737    result.append(buffer);
1738    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1739    result.append(buffer);
1740    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1741    result.append(buffer);
1742    ::write(fd, result.string(), result.size());
1743    return NO_ERROR;
1744}
1745
1746uint32_t AudioTrack::getUnderrunFrames() const
1747{
1748    AutoMutex lock(mLock);
1749    return mProxy->getUnderrunFrames();
1750}
1751
1752// =========================================================================
1753
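// AudioFlinger's binder died: notify the shared-memory proxy so that client-side calls
// fail with DEAD_OBJECT, after which the track can be re-created via restoreTrack_l().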
1754void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
1755{
1756    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1757    if (audioTrack != 0) {
1758        AutoMutex lock(audioTrack->mLock);
1759        audioTrack->mProxy->binderDied();
1760    }
1761}
1762
1763// =========================================================================
1764
1765AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1766    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mResumeLatch(false)
1767{
1768}
1769
1770AudioTrack::AudioTrackThread::~AudioTrackThread()
1771{
1772}
1773
1774bool AudioTrack::AudioTrackThread::threadLoop()
1775{
1776    {
1777        AutoMutex _l(mMyLock);
1778        if (mPaused) {
1779            mMyCond.wait(mMyLock);
1780            // caller will check for exitPending()
1781            return true;
1782        }
1783    }
1784    nsecs_t ns = mReceiver.processAudioBuffer(this);
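    // Interpret the return value: 0 = run again immediately, NS_WHENEVER = no deadline
    // (poll again in about a second), NS_INACTIVE = pause until resumed, NS_NEVER = exit the
    // thread, any other value = sleep for that many nanoseconds before the next pass.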
1785    switch (ns) {
1786    case 0:
1787        return true;
1788    case NS_WHENEVER:
1789        sleep(1);
1790        return true;
1791    case NS_INACTIVE:
1792        pauseConditional();
1793        return true;
1794    case NS_NEVER:
1795        return false;
1796    default:
1797        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
1798        struct timespec req;
1799        req.tv_sec = ns / 1000000000LL;
1800        req.tv_nsec = ns % 1000000000LL;
1801        nanosleep(&req, NULL /*rem*/);
1802        return true;
1803    }
1804}
1805
1806void AudioTrack::AudioTrackThread::requestExit()
1807{
1808    // must be in this order to avoid a race condition
1809    Thread::requestExit();
1810    resume();
1811}
1812
1813void AudioTrack::AudioTrackThread::pause()
1814{
1815    AutoMutex _l(mMyLock);
1816    mPaused = true;
1817    mResumeLatch = false;
1818}
1819
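// Like pause(), but does nothing if resume() was called after the last pause().
// This avoids dropping a resume() that races with processAudioBuffer() returning NS_INACTIVE.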
1820void AudioTrack::AudioTrackThread::pauseConditional()
1821{
1822    AutoMutex _l(mMyLock);
1823    if (mResumeLatch) {
1824        mResumeLatch = false;
1825    } else {
1826        mPaused = true;
1827    }
1828}
1829
1830void AudioTrack::AudioTrackThread::resume()
1831{
1832    AutoMutex _l(mMyLock);
1833    if (mPaused) {
1834        mPaused = false;
1835        mResumeLatch = false;
1836        mMyCond.signal();
1837    } else {
1838        mResumeLatch = true;
1839    }
1840}
1841
1842} // namespace android
1843