AudioTrack.cpp revision dd5f4c8c4059f890e81b28b026a688febb4e1dd9
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <sys/resource.h>
23#include <audio_utils/primitives.h>
24#include <binder/IPCThreadState.h>
25#include <media/AudioTrack.h>
26#include <utils/Log.h>
27#include <private/media/AudioTrackShared.h>
28#include <media/IAudioFlinger.h>
29
30#define WAIT_PERIOD_MS                  10
31#define WAIT_STREAM_END_TIMEOUT_SEC     120
32
33
34namespace android {
35// ---------------------------------------------------------------------------
36
37// static
38status_t AudioTrack::getMinFrameCount(
39        size_t* frameCount,
40        audio_stream_type_t streamType,
41        uint32_t sampleRate)
42{
43    if (frameCount == NULL) {
44        return BAD_VALUE;
45    }
46
47    // FIXME merge with similar code in createTrack_l(), except we're missing
48    //       some information here that is available in createTrack_l():
49    //          audio_io_handle_t output
50    //          audio_format_t format
51    //          audio_channel_mask_t channelMask
52    //          audio_output_flags_t flags
53    uint32_t afSampleRate;
54    status_t status;
55    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
56    if (status != NO_ERROR) {
57        return status;
58    }
59    size_t afFrameCount;
60    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
61    if (status != NO_ERROR) {
62        return status;
63    }
64    uint32_t afLatency;
65    status = AudioSystem::getOutputLatency(&afLatency, streamType);
66    if (status != NO_ERROR) {
67        return status;
68    }
69
70    // Ensure that buffer depth covers at least audio hardware latency
71    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
72    if (minBufCount < 2) {
73        minBufCount = 2;
74    }
75
76    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
77            afFrameCount * minBufCount * sampleRate / afSampleRate;
78    // The formula above should always produce a non-zero value, but return an error
79    // in the unlikely event that it does not, as that's part of the API contract.
80    if (*frameCount == 0) {
81        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
82                streamType, sampleRate);
83        return BAD_VALUE;
84    }
85    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
86            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
87    return NO_ERROR;
88}
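
// A worked example of the formula above, using hypothetical HAL values: with
// afSampleRate = 48000, afFrameCount = 960 (a 20 ms mix buffer) and afLatency = 80 ms,
//     minBufCount = 80 / ((1000 * 960) / 48000) = 80 / 20 = 4
// and for a client sample rate of 44100,
//     *frameCount = 960 * 4 * 44100 / 48000 = 3528 frames.
// Below is a minimal client-side sketch of querying the minimum before constructing a
// track; it is illustrative only (the helper name is hypothetical) and is kept under
// "#if 0" like the other non-compiled example code in this file.
#if 0
static size_t exampleQueryMinFrameCount()   // hypothetical helper, for illustration only
{
    size_t minFrameCount = 0;
    status_t status = AudioTrack::getMinFrameCount(&minFrameCount,
            AUDIO_STREAM_MUSIC, 44100 /* client sample rate */);
    if (status != NO_ERROR) {
        ALOGE("getMinFrameCount() failed, status %d", status);
        return 0;
    }
    // Callers typically request at least this many frames, often a multiple of it.
    return minFrameCount;
}
#endif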
89
90// ---------------------------------------------------------------------------
91
92AudioTrack::AudioTrack()
93    : mStatus(NO_INIT),
94      mIsTimed(false),
95      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
96      mPreviousSchedulingGroup(SP_DEFAULT)
97{
98}
99
100AudioTrack::AudioTrack(
101        audio_stream_type_t streamType,
102        uint32_t sampleRate,
103        audio_format_t format,
104        audio_channel_mask_t channelMask,
105        int frameCount,
106        audio_output_flags_t flags,
107        callback_t cbf,
108        void* user,
109        int notificationFrames,
110        int sessionId,
111        transfer_type transferType,
112        const audio_offload_info_t *offloadInfo,
113        int uid)
114    : mStatus(NO_INIT),
115      mIsTimed(false),
116      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
117      mPreviousSchedulingGroup(SP_DEFAULT)
118{
119    mStatus = set(streamType, sampleRate, format, channelMask,
120            frameCount, flags, cbf, user, notificationFrames,
121            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
122            offloadInfo, uid);
123}
124
125AudioTrack::AudioTrack(
126        audio_stream_type_t streamType,
127        uint32_t sampleRate,
128        audio_format_t format,
129        audio_channel_mask_t channelMask,
130        const sp<IMemory>& sharedBuffer,
131        audio_output_flags_t flags,
132        callback_t cbf,
133        void* user,
134        int notificationFrames,
135        int sessionId,
136        transfer_type transferType,
137        const audio_offload_info_t *offloadInfo,
138        int uid)
139    : mStatus(NO_INIT),
140      mIsTimed(false),
141      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
142      mPreviousSchedulingGroup(SP_DEFAULT)
143{
144    mStatus = set(streamType, sampleRate, format, channelMask,
145            0 /*frameCount*/, flags, cbf, user, notificationFrames,
146            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid);
147}
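
// A minimal sketch of creating a streaming AudioTrack with a callback; set() selects
// TRANSFER_CALLBACK when a callback is supplied and threadCanCallJava is false.  The
// sketch assumes the default values for the trailing constructor parameters declared in
// AudioTrack.h, the helper names are hypothetical, and the callback body is a placeholder.
#if 0
static void exampleCallback(int event, void* user, void* info)     // hypothetical
{
    if (event == AudioTrack::EVENT_MORE_DATA) {
        AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
        // Fill buffer->raw with up to buffer->size bytes of 16-bit PCM, then set
        // buffer->size to the number of bytes actually written.
        buffer->size = 0;   // placeholder: no data supplied
    }
}

static sp<AudioTrack> exampleCreateStreamingTrack()                 // hypothetical
{
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC,
            44100,
            AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO,
            0 /* frameCount: let createTrack_l() pick the minimum */,
            AUDIO_OUTPUT_FLAG_NONE,
            exampleCallback,
            NULL /* user */);
    if (track->initCheck() != NO_ERROR) {
        return 0;
    }
    return track;
}
#endif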
148
149AudioTrack::~AudioTrack()
150{
151    if (mStatus == NO_ERROR) {
152        // Make sure that the callback function exits in the case where
153        // it is looping on buffer full condition in obtainBuffer().
154        // Otherwise the callback thread will never exit.
155        stop();
156        if (mAudioTrackThread != 0) {
157            mProxy->interrupt();
158            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
159            mAudioTrackThread->requestExitAndWait();
160            mAudioTrackThread.clear();
161        }
162        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
163        mAudioTrack.clear();
164        IPCThreadState::self()->flushCommands();
165        AudioSystem::releaseAudioSessionId(mSessionId);
166    }
167}
168
169status_t AudioTrack::set(
170        audio_stream_type_t streamType,
171        uint32_t sampleRate,
172        audio_format_t format,
173        audio_channel_mask_t channelMask,
174        int frameCountInt,
175        audio_output_flags_t flags,
176        callback_t cbf,
177        void* user,
178        int notificationFrames,
179        const sp<IMemory>& sharedBuffer,
180        bool threadCanCallJava,
181        int sessionId,
182        transfer_type transferType,
183        const audio_offload_info_t *offloadInfo,
184        int uid)
185{
186    switch (transferType) {
187    case TRANSFER_DEFAULT:
188        if (sharedBuffer != 0) {
189            transferType = TRANSFER_SHARED;
190        } else if (cbf == NULL || threadCanCallJava) {
191            transferType = TRANSFER_SYNC;
192        } else {
193            transferType = TRANSFER_CALLBACK;
194        }
195        break;
196    case TRANSFER_CALLBACK:
197        if (cbf == NULL || sharedBuffer != 0) {
198            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
199            return BAD_VALUE;
200        }
201        break;
202    case TRANSFER_OBTAIN:
203    case TRANSFER_SYNC:
204        if (sharedBuffer != 0) {
205            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
206            return BAD_VALUE;
207        }
208        break;
209    case TRANSFER_SHARED:
210        if (sharedBuffer == 0) {
211            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
212            return BAD_VALUE;
213        }
214        break;
215    default:
216        ALOGE("Invalid transfer type %d", transferType);
217        return BAD_VALUE;
218    }
219    mSharedBuffer = sharedBuffer;
220    mTransfer = transferType;
221
222    // FIXME "int" here is legacy and will be replaced by size_t later
223    if (frameCountInt < 0) {
224        ALOGE("Invalid frame count %d", frameCountInt);
225        return BAD_VALUE;
226    }
227    size_t frameCount = frameCountInt;
228
229    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
230            sharedBuffer->size());
231
232    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
233
234    AutoMutex lock(mLock);
235
236    // invariant that mAudioTrack != 0 is true only after set() returns successfully
237    if (mAudioTrack != 0) {
238        ALOGE("Track already in use");
239        return INVALID_OPERATION;
240    }
241
242    mOutput = 0;
243
244    // handle default values first.
245    if (streamType == AUDIO_STREAM_DEFAULT) {
246        streamType = AUDIO_STREAM_MUSIC;
247    }
248    if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
249        ALOGE("Invalid stream type %d", streamType);
250        return BAD_VALUE;
251    }
252    mStreamType = streamType;
253
254    status_t status;
255    if (sampleRate == 0) {
256        status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType);
257        if (status != NO_ERROR) {
258            ALOGE("Could not get output sample rate for stream type %d; status %d",
259                    streamType, status);
260            return status;
261        }
262    }
263    mSampleRate = sampleRate;
264
265    // these below should probably come from the audioFlinger too...
266    if (format == AUDIO_FORMAT_DEFAULT) {
267        format = AUDIO_FORMAT_PCM_16_BIT;
268    }
269
270    // validate parameters
271    if (!audio_is_valid_format(format)) {
272        ALOGE("Invalid format %d", format);
273        return BAD_VALUE;
274    }
275    mFormat = format;
276
277    if (!audio_is_output_channel(channelMask)) {
278        ALOGE("Invalid channel mask %#x", channelMask);
279        return BAD_VALUE;
280    }
281
282    // AudioFlinger does not currently support 8-bit data in shared memory
283    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
284        ALOGE("8-bit data in shared memory is not supported");
285        return BAD_VALUE;
286    }
287
288    // force direct flag if format is not linear PCM
289    // or offload was requested
290    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
291            || !audio_is_linear_pcm(format)) {
292        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
293                    ? "Offload request, forcing to Direct Output"
294                    : "Not linear PCM, forcing to Direct Output");
295        flags = (audio_output_flags_t)
296                // FIXME why can't we allow direct AND fast?
297                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
298    }
299    // only allow deep buffering for music stream type
300    if (streamType != AUDIO_STREAM_MUSIC) {
301        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
302    }
303
304    mChannelMask = channelMask;
305    uint32_t channelCount = popcount(channelMask);
306    mChannelCount = channelCount;
307
308    if (audio_is_linear_pcm(format)) {
309        mFrameSize = channelCount * audio_bytes_per_sample(format);
310        mFrameSizeAF = channelCount * sizeof(int16_t);
311    } else {
312        mFrameSize = sizeof(uint8_t);
313        mFrameSizeAF = sizeof(uint8_t);
314    }
315
316    audio_io_handle_t output = AudioSystem::getOutput(
317                                    streamType,
318                                    sampleRate, format, channelMask,
319                                    flags,
320                                    offloadInfo);
321
322    if (output == 0) {
323        ALOGE("Could not get audio output for stream type %d", streamType);
324        return BAD_VALUE;
325    }
326
327    mVolume[LEFT] = 1.0f;
328    mVolume[RIGHT] = 1.0f;
329    mSendLevel = 0.0f;
330    // mFrameCount is initialized in createTrack_l
331    mReqFrameCount = frameCount;
332    mNotificationFramesReq = notificationFrames;
333    mNotificationFramesAct = 0;
334    mSessionId = sessionId;
335    if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
336        mClientUid = IPCThreadState::self()->getCallingUid();
337    } else {
338        mClientUid = uid;
339    }
340    mAuxEffectId = 0;
341    mFlags = flags;
342    mCbf = cbf;
343
344    if (cbf != NULL) {
345        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
346        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
347    }
348
349    // create the IAudioTrack
350    status = createTrack_l(streamType,
351                                  sampleRate,
352                                  format,
353                                  frameCount,
354                                  flags,
355                                  sharedBuffer,
356                                  output,
357                                  0 /*epoch*/);
358
359    if (status != NO_ERROR) {
360        if (mAudioTrackThread != 0) {
361            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
362            mAudioTrackThread->requestExitAndWait();
363            mAudioTrackThread.clear();
364        }
365        // Use of direct and offloaded output streams is reference-counted by the audio policy
366        // manager. Since getOutput() was called above and resulted in an output stream being
367        // opened, we need to release it here.
368        AudioSystem::releaseOutput(output);
369        return status;
370    }
371
372    mStatus = NO_ERROR;
373    mState = STATE_STOPPED;
374    mUserData = user;
375    mLoopPeriod = 0;
376    mMarkerPosition = 0;
377    mMarkerReached = false;
378    mNewPosition = 0;
379    mUpdatePeriod = 0;
380    AudioSystem::acquireAudioSessionId(mSessionId);
381    mSequence = 1;
382    mObservedSequence = mSequence;
383    mInUnderrun = false;
384    mOutput = output;
385
386    return NO_ERROR;
387}
388
389// -------------------------------------------------------------------------
390
391status_t AudioTrack::start()
392{
393    AutoMutex lock(mLock);
394
395    if (mState == STATE_ACTIVE) {
396        return INVALID_OPERATION;
397    }
398
399    mInUnderrun = true;
400
401    State previousState = mState;
402    if (previousState == STATE_PAUSED_STOPPING) {
403        mState = STATE_STOPPING;
404    } else {
405        mState = STATE_ACTIVE;
406    }
407    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
408        // reset current position as seen by client to 0
409        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
410        // force refresh of remaining frames by processAudioBuffer() as last
411        // write before stop could be partial.
412        mRefreshRemaining = true;
413    }
414    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
415    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
416
417    sp<AudioTrackThread> t = mAudioTrackThread;
418    if (t != 0) {
419        if (previousState == STATE_STOPPING) {
420            mProxy->interrupt();
421        } else {
422            t->resume();
423        }
424    } else {
425        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
426        get_sched_policy(0, &mPreviousSchedulingGroup);
427        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
428    }
429
430    status_t status = NO_ERROR;
431    if (!(flags & CBLK_INVALID)) {
432        status = mAudioTrack->start();
433        if (status == DEAD_OBJECT) {
434            flags |= CBLK_INVALID;
435        }
436    }
437    if (flags & CBLK_INVALID) {
438        status = restoreTrack_l("start");
439    }
440
441    if (status != NO_ERROR) {
442        ALOGE("start() status %d", status);
443        mState = previousState;
444        if (t != 0) {
445            if (previousState != STATE_STOPPING) {
446                t->pause();
447            }
448        } else {
449            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
450            set_sched_policy(0, mPreviousSchedulingGroup);
451        }
452    }
453
454    return status;
455}
456
457void AudioTrack::stop()
458{
459    AutoMutex lock(mLock);
460    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
461        return;
462    }
463
464    if (isOffloaded_l()) {
465        mState = STATE_STOPPING;
466    } else {
467        mState = STATE_STOPPED;
468    }
469
470    mProxy->interrupt();
471    mAudioTrack->stop();
472    // the playback head position will reset to 0, so if a marker is set, we need
473    // to activate it again
474    mMarkerReached = false;
475#if 0
476    // Force a flush if a shared buffer is used, otherwise audioflinger
477    // will not stop before the end of the buffer is reached.
478    // This may be needed to make sure that playback actually stops, for example when looping is on.
479    if (mSharedBuffer != 0) {
480        flush_l();
481    }
482#endif
483
484    sp<AudioTrackThread> t = mAudioTrackThread;
485    if (t != 0) {
486        if (!isOffloaded_l()) {
487            t->pause();
488        }
489    } else {
490        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
491        set_sched_policy(0, mPreviousSchedulingGroup);
492    }
493}
494
495bool AudioTrack::stopped() const
496{
497    AutoMutex lock(mLock);
498    return mState != STATE_ACTIVE;
499}
500
501void AudioTrack::flush()
502{
503    if (mSharedBuffer != 0) {
504        return;
505    }
506    AutoMutex lock(mLock);
507    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
508        return;
509    }
510    flush_l();
511}
512
513void AudioTrack::flush_l()
514{
515    ALOG_ASSERT(mState != STATE_ACTIVE);
516
517    // clear playback marker and periodic update counter
518    mMarkerPosition = 0;
519    mMarkerReached = false;
520    mUpdatePeriod = 0;
521    mRefreshRemaining = true;
522
523    mState = STATE_FLUSHED;
524    if (isOffloaded_l()) {
525        mProxy->interrupt();
526    }
527    mProxy->flush();
528    mAudioTrack->flush();
529}
530
531void AudioTrack::pause()
532{
533    AutoMutex lock(mLock);
534    if (mState == STATE_ACTIVE) {
535        mState = STATE_PAUSED;
536    } else if (mState == STATE_STOPPING) {
537        mState = STATE_PAUSED_STOPPING;
538    } else {
539        return;
540    }
541    mProxy->interrupt();
542    mAudioTrack->pause();
543}
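
// A sketch of the typical client-side lifecycle around the state transitions handled
// above.  Illustrative only: the helper name is hypothetical, error handling is reduced
// to logging, and "track" is assumed to be a fully initialized AudioTrack.
#if 0
static void exampleLifecycle(const sp<AudioTrack>& track)           // hypothetical
{
    status_t status = track->start();   // STOPPED/FLUSHED/PAUSED -> ACTIVE
    if (status != NO_ERROR) {
        ALOGE("start() failed, status %d", status);
        return;
    }
    // ... supply data via write(), obtainBuffer()/releaseBuffer(), or the callback ...
    track->pause();                     // ACTIVE -> PAUSED
    track->start();                     // resume
    track->stop();                      // ACTIVE -> STOPPED (or STOPPING if offloaded)
    if (track->stopped()) {
        track->flush();                 // discard pending data; a no-op for shared buffers
    }
}
#endif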
544
545status_t AudioTrack::setVolume(float left, float right)
546{
547    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
548        return BAD_VALUE;
549    }
550
551    AutoMutex lock(mLock);
552    mVolume[LEFT] = left;
553    mVolume[RIGHT] = right;
554
555    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
556
557    if (isOffloaded_l()) {
558        mAudioTrack->signal();
559    }
560    return NO_ERROR;
561}
562
563status_t AudioTrack::setVolume(float volume)
564{
565    return setVolume(volume, volume);
566}
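
// setVolume() forwards the two gains to the server as a packed 4.12 fixed-point pair
// (right gain in the high 16 bits, left gain in the low 16 bits), using the expression
// passed to setVolumeLR() above.  For example, left = 0.5f and right = 1.0f become
// 0x0800 and 0x1000, packed as 0x10000800.  A sketch of the packing (the helper name
// is hypothetical):
#if 0
static uint32_t examplePackVolumeLR(float left, float right)        // hypothetical
{
    // Scale by 0x1000, which represents 1.0 in 4.12 fixed point.
    return (uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000);
}
#endif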
567
568status_t AudioTrack::setAuxEffectSendLevel(float level)
569{
570    if (level < 0.0f || level > 1.0f) {
571        return BAD_VALUE;
572    }
573
574    AutoMutex lock(mLock);
575    mSendLevel = level;
576    mProxy->setSendLevel(level);
577
578    return NO_ERROR;
579}
580
581void AudioTrack::getAuxEffectSendLevel(float* level) const
582{
583    if (level != NULL) {
584        *level = mSendLevel;
585    }
586}
587
588status_t AudioTrack::setSampleRate(uint32_t rate)
589{
590    if (mIsTimed || isOffloaded()) {
591        return INVALID_OPERATION;
592    }
593
594    uint32_t afSamplingRate;
595    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
596        return NO_INIT;
597    }
598    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
599    if (rate == 0 || rate > afSamplingRate*2 ) {
600        return BAD_VALUE;
601    }
602
603    AutoMutex lock(mLock);
604    mSampleRate = rate;
605    mProxy->setSampleRate(rate);
606
607    return NO_ERROR;
608}
609
610uint32_t AudioTrack::getSampleRate() const
611{
612    if (mIsTimed) {
613        return 0;
614    }
615
616    AutoMutex lock(mLock);
617
618    // sample rate can be updated during playback by the offloaded decoder so we need to
619    // query the HAL and update if needed.
620// FIXME use Proxy return channel to update the rate from server and avoid polling here
621    if (isOffloaded_l()) {
622        if (mOutput != 0) {
623            uint32_t sampleRate = 0;
624            status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
625            if (status == NO_ERROR) {
626                mSampleRate = sampleRate;
627            }
628        }
629    }
630    return mSampleRate;
631}
632
633status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
634{
635    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
636        return INVALID_OPERATION;
637    }
638
639    if (loopCount == 0) {
640        ;
641    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
642            loopEnd - loopStart >= MIN_LOOP) {
643        ;
644    } else {
645        return BAD_VALUE;
646    }
647
648    AutoMutex lock(mLock);
649    // See setPosition() regarding setting parameters such as loop points or position while active
650    if (mState == STATE_ACTIVE) {
651        return INVALID_OPERATION;
652    }
653    setLoop_l(loopStart, loopEnd, loopCount);
654    return NO_ERROR;
655}
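
// A sketch of looping a static (shared buffer) track.  Per the checks above, setLoop()
// only succeeds on a stopped, non-offloaded static track.  Illustrative only: the helper
// name is hypothetical, it assumes <binder/MemoryDealer.h> is available, and a real
// client would fill the buffer with 16-bit PCM before starting.
#if 0
static sp<AudioTrack> exampleLoopingStaticTrack(size_t frames)      // hypothetical
{
    const size_t sizeBytes = frames * 2 /* channels */ * sizeof(int16_t);
    sp<MemoryDealer> dealer = new MemoryDealer(sizeBytes, "ExampleStaticTrack");
    sp<IMemory> sharedBuffer = dealer->allocate(sizeBytes);
    if (sharedBuffer == 0) {
        return 0;
    }
    // ... write interleaved 16-bit stereo PCM into sharedBuffer->pointer() ...
    sp<AudioTrack> track = new AudioTrack(
            AUDIO_STREAM_MUSIC, 44100, AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO, sharedBuffer);
    if (track->initCheck() != NO_ERROR) {
        return 0;
    }
    // Loop the whole buffer forever (-1), subject to the MIN_LOOP length check above.
    track->setLoop(0, frames, -1);
    track->start();
    return track;
}
#endif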
656
657void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
658{
659    // FIXME If setting a loop also sets position to start of loop, then
660    //       this is correct.  Otherwise it should be removed.
661    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
662    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
663    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
664}
665
666status_t AudioTrack::setMarkerPosition(uint32_t marker)
667{
668    // The only purpose of setting marker position is to get a callback
669    if (mCbf == NULL || isOffloaded()) {
670        return INVALID_OPERATION;
671    }
672
673    AutoMutex lock(mLock);
674    mMarkerPosition = marker;
675    mMarkerReached = false;
676
677    return NO_ERROR;
678}
679
680status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
681{
682    if (isOffloaded()) {
683        return INVALID_OPERATION;
684    }
685    if (marker == NULL) {
686        return BAD_VALUE;
687    }
688
689    AutoMutex lock(mLock);
690    *marker = mMarkerPosition;
691
692    return NO_ERROR;
693}
694
695status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
696{
697    // The only purpose of setting position update period is to get a callback
698    if (mCbf == NULL || isOffloaded()) {
699        return INVALID_OPERATION;
700    }
701
702    AutoMutex lock(mLock);
703    mNewPosition = mProxy->getPosition() + updatePeriod;
704    mUpdatePeriod = updatePeriod;
705    return NO_ERROR;
706}
707
708status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
709{
710    if (isOffloaded()) {
711        return INVALID_OPERATION;
712    }
713    if (updatePeriod == NULL) {
714        return BAD_VALUE;
715    }
716
717    AutoMutex lock(mLock);
718    *updatePeriod = mUpdatePeriod;
719
720    return NO_ERROR;
721}
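
// Markers and periodic position updates only make sense when a callback was supplied,
// per the mCbf checks above: the callback receives EVENT_MARKER once and EVENT_NEW_POS
// every updatePeriod frames, with the frame position passed via *info (see
// processAudioBuffer() below).  A sketch, with hypothetical helper names:
#if 0
static void examplePositionCallback(int event, void* user, void* info)     // hypothetical
{
    switch (event) {
    case AudioTrack::EVENT_MARKER:
        ALOGV("marker reached at frame %u", (unsigned) *(size_t *) info);
        break;
    case AudioTrack::EVENT_NEW_POS:
        ALOGV("periodic update at frame %u", (unsigned) *(size_t *) info);
        break;
    default:
        break;
    }
}

static void exampleArmPositionCallbacks(const sp<AudioTrack>& track)        // hypothetical
{
    track->setMarkerPosition(44100);        // one-shot callback after ~1 s at 44.1 kHz
    track->setPositionUpdatePeriod(4410);   // periodic callback every ~100 ms
}
#endif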
722
723status_t AudioTrack::setPosition(uint32_t position)
724{
725    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
726        return INVALID_OPERATION;
727    }
728    if (position > mFrameCount) {
729        return BAD_VALUE;
730    }
731
732    AutoMutex lock(mLock);
733    // Currently we require that the player is inactive before setting parameters such as position
734    // or loop points.  Otherwise, there could be a race condition: the application could read the
735    // current position, compute a new position or loop parameters, and then set that position or
736    // loop parameters but it would do the "wrong" thing since the position has continued to advance
737    // in the meantime.  If we ever provide a sequencer in the server, we could allow a way for the app
738    // to specify how it wants to handle such scenarios.
739    if (mState == STATE_ACTIVE) {
740        return INVALID_OPERATION;
741    }
742    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
743    mLoopPeriod = 0;
744    // FIXME Check whether loops and setting position are incompatible in old code.
745    // If we use setLoop for both purposes we lose the capability to set the position while looping.
746    mStaticProxy->setLoop(position, mFrameCount, 0);
747
748    return NO_ERROR;
749}
750
751status_t AudioTrack::getPosition(uint32_t *position) const
752{
753    if (position == NULL) {
754        return BAD_VALUE;
755    }
756
757    AutoMutex lock(mLock);
758    if (isOffloaded_l()) {
759        uint32_t dspFrames = 0;
760
761        if (mOutput != 0) {
762            uint32_t halFrames;
763            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
764        }
765        *position = dspFrames;
766    } else {
767        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
768        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
769                mProxy->getPosition();
770    }
771    return NO_ERROR;
772}
773
774status_t AudioTrack::getBufferPosition(size_t *position)
775{
776    if (mSharedBuffer == 0 || mIsTimed) {
777        return INVALID_OPERATION;
778    }
779    if (position == NULL) {
780        return BAD_VALUE;
781    }
782
783    AutoMutex lock(mLock);
784    *position = mStaticProxy->getBufferPosition();
785    return NO_ERROR;
786}
787
788status_t AudioTrack::reload()
789{
790    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
791        return INVALID_OPERATION;
792    }
793
794    AutoMutex lock(mLock);
795    // See setPosition() regarding setting parameters such as loop points or position while active
796    if (mState == STATE_ACTIVE) {
797        return INVALID_OPERATION;
798    }
799    mNewPosition = mUpdatePeriod;
800    mLoopPeriod = 0;
801    // FIXME The new code cannot reload while keeping a loop specified.
802    // Need to check how the old code handled this, and whether it's a significant change.
803    mStaticProxy->setLoop(0, mFrameCount, 0);
804    return NO_ERROR;
805}
806
807audio_io_handle_t AudioTrack::getOutput()
808{
809    AutoMutex lock(mLock);
810    return mOutput;
811}
812
813// must be called with mLock held
814audio_io_handle_t AudioTrack::getOutput_l()
815{
816    if (mOutput) {
817        return mOutput;
818    } else {
819        return AudioSystem::getOutput(mStreamType,
820                                      mSampleRate, mFormat, mChannelMask, mFlags);
821    }
822}
823
824status_t AudioTrack::attachAuxEffect(int effectId)
825{
826    AutoMutex lock(mLock);
827    status_t status = mAudioTrack->attachAuxEffect(effectId);
828    if (status == NO_ERROR) {
829        mAuxEffectId = effectId;
830    }
831    return status;
832}
833
834// -------------------------------------------------------------------------
835
836// must be called with mLock held
837status_t AudioTrack::createTrack_l(
838        audio_stream_type_t streamType,
839        uint32_t sampleRate,
840        audio_format_t format,
841        size_t frameCount,
842        audio_output_flags_t flags,
843        const sp<IMemory>& sharedBuffer,
844        audio_io_handle_t output,
845        size_t epoch)
846{
847    status_t status;
848    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
849    if (audioFlinger == 0) {
850        ALOGE("Could not get audioflinger");
851        return NO_INIT;
852    }
853
854    // Not all of these values are needed under all conditions, but it is easier to get them all
855
856    uint32_t afLatency;
857    status = AudioSystem::getLatency(output, streamType, &afLatency);
858    if (status != NO_ERROR) {
859        ALOGE("getLatency(%d) failed status %d", output, status);
860        return NO_INIT;
861    }
862
863    size_t afFrameCount;
864    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
865    if (status != NO_ERROR) {
866        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
867        return NO_INIT;
868    }
869
870    uint32_t afSampleRate;
871    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
872    if (status != NO_ERROR) {
873        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
874        return NO_INIT;
875    }
876
877    // Client decides whether the track is TIMED (see below), but can only express a preference
878    // for FAST.  Server will perform additional tests.
879    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
880            // either of these use cases:
881            // use case 1: shared buffer
882            (sharedBuffer != 0) ||
883            // use case 2: callback handler
884            (mCbf != NULL))) {
885        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
886        // once denied, do not request again if IAudioTrack is re-created
887        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
888        mFlags = flags;
889    }
890    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
891
892    // The client's AudioTrack buffer is divided into n parts for the purpose of wakeup by the server, where
893    //  n = 1   fast track with single buffering; nBuffering is ignored
894    //  n = 2   fast track with double buffering
895    //  n = 2   normal track, no sample rate conversion
896    //  n = 3   normal track, with sample rate conversion
897    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
898    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
899    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
900
901    mNotificationFramesAct = mNotificationFramesReq;
902
903    if (!audio_is_linear_pcm(format)) {
904
905        if (sharedBuffer != 0) {
906            // Same comment as below about ignoring frameCount parameter for set()
907            frameCount = sharedBuffer->size();
908        } else if (frameCount == 0) {
909            frameCount = afFrameCount;
910        }
911        if (mNotificationFramesAct != frameCount) {
912            mNotificationFramesAct = frameCount;
913        }
914    } else if (sharedBuffer != 0) {
915
916        // Ensure that buffer alignment matches channel count
917        // 8-bit data in shared memory is not currently supported by AudioFlinger
918        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
919        if (mChannelCount > 1) {
920            // More than 2 channels does not require stronger alignment than stereo
921            alignment <<= 1;
922        }
923        if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
924            ALOGE("Invalid buffer alignment: address %p, channel count %u",
925                    sharedBuffer->pointer(), mChannelCount);
926            return BAD_VALUE;
927        }
928
929        // When initializing a shared buffer AudioTrack via constructors,
930        // there's no frameCount parameter.
931        // But when initializing a shared buffer AudioTrack via set(),
932        // there _is_ a frameCount parameter.  We silently ignore it.
933        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
934
935    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
936
937        // FIXME move these calculations and associated checks to server
938
939        // Ensure that buffer depth covers at least audio hardware latency
940        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
941        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
942                afFrameCount, minBufCount, afSampleRate, afLatency);
943        if (minBufCount <= nBuffering) {
944            minBufCount = nBuffering;
945        }
946
947        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
948        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
949                ", afLatency=%d",
950                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
951
952        if (frameCount == 0) {
953            frameCount = minFrameCount;
954        } else if (frameCount < minFrameCount) {
955            // not ALOGW because it happens all the time when playing key clicks over A2DP
956            ALOGV("Minimum buffer size corrected from %d to %d",
957                     frameCount, minFrameCount);
958            frameCount = minFrameCount;
959        }
960        // Make sure that application is notified with sufficient margin before underrun
961        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
962            mNotificationFramesAct = frameCount/nBuffering;
963        }
964
965    } else {
966        // For fast tracks, the frame count calculations and checks are done by server
967    }
968
969    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
970    if (mIsTimed) {
971        trackFlags |= IAudioFlinger::TRACK_TIMED;
972    }
973
974    pid_t tid = -1;
975    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
976        trackFlags |= IAudioFlinger::TRACK_FAST;
977        if (mAudioTrackThread != 0) {
978            tid = mAudioTrackThread->getTid();
979        }
980    }
981
982    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
983        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
984    }
985
986    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
987                                                      sampleRate,
988                                                      // AudioFlinger only sees 16-bit PCM
989                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
990                                                              AUDIO_FORMAT_PCM_16_BIT : format,
991                                                      mChannelMask,
992                                                      frameCount,
993                                                      &trackFlags,
994                                                      sharedBuffer,
995                                                      output,
996                                                      tid,
997                                                      &mSessionId,
998                                                      mName,
999                                                      mClientUid,
1000                                                      &status);
1001
1002    if (track == 0) {
1003        ALOGE("AudioFlinger could not create track, status: %d", status);
1004        return status;
1005    }
1006    sp<IMemory> iMem = track->getCblk();
1007    if (iMem == 0) {
1008        ALOGE("Could not get control block");
1009        return NO_INIT;
1010    }
1011    void *iMemPointer = iMem->pointer();
1012    if (iMemPointer == NULL) {
1013        ALOGE("Could not get control block pointer");
1014        return NO_INIT;
1015    }
1016    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1017    if (mAudioTrack != 0) {
1018        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1019        mDeathNotifier.clear();
1020    }
1021    mAudioTrack = track;
1022    mCblkMemory = iMem;
1023    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1024    mCblk = cblk;
1025    size_t temp = cblk->frameCount_;
1026    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1027        // In the current design, the AudioTrack client checks and ensures frame count validity
1028        // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
1029        // except for fast tracks, which use a special method of assigning the frame count.
1030        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
1031    }
1032    frameCount = temp;
1033    mAwaitBoost = false;
1034    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1035        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1036            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
1037            mAwaitBoost = true;
1038            if (sharedBuffer == 0) {
1039                // Theoretically double-buffering is not required for fast tracks,
1040                // due to tighter scheduling.  But in practice, to accommodate kernels with
1041                // scheduling jitter, and apps with computation jitter, we use double-buffering.
1042                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1043                    mNotificationFramesAct = frameCount/nBuffering;
1044                }
1045            }
1046        } else {
1047            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1048            // once denied, do not request again if IAudioTrack is re-created
1049            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
1050            mFlags = flags;
1051            if (sharedBuffer == 0) {
1052                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1053                    mNotificationFramesAct = frameCount/nBuffering;
1054                }
1055            }
1056        }
1057    }
1058    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1059        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1060            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1061        } else {
1062            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1063            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1064            mFlags = flags;
1065            return NO_INIT;
1066        }
1067    }
1068
1069    mRefreshRemaining = true;
1070
1071    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1072    // is the value of pointer() for the shared buffer, otherwise buffers points
1073    // immediately after the control block.  This address is for the mapping within client
1074    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1075    void* buffers;
1076    if (sharedBuffer == 0) {
1077        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1078    } else {
1079        buffers = sharedBuffer->pointer();
1080    }
1081
1082    mAudioTrack->attachAuxEffect(mAuxEffectId);
1083    // FIXME don't believe this lie
1084    mLatency = afLatency + (1000*frameCount) / sampleRate;
1085    mFrameCount = frameCount;
1086    // If IAudioTrack is re-created, don't let the requested frameCount
1087    // decrease.  This can confuse clients that cache frameCount().
1088    if (frameCount > mReqFrameCount) {
1089        mReqFrameCount = frameCount;
1090    }
1091
1092    // update proxy
1093    if (sharedBuffer == 0) {
1094        mStaticProxy.clear();
1095        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1096    } else {
1097        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1098        mProxy = mStaticProxy;
1099    }
1100    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
1101            uint16_t(mVolume[LEFT] * 0x1000));
1102    mProxy->setSendLevel(mSendLevel);
1103    mProxy->setSampleRate(mSampleRate);
1104    mProxy->setEpoch(epoch);
1105    mProxy->setMinimum(mNotificationFramesAct);
1106
1107    mDeathNotifier = new DeathNotifier(this);
1108    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1109
1110    return NO_ERROR;
1111}
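
// Note on the latency estimate computed above (hence the FIXME "don't believe this lie"):
// continuing the hypothetical numbers used earlier, afLatency = 80 ms, frameCount = 3528
// and sampleRate = 44100 give
//     mLatency = 80 + (1000 * 3528) / 44100 = 80 + 80 = 160 ms,
// i.e. the HAL/mixer latency plus one full client buffer.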
1112
1113status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1114{
1115    if (audioBuffer == NULL) {
1116        return BAD_VALUE;
1117    }
1118    if (mTransfer != TRANSFER_OBTAIN) {
1119        audioBuffer->frameCount = 0;
1120        audioBuffer->size = 0;
1121        audioBuffer->raw = NULL;
1122        return INVALID_OPERATION;
1123    }
1124
1125    const struct timespec *requested;
1126    if (waitCount == -1) {
1127        requested = &ClientProxy::kForever;
1128    } else if (waitCount == 0) {
1129        requested = &ClientProxy::kNonBlocking;
1130    } else if (waitCount > 0) {
1131        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1132        struct timespec timeout;
1133        timeout.tv_sec = ms / 1000;
1134        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1135        requested = &timeout;
1136    } else {
1137        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1138        requested = NULL;
1139    }
1140    return obtainBuffer(audioBuffer, requested);
1141}
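
// A sketch of the obtainBuffer()/releaseBuffer() flow used with TRANSFER_OBTAIN.
// waitCount follows the convention above: -1 blocks forever, 0 is non-blocking, and a
// positive value waits waitCount * WAIT_PERIOD_MS milliseconds in total.  Illustrative
// only: the helper name is hypothetical and the PCM source is a placeholder.
#if 0
static void exampleObtainReleaseLoop(const sp<AudioTrack>& track,           // hypothetical
        const int16_t* pcm, size_t frames)
{
    while (frames > 0) {
        AudioTrack::Buffer buffer;
        buffer.frameCount = frames;
        status_t err = track->obtainBuffer(&buffer, -1 /* wait forever */);
        if (err != NO_ERROR) {
            ALOGE("obtainBuffer() failed, status %d", err);
            break;
        }
        // buffer.frameCount may be less than requested; buffer.size is in bytes.
        memcpy(buffer.raw, pcm, buffer.size);
        pcm += buffer.size / sizeof(int16_t);
        frames -= buffer.frameCount;
        track->releaseBuffer(&buffer);
    }
}
#endif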
1142
1143status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1144        struct timespec *elapsed, size_t *nonContig)
1145{
1146    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1147    uint32_t oldSequence = 0;
1148    uint32_t newSequence;
1149
1150    Proxy::Buffer buffer;
1151    status_t status = NO_ERROR;
1152
1153    static const int32_t kMaxTries = 5;
1154    int32_t tryCounter = kMaxTries;
1155
1156    do {
1157        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1158        // keep them from going away if another thread re-creates the track during obtainBuffer()
1159        sp<AudioTrackClientProxy> proxy;
1160        sp<IMemory> iMem;
1161
1162        {   // start of lock scope
1163            AutoMutex lock(mLock);
1164
1165            newSequence = mSequence;
1166            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1167            if (status == DEAD_OBJECT) {
1168                // re-create track, unless someone else has already done so
1169                if (newSequence == oldSequence) {
1170                    status = restoreTrack_l("obtainBuffer");
1171                    if (status != NO_ERROR) {
1172                        buffer.mFrameCount = 0;
1173                        buffer.mRaw = NULL;
1174                        buffer.mNonContig = 0;
1175                        break;
1176                    }
1177                }
1178            }
1179            oldSequence = newSequence;
1180
1181            // Keep the extra references
1182            proxy = mProxy;
1183            iMem = mCblkMemory;
1184
1185            if (mState == STATE_STOPPING) {
1186                status = -EINTR;
1187                buffer.mFrameCount = 0;
1188                buffer.mRaw = NULL;
1189                buffer.mNonContig = 0;
1190                break;
1191            }
1192
1193            // Non-blocking if track is stopped or paused
1194            if (mState != STATE_ACTIVE) {
1195                requested = &ClientProxy::kNonBlocking;
1196            }
1197
1198        }   // end of lock scope
1199
1200        buffer.mFrameCount = audioBuffer->frameCount;
1201        // FIXME starts the requested timeout and elapsed over from scratch
1202        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1203
1204    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1205
1206    audioBuffer->frameCount = buffer.mFrameCount;
1207    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1208    audioBuffer->raw = buffer.mRaw;
1209    if (nonContig != NULL) {
1210        *nonContig = buffer.mNonContig;
1211    }
1212    return status;
1213}
1214
1215void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1216{
1217    if (mTransfer == TRANSFER_SHARED) {
1218        return;
1219    }
1220
1221    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1222    if (stepCount == 0) {
1223        return;
1224    }
1225
1226    Proxy::Buffer buffer;
1227    buffer.mFrameCount = stepCount;
1228    buffer.mRaw = audioBuffer->raw;
1229
1230    AutoMutex lock(mLock);
1231    mInUnderrun = false;
1232    mProxy->releaseBuffer(&buffer);
1233
1234    // restart track if it was disabled by audioflinger due to previous underrun
1235    if (mState == STATE_ACTIVE) {
1236        audio_track_cblk_t* cblk = mCblk;
1237        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1238            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
1239                    this, mName.string());
1240            // FIXME ignoring status
1241            mAudioTrack->start();
1242        }
1243    }
1244}
1245
1246// -------------------------------------------------------------------------
1247
1248ssize_t AudioTrack::write(const void* buffer, size_t userSize)
1249{
1250    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1251        return INVALID_OPERATION;
1252    }
1253
1254    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1255        // Sanity-check: the user is most likely passing an error code, and it would
1256        // make the return value ambiguous (actualSize vs error).
1257        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d))", buffer, userSize, userSize);
1258        return BAD_VALUE;
1259    }
1260
1261    size_t written = 0;
1262    Buffer audioBuffer;
1263
1264    while (userSize >= mFrameSize) {
1265        audioBuffer.frameCount = userSize / mFrameSize;
1266
1267        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
1268        if (err < 0) {
1269            if (written > 0) {
1270                break;
1271            }
1272            return ssize_t(err);
1273        }
1274
1275        size_t toWrite;
1276        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1277            // Divide capacity by 2 to take expansion into account
1278            toWrite = audioBuffer.size >> 1;
1279            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1280        } else {
1281            toWrite = audioBuffer.size;
1282            memcpy(audioBuffer.i8, buffer, toWrite);
1283        }
1284        buffer = ((const char *) buffer) + toWrite;
1285        userSize -= toWrite;
1286        written += toWrite;
1287
1288        releaseBuffer(&audioBuffer);
1289    }
1290
1291    return written;
1292}
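
// A sketch of blocking writes with TRANSFER_SYNC (the default when no callback is
// supplied).  write() returns the number of bytes consumed, which can be less than
// requested if an error interrupts the loop above, so callers usually loop.  The helper
// name is hypothetical and the PCM source is a placeholder.
#if 0
static ssize_t exampleBlockingWrite(const sp<AudioTrack>& track,            // hypothetical
        const void* pcm, size_t sizeBytes)
{
    size_t written = 0;
    while (written < sizeBytes) {
        ssize_t n = track->write((const char *) pcm + written, sizeBytes - written);
        if (n < 0) {
            return n;       // propagate the error code
        }
        written += n;
    }
    return written;
}
#endif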
1293
1294// -------------------------------------------------------------------------
1295
1296TimedAudioTrack::TimedAudioTrack() {
1297    mIsTimed = true;
1298}
1299
1300status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1301{
1302    AutoMutex lock(mLock);
1303    status_t result = UNKNOWN_ERROR;
1304
1305#if 1
1306    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1307    // while we are accessing the cblk
1308    sp<IAudioTrack> audioTrack = mAudioTrack;
1309    sp<IMemory> iMem = mCblkMemory;
1310#endif
1311
1312    // If the track is not already invalid, try to allocate a buffer.  If the
1313    // allocation fails, indicating that the server is dead, flag the track as
1314    // invalid so we can attempt to restore it in just a bit.
1315    audio_track_cblk_t* cblk = mCblk;
1316    if (!(cblk->mFlags & CBLK_INVALID)) {
1317        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1318        if (result == DEAD_OBJECT) {
1319            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1320        }
1321    }
1322
1323    // If the track is invalid at this point, attempt to restore it and try the
1324    // allocation one more time.
1325    if (cblk->mFlags & CBLK_INVALID) {
1326        result = restoreTrack_l("allocateTimedBuffer");
1327
1328        if (result == NO_ERROR) {
1329            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1330        }
1331    }
1332
1333    return result;
1334}
1335
1336status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1337                                           int64_t pts)
1338{
1339    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1340    {
1341        AutoMutex lock(mLock);
1342        audio_track_cblk_t* cblk = mCblk;
1343        // restart track if it was disabled by audioflinger due to previous underrun
1344        if (buffer->size() != 0 && status == NO_ERROR &&
1345                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1346            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1347            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1348            // FIXME ignoring status
1349            mAudioTrack->start();
1350        }
1351    }
1352    return status;
1353}
1354
1355status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1356                                                TargetTimeline target)
1357{
1358    return mAudioTrack->setMediaTimeTransform(xform, target);
1359}
1360
1361// -------------------------------------------------------------------------
1362
1363nsecs_t AudioTrack::processAudioBuffer()
1364{
1365    // Currently the AudioTrack thread is not created if there are no callbacks.
1366    // Would it ever make sense to run the thread, even without callbacks?
1367    // If so, then replace this by checks at each use for mCbf != NULL.
1368    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1369
1370    mLock.lock();
1371    if (mAwaitBoost) {
1372        mAwaitBoost = false;
1373        mLock.unlock();
1374        static const int32_t kMaxTries = 5;
1375        int32_t tryCounter = kMaxTries;
1376        uint32_t pollUs = 10000;
1377        do {
1378            int policy = sched_getscheduler(0);
1379            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1380                break;
1381            }
1382            usleep(pollUs);
1383            pollUs <<= 1;
1384        } while (tryCounter-- > 0);
1385        if (tryCounter < 0) {
1386            ALOGE("did not receive expected priority boost on time");
1387        }
1388        // Run again immediately
1389        return 0;
1390    }
1391
1392    // Can only reference mCblk while locked
1393    int32_t flags = android_atomic_and(
1394        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1395
1396    // Check for track invalidation
1397    if (flags & CBLK_INVALID) {
1398        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1399        // AudioSystem cache. We should not exit here but after calling the callback so
1400        // that the upper layers can recreate the track
1401        if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
1402            status_t status = restoreTrack_l("processAudioBuffer");
1403            mLock.unlock();
1404            // Run again immediately, but with a new IAudioTrack
1405            return 0;
1406        }
1407    }
1408
1409    bool waitStreamEnd = mState == STATE_STOPPING;
1410    bool active = mState == STATE_ACTIVE;
1411
1412    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1413    bool newUnderrun = false;
1414    if (flags & CBLK_UNDERRUN) {
1415#if 0
1416        // Currently in shared buffer mode, when the server reaches the end of buffer,
1417        // the track stays active in continuous underrun state.  It's up to the application
1418        // to pause or stop the track, or set the position to a new offset within the buffer.
1419        // This was some experimental code to auto-pause on underrun.   Keeping it here
1420        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1421        if (mTransfer == TRANSFER_SHARED) {
1422            mState = STATE_PAUSED;
1423            active = false;
1424        }
1425#endif
1426        if (!mInUnderrun) {
1427            mInUnderrun = true;
1428            newUnderrun = true;
1429        }
1430    }
1431
1432    // Get current position of server
1433    size_t position = mProxy->getPosition();
1434
1435    // Manage marker callback
1436    bool markerReached = false;
1437    size_t markerPosition = mMarkerPosition;
1438    // FIXME fails for wraparound, need 64 bits
1439    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1440        mMarkerReached = markerReached = true;
1441    }
1442
1443    // Determine number of new position callback(s) that will be needed, while locked
1444    size_t newPosCount = 0;
1445    size_t newPosition = mNewPosition;
1446    size_t updatePeriod = mUpdatePeriod;
1447    // FIXME fails for wraparound, need 64 bits
1448    if (updatePeriod > 0 && position >= newPosition) {
1449        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1450        mNewPosition += updatePeriod * newPosCount;
1451    }
1452
1453    // Cache other fields that will be needed soon
1454    uint32_t loopPeriod = mLoopPeriod;
1455    uint32_t sampleRate = mSampleRate;
1456    size_t notificationFrames = mNotificationFramesAct;
1457    if (mRefreshRemaining) {
1458        mRefreshRemaining = false;
1459        mRemainingFrames = notificationFrames;
1460        mRetryOnPartialBuffer = false;
1461    }
1462    size_t misalignment = mProxy->getMisalignment();
1463    uint32_t sequence = mSequence;
1464
1465    // These fields don't need to be cached, because they are assigned only by set():
1466    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1467    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1468
1469    mLock.unlock();
1470
1471    if (waitStreamEnd) {
1472        AutoMutex lock(mLock);
1473
1474        sp<AudioTrackClientProxy> proxy = mProxy;
1475        sp<IMemory> iMem = mCblkMemory;
1476
1477        struct timespec timeout;
1478        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1479        timeout.tv_nsec = 0;
1480
1481        mLock.unlock();
1482        status_t status = mProxy->waitStreamEndDone(&timeout);
1483        mLock.lock();
1484        switch (status) {
1485        case NO_ERROR:
1486        case DEAD_OBJECT:
1487        case TIMED_OUT:
1488            mLock.unlock();
1489            mCbf(EVENT_STREAM_END, mUserData, NULL);
1490            mLock.lock();
1491            if (mState == STATE_STOPPING) {
1492                mState = STATE_STOPPED;
1493                if (status != DEAD_OBJECT) {
1494                   return NS_INACTIVE;
1495                }
1496            }
1497            return 0;
1498        default:
1499            return 0;
1500        }
1501    }
1502
1503    // perform callbacks while unlocked
1504    if (newUnderrun) {
1505        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1506    }
1507    // FIXME we will miss loops if loop cycle was signaled several times since last call
1508    //       to processAudioBuffer()
1509    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1510        mCbf(EVENT_LOOP_END, mUserData, NULL);
1511    }
1512    if (flags & CBLK_BUFFER_END) {
1513        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1514    }
1515    if (markerReached) {
1516        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1517    }
1518    while (newPosCount > 0) {
1519        size_t temp = newPosition;
1520        mCbf(EVENT_NEW_POS, mUserData, &temp);
1521        newPosition += updatePeriod;
1522        newPosCount--;
1523    }
1524
1525    if (mObservedSequence != sequence) {
1526        mObservedSequence = sequence;
1527        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1528        // for offloaded tracks, just wait for the upper layers to recreate the track
1529        if (isOffloaded()) {
1530            return NS_INACTIVE;
1531        }
1532    }
1533
1534    // if inactive, then don't run me again until re-started
1535    if (!active) {
1536        return NS_INACTIVE;
1537    }
1538
1539    // Compute the estimated time until the next timed event (position, markers, loops)
1540    // FIXME only for non-compressed audio
1541    uint32_t minFrames = ~0;
1542    if (!markerReached && position < markerPosition) {
1543        minFrames = markerPosition - position;
1544    }
1545    if (loopPeriod > 0 && loopPeriod < minFrames) {
1546        minFrames = loopPeriod;
1547    }
1548    if (updatePeriod > 0 && updatePeriod < minFrames) {
1549        minFrames = updatePeriod;
1550    }

    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
    static const uint32_t kPoll = 0;
    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
        minFrames = kPoll * notificationFrames;
    }

    // Convert frame units to time units
    nsecs_t ns = NS_WHENEVER;
    if (minFrames != (uint32_t) ~0) {
        // This "fudge factor" avoids soaking the CPU, and compensates for late progress by the server
        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
    }
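    // Illustrative arithmetic (hypothetical values): minFrames == 4800 at sampleRate == 48000
    // gives (4800 * 1e9) / 48000 = 100 ms, plus the 10 ms fudge, so ns is about 110 ms.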

    // If not supplying data by EVENT_MORE_DATA, then we're done
    if (mTransfer != TRANSFER_CALLBACK) {
        return ns;
    }

    struct timespec timeout;
    const struct timespec *requested = &ClientProxy::kForever;
    if (ns != NS_WHENEVER) {
        timeout.tv_sec = ns / 1000000000LL;
        timeout.tv_nsec = ns % 1000000000LL;
        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
        requested = &timeout;
    }
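    // Only the first obtainBuffer() in the loop below may block, for up to the requested
    // timeout; after that, requested is switched to kNonBlocking so that a single call to
    // processAudioBuffer() never sleeps more than once.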

    while (mRemainingFrames > 0) {

        Buffer audioBuffer;
        audioBuffer.frameCount = mRemainingFrames;
        size_t nonContig;
        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
        requested = &ClientProxy::kNonBlocking;
        size_t avail = audioBuffer.frameCount + nonContig;
        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
        if (err != NO_ERROR) {
            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
                    (isOffloaded() && (err == DEAD_OBJECT))) {
                return 0;
            }
            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
            return NS_NEVER;
        }

        if (mRetryOnPartialBuffer && !isOffloaded()) {
            mRetryOnPartialBuffer = false;
            if (avail < mRemainingFrames) {
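                // The 1100000000LL scale is ~1.1 s per second of audio, i.e. the nominal
                // duration of the still-missing frames plus roughly a 10% margin, presumably
                // so that the retry lands after the server has actually freed more space.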
                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
                if (ns < 0 || myns < ns) {
                    ns = myns;
                }
                return ns;
            }
        }

        // Divide buffer size by 2 to take into account the expansion
        // due to 8 to 16 bit conversion: the callback must fill only half
        // of the destination buffer
        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            audioBuffer.size >>= 1;
        }
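        // Illustrative example (sizes are hypothetical): for an 8-bit PCM client track the
        // server-side samples are 16-bit, so a 4096-byte obtained buffer is presented to the
        // callback as 2048 bytes; the in-place u8 -> i16 expansion below restores 4096 bytes.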

        size_t reqSize = audioBuffer.size;
        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
        size_t writtenSize = audioBuffer.size;
        size_t writtenFrames = writtenSize / mFrameSize;

        // Sanity check on returned size
        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
                    reqSize, (int) writtenSize);
            return NS_NEVER;
        }

        if (writtenSize == 0) {
            // The callback is done filling buffers.
            // Keep this thread going to handle timed events and
            // still try to get more data in intervals of WAIT_PERIOD_MS,
            // but don't just busy-loop and hog the CPU, so wait instead.
            return WAIT_PERIOD_MS * 1000000LL;
        }

        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            // 8 to 16 bit conversion, note that source and destination are the same address
            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
            audioBuffer.size <<= 1;
        }

        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
        audioBuffer.frameCount = releasedFrames;
        mRemainingFrames -= releasedFrames;
        if (misalignment >= releasedFrames) {
            misalignment -= releasedFrames;
        } else {
            misalignment = 0;
        }
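        // misalignment appears to be consumed only by the disabled heuristic below (#if 0);
        // keeping it up to date here is cheap and leaves that code correct if re-enabled.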

        releaseBuffer(&audioBuffer);

        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
        // if the callback does not accept the full chunk
        if (writtenSize < reqSize) {
            continue;
        }

        // There could be enough non-contiguous frames available to satisfy the remaining request
        if (mRemainingFrames <= nonContig) {
            continue;
        }

#if 0
        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
        // that total to a sum == notificationFrames.
        if (0 < misalignment && misalignment <= mRemainingFrames) {
            mRemainingFrames = misalignment;
            return (mRemainingFrames * 1100000000LL) / sampleRate;
        }
#endif

    }
    mRemainingFrames = notificationFrames;
    mRetryOnPartialBuffer = true;

    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
    return 0;
}

status_t AudioTrack::restoreTrack_l(const char *from)
{
    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
          isOffloaded_l() ? "Offloaded" : "PCM", from);
    ++mSequence;
    status_t result;

    // refresh the audio configuration cache in this process to make sure we get new
    // output parameters in getOutput_l() and createTrack_l()
    AudioSystem::clearAudioConfigCache();

    if (isOffloaded_l()) {
        // FIXME re-creation of offloaded tracks is not yet implemented
        return DEAD_OBJECT;
    }

    // force a new output query from the audio policy manager
    mOutput = 0;
    audio_io_handle_t output = getOutput_l();

    // If a new IAudioTrack is created, createTrack_l() will modify the
    // following member variables: mAudioTrack, mCblkMemory and mCblk.
    // It will also drop the strong references to the previous IAudioTrack and IMemory.

    // take the frames that will be lost by track re-creation into account in the saved position
    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
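    // position is then handed to createTrack_l() as the new track's epoch, which should keep
    // the playback position reported to the client monotonic across the re-creation.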
    result = createTrack_l(mStreamType,
                           mSampleRate,
                           mFormat,
                           mReqFrameCount,  // so that frame count never goes down
                           mFlags,
                           mSharedBuffer,
                           output,
                           position /*epoch*/);

    if (result == NO_ERROR) {
        // continue playback from last known position, but
        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
        if (mStaticProxy != NULL) {
            mLoopPeriod = 0;
            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
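            // The setLoop() call above, with a loop count of 0, appears to serve only to
            // restore the static buffer's read position (bufferPosition) on the new track;
            // no actual looping is set up.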
        }
        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
        //       track destruction have been played? This is critical for the SoundPool implementation.
        //       This must be broken, and needs to be tested/debugged.
#if 0
        // restore write index and set other indexes to reflect empty buffer status
        if (!strcmp(from, "start")) {
            // Make sure that a client relying on callback events indicating underrun or
            // the actual amount of audio frames played (e.g. SoundPool) receives them.
            if (mSharedBuffer == 0) {
                // restart playback even if buffer is not completely filled.
                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
            }
        }
#endif
        if (mState == STATE_ACTIVE) {
            result = mAudioTrack->start();
        }
    }
    if (result != NO_ERROR) {
        // Use of direct and offloaded output streams is ref-counted by the audio policy manager.
        // As getOutput() was called above and resulted in an output stream being opened,
        // we need to release it.
        AudioSystem::releaseOutput(output);
        ALOGW("restoreTrack_l() failed status %d", result);
        mState = STATE_STOPPED;
    }

    return result;
}

status_t AudioTrack::setParameters(const String8& keyValuePairs)
{
    AutoMutex lock(mLock);
    return mAudioTrack->setParameters(keyValuePairs);
}

status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
{
    AutoMutex lock(mLock);
    // FIXME not implemented for fast tracks; should use proxy and SSQ
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return INVALID_OPERATION;
    }
    status_t status = mAudioTrack->getTimestamp(timestamp);
    if (status == NO_ERROR) {
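        // The server reports a position relative to the current IAudioTrack; adding the proxy
        // epoch converts it to the client-side timeline, which survives track re-creation
        // (restoreTrack_l() passes the saved position as the new track's epoch).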
        timestamp.mPosition += mProxy->getEpoch();
    }
    return status;
}

String8 AudioTrack::getParameters(const String8& keys)
{
    audio_io_handle_t output = getOutput();
    if (output != 0) {
        return AudioSystem::getParameters(output, keys);
    } else {
        return String8::empty();
    }
}

bool AudioTrack::isOffloaded() const
{
    AutoMutex lock(mLock);
    return isOffloaded_l();
}

status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
{

    const size_t SIZE = 256;
    char buffer[SIZE];
    String8 result;

    result.append(" AudioTrack::dump\n");
    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
            mVolume[0], mVolume[1]);
    result.append(buffer);
    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
            mChannelCount, mFrameCount);
    result.append(buffer);
    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
    result.append(buffer);
    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
    result.append(buffer);
    ::write(fd, result.string(), result.size());
    return NO_ERROR;
}

uint32_t AudioTrack::getUnderrunFrames() const
{
    AutoMutex lock(mLock);
    return mProxy->getUnderrunFrames();
}

// =========================================================================

void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
{
    sp<AudioTrack> audioTrack = mAudioTrack.promote();
    if (audioTrack != 0) {
        AutoMutex lock(audioTrack->mLock);
        audioTrack->mProxy->binderDied();
    }
}

// =========================================================================

AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
      mIgnoreNextPausedInt(false)
{
}

AudioTrack::AudioTrackThread::~AudioTrackThread()
{
}

bool AudioTrack::AudioTrackThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        if (mPaused) {
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
        if (mIgnoreNextPausedInt) {
            mIgnoreNextPausedInt = false;
            mPausedInt = false;
        }
        if (mPausedInt) {
            if (mPausedNs > 0) {
                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
            } else {
                mMyCond.wait(mMyLock);
            }
            mPausedInt = false;
            return true;
        }
    }
    nsecs_t ns = mReceiver.processAudioBuffer();
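    // Interpret the requested wakeup time returned by processAudioBuffer():
    //   0            run the loop again immediately
    //   NS_INACTIVE  pause internally until resume() is called
    //   NS_NEVER     exit the thread loop
    //   NS_WHENEVER  no deadline; fall back to a coarse 1 s poll
    //   > 0          sleep for approximately that many nanoseconds before the next pass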
    switch (ns) {
    case 0:
        return true;
    case NS_INACTIVE:
        pauseInternal();
        return true;
    case NS_NEVER:
        return false;
    case NS_WHENEVER:
        // FIXME increase poll interval, or make event-driven
        ns = 1000000000LL;
        // fall through
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
        pauseInternal(ns);
        return true;
    }
}

void AudioTrack::AudioTrackThread::requestExit()
{
    // must be in this order to avoid a race condition
    Thread::requestExit();
    resume();
}

void AudioTrack::AudioTrackThread::pause()
{
    AutoMutex _l(mMyLock);
    mPaused = true;
}

void AudioTrack::AudioTrackThread::resume()
{
    AutoMutex _l(mMyLock);
    mIgnoreNextPausedInt = true;
    if (mPaused || mPausedInt) {
        mPaused = false;
        mPausedInt = false;
        mMyCond.signal();
    }
}

void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
{
    AutoMutex _l(mMyLock);
    mPausedInt = true;
    mPausedNs = ns;
}

}; // namespace android