AudioTrack.cpp revision a1ebc3b03d4dca534374c19e3c4f32ee687942e3
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <sys/resource.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120


namespace android {
// ---------------------------------------------------------------------------

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME merge with similar code in createTrack_l(), except we're missing
    //       some information here that is available in createTrack_l():
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }

    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
            afFrameCount * minBufCount * sampleRate / afSampleRate;
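    // Worked example (illustrative values): with afFrameCount = 960 at afSampleRate = 48000,
    // each hardware buffer covers 20 ms, so afLatency = 60 ms gives minBufCount = 3; a client
    // also running at 48000 Hz then gets a minimum of 3 * 960 = 2880 frames.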
    // The formula above should always produce a non-zero value, but return an error
    // in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
    return NO_ERROR;
}

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT)
{
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid);
}

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        IPCThreadState::self()->flushCommands();
        AudioSystem::releaseAudioSessionId(mSessionId);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        int frameCountInt,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid)
{
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mTransfer = transferType;

    // FIXME "int" here is legacy and will be replaced by size_t later
    if (frameCountInt < 0) {
        ALOGE("Invalid frame count %d", frameCountInt);
        return BAD_VALUE;
    }
    size_t frameCount = frameCountInt;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    mOutput = 0;

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }

    status_t status;
    if (sampleRate == 0) {
        status = AudioSystem::getOutputSamplingRate(&sampleRate, streamType);
        if (status != NO_ERROR) {
            ALOGE("Could not get output sample rate for stream type %d; status %d",
                    streamType, status);
            return status;
        }
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %d", format);
        return BAD_VALUE;
    }

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }

    // AudioFlinger does not currently support 8-bit data in shared memory
    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
        ALOGE("8-bit data in shared memory is not supported");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    // only allow deep buffering for music stream type
    if (streamType != AUDIO_STREAM_MUSIC) {
        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
    }

    mChannelMask = channelMask;
    uint32_t channelCount = popcount(channelMask);
    mChannelCount = channelCount;

    if (audio_is_linear_pcm(format)) {
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        mFrameSizeAF = channelCount * sizeof(int16_t);
    } else {
        mFrameSize = sizeof(uint8_t);
        mFrameSizeAF = sizeof(uint8_t);
    }

    audio_io_handle_t output = AudioSystem::getOutput(
                                    streamType,
                                    sampleRate, format, channelMask,
                                    flags,
                                    offloadInfo);

    if (output == 0) {
        ALOGE("Could not get audio output for stream type %d", streamType);
        return BAD_VALUE;
    }

    mVolume[LEFT] = 1.0f;
    mVolume[RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    mSessionId = sessionId;
    if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status = createTrack_l(streamType,
                                  sampleRate,
                                  format,
                                  frameCount,
                                  flags,
                                  sharedBuffer,
                                  output,
                                  0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // Use of direct and offloaded output streams is ref counted by audio policy manager.
        // As getOutput was called above and resulted in an output stream to be opened,
        // we need to release it.
        AudioSystem::releaseOutput(output);
        return status;
    }

    mStatus = NO_ERROR;
    mStreamType = streamType;
    mFormat = format;
    mSharedBuffer = sharedBuffer;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopPeriod = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mOutput = output;

    return NO_ERROR;
}

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (previousState == STATE_STOPPING) {
            mProxy->interrupt();
        } else {
            t->resume();
        }
    } else {
        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
        get_sched_policy(0, &mPreviousSchedulingGroup);
        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    }

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
    }

    mProxy->interrupt();
    mAudioTrack->stop();
    // the playback head position will reset to 0, so if a marker is set, we need
    // to activate it again
    mMarkerReached = false;
#if 0
    // Force flush if a shared buffer is used otherwise audioflinger
    // will not stop before end of buffer is reached.
    // It may be needed to make sure that we stop playback, likely in case looping is on.
    if (mSharedBuffer != 0) {
        flush_l();
    }
#endif

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();
}

status_t AudioTrack::setVolume(float left, float right)
{
    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[LEFT] = left;
    mVolume[RIGHT] = right;

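    // The gains are converted to fixed point (1.0f maps to 0x1000) and packed into a single
    // word, right in the high 16 bits and left in the low 16 bits; for example,
    // left = right = 0.5f packs as 0x08000800.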
    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    if (level < 0.0f || level > 1.0f) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    if (mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    uint32_t afSamplingRate;
    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
        return NO_INIT;
    }
    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
    if (rate == 0 || rate > afSamplingRate*2 ) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSampleRate = rate;
    mProxy->setSampleRate(rate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    if (mIsTimed) {
        return 0;
    }

    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloaded_l()) {
        if (mOutput != 0) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // FIXME If setting a loop also sets position to start of loop, then
    //       this is correct.  Otherwise it should be removed.
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *marker = mMarkerPosition;

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = mProxy->getPosition() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME Check whether loops and setting position are incompatible in old code.
    // If we use setLoop for both purposes we lose the capability to set the position while looping.
    mStaticProxy->setLoop(position, mFrameCount, 0);

    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position) const
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    if (isOffloaded_l()) {
        uint32_t dspFrames = 0;

        if (mOutput != 0) {
            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
        }
        *position = dspFrames;
    } else {
        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
                mProxy->getPosition();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(size_t *position)
{
    if (mSharedBuffer == 0 || mIsTimed) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME The new code cannot reload while keeping a loop specified.
    // Need to check how the old code handled this, and whether it's a significant change.
    mStaticProxy->setLoop(0, mFrameCount, 0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput()
{
    AutoMutex lock(mLock);
    return mOutput;
}

// must be called with mLock held
audio_io_handle_t AudioTrack::getOutput_l()
{
    if (mOutput) {
        return mOutput;
    } else {
        return AudioSystem::getOutput(mStreamType,
                                      mSampleRate, mFormat, mChannelMask, mFlags);
    }
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

// -------------------------------------------------------------------------

// must be called with mLock held
status_t AudioTrack::createTrack_l(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        size_t frameCount,
        audio_output_flags_t flags,
        const sp<IMemory>& sharedBuffer,
        audio_io_handle_t output,
        size_t epoch)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    // Not all of these values are needed under all conditions, but it is easier to get them all

    uint32_t afLatency;
    status = AudioSystem::getLatency(output, streamType, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        return NO_INIT;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
        return NO_INIT;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
        return NO_INIT;
    }

    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
            // either of these use cases:
            // use case 1: shared buffer
            (sharedBuffer != 0) ||
            // use case 2: callback handler
            (mCbf != NULL))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
        mFlags = flags;
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, no sample rate conversion
    //  n = 3   normal track, with sample rate conversion
    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;

    mNotificationFramesAct = mNotificationFramesReq;

    if (!audio_is_linear_pcm(format)) {

        if (sharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = sharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (sharedBuffer != 0) {

        // Ensure that buffer alignment matches channel count
        // 8-bit data in shared memory is not currently supported by AudioFlinger
        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
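        // For example, 16-bit stereo data must start on a 4-byte boundary.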
        if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    sharedBuffer->pointer(), mChannelCount);
            return BAD_VALUE;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
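        // For example, an 8192-byte shared buffer of 16-bit stereo data holds
        // 8192 / 2 / 2 = 2048 frames.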

    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {

        // FIXME move these calculations and associated checks to server

        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                afFrameCount, minBufCount, afSampleRate, afLatency);
        if (minBufCount <= nBuffering) {
            minBufCount = nBuffering;
        }

        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                ", afLatency=%d",
                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);

        if (frameCount == 0) {
            frameCount = minFrameCount;
        } else if (frameCount < minFrameCount) {
            // not ALOGW because it happens all the time when playing key clicks over A2DP
            ALOGV("Minimum buffer size corrected from %d to %d",
                     frameCount, minFrameCount);
            frameCount = minFrameCount;
        }
        // Make sure that application is notified with sufficient margin before underrun
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
            mNotificationFramesAct = frameCount/nBuffering;
        }

    } else {
        // For fast tracks, the frame count calculations and checks are done by server
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      sampleRate,
                                                      // AudioFlinger only sees 16-bit PCM
                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
                                                              AUDIO_FORMAT_PCM_16_BIT : format,
                                                      mChannelMask,
                                                      frameCount,
                                                      &trackFlags,
                                                      sharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mName,
                                                      mClientUid,
                                                      &status);

    if (track == 0) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        return status;
    }
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
    mCblk = cblk;
    size_t temp = cblk->frameCount_;
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
    }
    frameCount = temp;
    mAwaitBoost = false;
    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
            mAwaitBoost = true;
            if (sharedBuffer == 0) {
                // Theoretically double-buffering is not required for fast tracks,
                // due to tighter scheduling.  But in practice, to accommodate kernels with
                // scheduling jitter, and apps with computation jitter, we use double-buffering.
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        } else {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
            // once denied, do not request again if IAudioTrack is re-created
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
            mFlags = flags;
            if (sharedBuffer == 0) {
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        }
    }
    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
            mFlags = flags;
            return NO_INIT;
        }
    }

    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (sharedBuffer == 0) {
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
    } else {
        buffers = sharedBuffer->pointer();
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME don't believe this lie
    mLatency = afLatency + (1000*frameCount) / sampleRate;
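    // For example, frameCount = 4800 at a 48 kHz sample rate adds 100 ms on top of afLatency.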
    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy
    if (sharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
        mProxy = mStaticProxy;
    }
    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
            uint16_t(mVolume[LEFT] * 0x1000));
    mProxy->setSendLevel(mSendLevel);
    mProxy->setSampleRate(mSampleRate);
    mProxy->setEpoch(epoch);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    if (audioBuffer == NULL) {
        return BAD_VALUE;
    }
    if (mTransfer != TRANSFER_OBTAIN) {
        audioBuffer->frameCount = 0;
        audioBuffer->size = 0;
        audioBuffer->raw = NULL;
        return INVALID_OPERATION;
    }

    const struct timespec *requested;
    if (waitCount == -1) {
        requested = &ClientProxy::kForever;
    } else if (waitCount == 0) {
        requested = &ClientProxy::kNonBlocking;
    } else if (waitCount > 0) {
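        // Each unit of waitCount is one WAIT_PERIOD_MS (10 ms) interval, so for example
        // waitCount == 100 requests a timeout of roughly one second.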
        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
        struct timespec timeout;
        timeout.tv_sec = ms / 1000;
        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
        requested = &timeout;
    } else {
        ALOGE("%s invalid waitCount %d", __func__, waitCount);
        requested = NULL;
    }
    return obtainBuffer(audioBuffer, requested);
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);

    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));

    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}

void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    if (mTransfer == TRANSFER_SHARED) {
        return;
    }

    size_t stepCount = audioBuffer->size / mFrameSizeAF;
    if (stepCount == 0) {
        return;
    }

    Proxy::Buffer buffer;
    buffer.mFrameCount = stepCount;
    buffer.mRaw = audioBuffer->raw;

    AutoMutex lock(mLock);
    mInUnderrun = false;
    mProxy->releaseBuffer(&buffer);

    // restart track if it was disabled by audioflinger due to previous underrun
    if (mState == STATE_ACTIVE) {
        audio_track_cblk_t* cblk = mCblk;
        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
                    this, mName.string());
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
}

// -------------------------------------------------------------------------

ssize_t AudioTrack::write(const void* buffer, size_t userSize)
{
    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
        return INVALID_OPERATION;
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: user is most-likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs error).
        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t toWrite;
        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            // Divide capacity by 2 to take expansion into account
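            // memcpy_to_i16_from_u8() writes one 16-bit sample per source byte, so only
            // half of the destination buffer's byte capacity worth of source data fits.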
            toWrite = audioBuffer.size >> 1;
            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
        } else {
            toWrite = audioBuffer.size;
            memcpy(audioBuffer.i8, buffer, toWrite);
        }
        buffer = ((const char *) buffer) + toWrite;
        userSize -= toWrite;
        written += toWrite;

        releaseBuffer(&audioBuffer);
    }

    return written;
}

// -------------------------------------------------------------------------

TimedAudioTrack::TimedAudioTrack() {
    mIsTimed = true;
}

status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
{
    AutoMutex lock(mLock);
    status_t result = UNKNOWN_ERROR;

#if 1
    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
    // while we are accessing the cblk
    sp<IAudioTrack> audioTrack = mAudioTrack;
    sp<IMemory> iMem = mCblkMemory;
#endif

    // If the track is not already invalid, try to allocate a buffer.  If the
    // allocation fails indicating that the server is dead, flag the track as
    // invalid so we can attempt to restore it in just a bit.
    audio_track_cblk_t* cblk = mCblk;
    if (!(cblk->mFlags & CBLK_INVALID)) {
        result = mAudioTrack->allocateTimedBuffer(size, buffer);
        if (result == DEAD_OBJECT) {
            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
        }
    }

    // If the track is invalid at this point, attempt to restore it and try the
    // allocation one more time.
    if (cblk->mFlags & CBLK_INVALID) {
        result = restoreTrack_l("allocateTimedBuffer");

        if (result == NO_ERROR) {
            result = mAudioTrack->allocateTimedBuffer(size, buffer);
        }
    }

    return result;
}

status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
                                           int64_t pts)
{
    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
    {
        AutoMutex lock(mLock);
        audio_track_cblk_t* cblk = mCblk;
        // restart track if it was disabled by audioflinger due to previous underrun
        if (buffer->size() != 0 && status == NO_ERROR &&
                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
    return status;
}

status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
                                                TargetTimeline target)
{
    return mAudioTrack->setMediaTimeTransform(xform, target);
}

// -------------------------------------------------------------------------

nsecs_t AudioTrack::processAudioBuffer()
{
    // Currently the AudioTrack thread is not created if there are no callbacks.
    // Would it ever make sense to run the thread, even without callbacks?
    // If so, then replace this by checks at each use for mCbf != NULL.
    LOG_ALWAYS_FATAL_IF(mCblk == NULL);

    mLock.lock();
    if (mAwaitBoost) {
        mAwaitBoost = false;
        mLock.unlock();
        static const int32_t kMaxTries = 5;
        int32_t tryCounter = kMaxTries;
        uint32_t pollUs = 10000;
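        // Poll for the priority boost with exponential backoff: 10 ms, 20 ms, 40 ms, ...
        // for up to kMaxTries attempts.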
        do {
            int policy = sched_getscheduler(0);
            if (policy == SCHED_FIFO || policy == SCHED_RR) {
                break;
            }
            usleep(pollUs);
            pollUs <<= 1;
        } while (tryCounter-- > 0);
        if (tryCounter < 0) {
            ALOGE("did not receive expected priority boost on time");
        }
        // Run again immediately
        return 0;
    }

    // Can only reference mCblk while locked
    int32_t flags = android_atomic_and(
        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);

    // Check for track invalidation
    if (flags & CBLK_INVALID) {
        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
        // AudioSystem cache. We should not exit here but after calling the callback so
        // that the upper layers can recreate the track
        if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
            status_t status = restoreTrack_l("processAudioBuffer");
            mLock.unlock();
            // Run again immediately, but with a new IAudioTrack
            return 0;
        }
    }

    bool waitStreamEnd = mState == STATE_STOPPING;
    bool active = mState == STATE_ACTIVE;

    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
    bool newUnderrun = false;
    if (flags & CBLK_UNDERRUN) {
#if 0
        // Currently in shared buffer mode, when the server reaches the end of buffer,
        // the track stays active in continuous underrun state.  It's up to the application
        // to pause or stop the track, or set the position to a new offset within buffer.
        // This was some experimental code to auto-pause on underrun.   Keeping it here
        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
        if (mTransfer == TRANSFER_SHARED) {
            mState = STATE_PAUSED;
            active = false;
        }
#endif
        if (!mInUnderrun) {
            mInUnderrun = true;
            newUnderrun = true;
        }
    }

    // Get current position of server
    size_t position = mProxy->getPosition();

    // Manage marker callback
    bool markerReached = false;
    size_t markerPosition = mMarkerPosition;
    // FIXME fails for wraparound, need 64 bits
    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
        mMarkerReached = markerReached = true;
    }

    // Determine number of new position callback(s) that will be needed, while locked
    size_t newPosCount = 0;
    size_t newPosition = mNewPosition;
    size_t updatePeriod = mUpdatePeriod;
    // FIXME fails for wraparound, need 64 bits
    if (updatePeriod > 0 && position >= newPosition) {
        newPosCount = ((position - newPosition) / updatePeriod) + 1;
        mNewPosition += updatePeriod * newPosCount;
    }

    // Cache other fields that will be needed soon
    uint32_t loopPeriod = mLoopPeriod;
    uint32_t sampleRate = mSampleRate;
    size_t notificationFrames = mNotificationFramesAct;
    if (mRefreshRemaining) {
        mRefreshRemaining = false;
        mRemainingFrames = notificationFrames;
        mRetryOnPartialBuffer = false;
    }
    size_t misalignment = mProxy->getMisalignment();
    uint32_t sequence = mSequence;

    // These fields don't need to be cached, because they are assigned only by set():
    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
    // mFlags is also assigned by createTrack_l(), but not the bit we care about.

    mLock.unlock();

    if (waitStreamEnd) {
        AutoMutex lock(mLock);

        sp<AudioTrackClientProxy> proxy = mProxy;
        sp<IMemory> iMem = mCblkMemory;

        struct timespec timeout;
        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
        timeout.tv_nsec = 0;

        mLock.unlock();
        status_t status = mProxy->waitStreamEndDone(&timeout);
        mLock.lock();
        switch (status) {
        case NO_ERROR:
        case DEAD_OBJECT:
        case TIMED_OUT:
            mLock.unlock();
            mCbf(EVENT_STREAM_END, mUserData, NULL);
            mLock.lock();
            if (mState == STATE_STOPPING) {
                mState = STATE_STOPPED;
                if (status != DEAD_OBJECT) {
                    return NS_INACTIVE;
                }
            }
            return 0;
        default:
            return 0;
        }
    }

    // perform callbacks while unlocked
    if (newUnderrun) {
        mCbf(EVENT_UNDERRUN, mUserData, NULL);
    }
    // FIXME we will miss loops if loop cycle was signaled several times since last call
    //       to processAudioBuffer()
    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
        mCbf(EVENT_LOOP_END, mUserData, NULL);
    }
    if (flags & CBLK_BUFFER_END) {
        mCbf(EVENT_BUFFER_END, mUserData, NULL);
    }
    if (markerReached) {
        mCbf(EVENT_MARKER, mUserData, &markerPosition);
    }
    while (newPosCount > 0) {
        size_t temp = newPosition;
        mCbf(EVENT_NEW_POS, mUserData, &temp);
        newPosition += updatePeriod;
        newPosCount--;
    }

    if (mObservedSequence != sequence) {
        mObservedSequence = sequence;
        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
        // for offloaded tracks, just wait for the upper layers to recreate the track
        if (isOffloaded()) {
            return NS_INACTIVE;
        }
    }

    // if inactive, then don't run me again until re-started
    if (!active) {
        return NS_INACTIVE;
    }

    // Compute the estimated time until the next timed event (position, markers, loops)
    // FIXME only for non-compressed audio
    uint32_t minFrames = ~0;
    if (!markerReached && position < markerPosition) {
        minFrames = markerPosition - position;
    }
    if (loopPeriod > 0 && loopPeriod < minFrames) {
        minFrames = loopPeriod;
    }
    if (updatePeriod > 0 && updatePeriod < minFrames) {
        minFrames = updatePeriod;
    }

    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
    static const uint32_t kPoll = 0;
    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
        minFrames = kPoll * notificationFrames;
    }

    // Convert frame units to time units
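    // For example, minFrames = 480 at a 48 kHz sample rate is 10 ms, plus the 10 ms fudge
    // factor below, for a sleep of about 20 ms.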
1557    nsecs_t ns = NS_WHENEVER;
1558    if (minFrames != (uint32_t) ~0) {
1559        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1560        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1561        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1562    }
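    // Example: minFrames == 4410 at 44100 Hz gives 100 ms plus the 10 ms fudge,
    // i.e. a wakeup roughly 110 ms from now.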
1563
1564    // If not supplying data by EVENT_MORE_DATA, then we're done
1565    if (mTransfer != TRANSFER_CALLBACK) {
1566        return ns;
1567    }
1568
1569    struct timespec timeout;
1570    const struct timespec *requested = &ClientProxy::kForever;
1571    if (ns != NS_WHENEVER) {
1572        timeout.tv_sec = ns / 1000000000LL;
1573        timeout.tv_nsec = ns % 1000000000LL;
1574        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1575        requested = &timeout;
1576    }
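    // Only the first obtainBuffer() in the loop below may block (up to the timeout computed
    // above, or forever if ns == NS_WHENEVER); after it returns, 'requested' is switched to
    // kNonBlocking so subsequent iterations drain whatever is immediately available.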
1577
1578    while (mRemainingFrames > 0) {
1579
1580        Buffer audioBuffer;
1581        audioBuffer.frameCount = mRemainingFrames;
1582        size_t nonContig;
1583        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1584        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1585                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1586        requested = &ClientProxy::kNonBlocking;
1587        size_t avail = audioBuffer.frameCount + nonContig;
1588        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1589                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1590        if (err != NO_ERROR) {
1591            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1592                    (isOffloaded() && (err == DEAD_OBJECT))) {
1593                return 0;
1594            }
1595            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1596            return NS_NEVER;
1597        }
1598
1599        if (mRetryOnPartialBuffer && !isOffloaded()) {
1600            mRetryOnPartialBuffer = false;
1601            if (avail < mRemainingFrames) {
1602                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1603                if (ns < 0 || myns < ns) {
1604                    ns = myns;
1605                }
1606                return ns;
1607            }
1608        }
1609
1610        // Divide buffer size by 2 to take into account the expansion
1611        // due to 8 to 16 bit conversion: the callback must fill only half
1612        // of the destination buffer
1613        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1614            audioBuffer.size >>= 1;
1615        }
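        // Example (for this 8-bit, non-direct case): with stereo data, mFrameSize is
        // 2 bytes per frame (the u8 data written by the callback) while mFrameSizeAF is
        // 4 bytes per frame (the i16 data mixed by audioflinger), so exposing half the
        // byte count keeps the frame count seen by the callback unchanged.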
1616
1617        size_t reqSize = audioBuffer.size;
1618        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1619        size_t writtenSize = audioBuffer.size;
1620        size_t writtenFrames = writtenSize / mFrameSize;
1621
1622        // Sanity check on returned size
1623        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1624            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1625                    reqSize, (int) writtenSize);
1626            return NS_NEVER;
1627        }
1628
1629        if (writtenSize == 0) {
1630            // The callback is done filling buffers
1631            // Keep this thread going to handle timed events and
1632            // still try to get more data in intervals of WAIT_PERIOD_MS
1633            // but don't just loop and block the CPU, so wait
1634            return WAIT_PERIOD_MS * 1000000LL;
1635        }
1636
1637        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1638            // 8 to 16 bit conversion, note that source and destination are the same address
1639            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1640            audioBuffer.size <<= 1;
1641        }
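        // The conversion doubles the byte count in place; restoring audioBuffer.size to
        // 16-bit units means releasedFrames below is computed in audioflinger frame units
        // (mFrameSizeAF), matching what releaseBuffer() expects.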
1642
1643        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1644        audioBuffer.frameCount = releasedFrames;
1645        mRemainingFrames -= releasedFrames;
1646        if (misalignment >= releasedFrames) {
1647            misalignment -= releasedFrames;
1648        } else {
1649            misalignment = 0;
1650        }
1651
1652        releaseBuffer(&audioBuffer);
1653
1654        // FIXME this is where we would repeat EVENT_MORE_DATA on the same, advanced buffer
1655        //       if the callback did not accept the full chunk
1656        if (writtenSize < reqSize) {
1657            continue;
1658        }
1659
1660        // There could be enough non-contiguous frames available to satisfy the remaining request
1661        if (mRemainingFrames <= nonContig) {
1662            continue;
1663        }
1664
1665#if 0
1666        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1667        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1668        // that total to a sum == notificationFrames.
1669        if (0 < misalignment && misalignment <= mRemainingFrames) {
1670            mRemainingFrames = misalignment;
1671            return (mRemainingFrames * 1100000000LL) / sampleRate;
1672        }
1673#endif
1674
1675    }
1676    mRemainingFrames = notificationFrames;
1677    mRetryOnPartialBuffer = true;
1678
1679    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1680    return 0;
1681}
1682
1683status_t AudioTrack::restoreTrack_l(const char *from)
1684{
1685    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1686          isOffloaded_l() ? "Offloaded" : "PCM", from);
1687    ++mSequence;
1688    status_t result;
1689
1690    // refresh the audio configuration cache in this process to make sure we get new
1691    // output parameters in getOutput_l() and createTrack_l()
1692    AudioSystem::clearAudioConfigCache();
1693
1694    if (isOffloaded_l()) {
1695        // FIXME re-creation of offloaded tracks is not yet implemented
1696        return DEAD_OBJECT;
1697    }
1698
1699    // force a new output query from the audio policy manager
1700    mOutput = 0;
1701    audio_io_handle_t output = getOutput_l();
1702
1703    // If a new IAudioTrack is created, createTrack_l() will modify the
1704    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1705    // It will also drop the strong references to the previous IAudioTrack and IMemory.
1706
1707    // take the frames that will be lost by track re-creation into account in the saved position
1708    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1709    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1710    result = createTrack_l(mStreamType,
1711                           mSampleRate,
1712                           mFormat,
1713                           mReqFrameCount,  // so that frame count never goes down
1714                           mFlags,
1715                           mSharedBuffer,
1716                           output,
1717                           position /*epoch*/);
1718
1719    if (result == NO_ERROR) {
1720        // continue playback from last known position, but
1721        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1722        if (mStaticProxy != NULL) {
1723            mLoopPeriod = 0;
1724            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1725        }
1726        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1727        //       track destruction have been played? This is critical for the SoundPool
1728        //       implementation. This must be broken, and needs to be tested/debugged.
1729#if 0
1730        // restore write index and set other indexes to reflect empty buffer status
1731        if (!strcmp(from, "start")) {
1732            // Make sure that a client relying on callback events indicating underrun or
1733            // the actual number of audio frames played (e.g. SoundPool) receives them.
1734            if (mSharedBuffer == 0) {
1735                // restart playback even if buffer is not completely filled.
1736                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1737            }
1738        }
1739#endif
1740        if (mState == STATE_ACTIVE) {
1741            result = mAudioTrack->start();
1742        }
1743    }
1744    if (result != NO_ERROR) {
1745        // Use of direct and offloaded output streams is reference-counted by the audio policy
1746        // manager. Since getOutput was called above and resulted in an output stream being
1747        // opened, we need to release it.
1748        AudioSystem::releaseOutput(output);
1749        ALOGW("restoreTrack_l() failed status %d", result);
1750        mState = STATE_STOPPED;
1751    }
1752
1753    return result;
1754}
1755
1756status_t AudioTrack::setParameters(const String8& keyValuePairs)
1757{
1758    AutoMutex lock(mLock);
1759    return mAudioTrack->setParameters(keyValuePairs);
1760}
1761
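// Returns the playback position/time pair reported by the server, adjusted by the proxy
// epoch so that positions stay continuous across track re-creation (restoreTrack_l()
// passes the saved position as the new epoch).  Typical caller sketch, with 'track' an
// illustrative sp<AudioTrack>:
//     AudioTimestamp ts;
//     if (track->getTimestamp(ts) == NO_ERROR) {
//         // ts.mPosition / ts.mTime: a frame position and the CLOCK_MONOTONIC time
//         // at which that frame is presented at the output
//     }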
1762status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1763{
1764    AutoMutex lock(mLock);
1765    // FIXME not implemented for fast tracks; should use proxy and SSQ
1766    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1767        return INVALID_OPERATION;
1768    }
1769    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1770        return INVALID_OPERATION;
1771    }
1772    status_t status = mAudioTrack->getTimestamp(timestamp);
1773    if (status == NO_ERROR) {
1774        timestamp.mPosition += mProxy->getEpoch();
1775    }
1776    return status;
1777}
1778
1779String8 AudioTrack::getParameters(const String8& keys)
1780{
1781    audio_io_handle_t output = getOutput();
1782    if (output != 0) {
1783        return AudioSystem::getParameters(output, keys);
1784    } else {
1785        return String8::empty();
1786    }
1787}
1788
1789bool AudioTrack::isOffloaded() const
1790{
1791    AutoMutex lock(mLock);
1792    return isOffloaded_l();
1793}
1794
1795status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
1796{
1797
1798    const size_t SIZE = 256;
1799    char buffer[SIZE];
1800    String8 result;
1801
1802    result.append(" AudioTrack::dump\n");
1803    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1804            mVolume[0], mVolume[1]);
1805    result.append(buffer);
1806    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
1807            mChannelCount, mFrameCount);
1808    result.append(buffer);
1809    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1810    result.append(buffer);
1811    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1812    result.append(buffer);
1813    ::write(fd, result.string(), result.size());
1814    return NO_ERROR;
1815}
1816
1817uint32_t AudioTrack::getUnderrunFrames() const
1818{
1819    AutoMutex lock(mLock);
1820    return mProxy->getUnderrunFrames();
1821}
1822
1823// =========================================================================
1824
1825void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
1826{
1827    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1828    if (audioTrack != 0) {
1829        AutoMutex lock(audioTrack->mLock);
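        // The proxy flags the shared control block (see AudioTrackShared) so that a client
        // call blocked in obtainBuffer() can wake up, report DEAD_OBJECT, and re-create the
        // IAudioTrack via restoreTrack_l().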
1830        audioTrack->mProxy->binderDied();
1831    }
1832}
1833
1834// =========================================================================
1835
1836AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1837    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1838      mIgnoreNextPausedInt(false)
1839{
1840}
1841
1842AudioTrack::AudioTrackThread::~AudioTrackThread()
1843{
1844}
1845
1846bool AudioTrack::AudioTrackThread::threadLoop()
1847{
1848    {
1849        AutoMutex _l(mMyLock);
1850        if (mPaused) {
1851            mMyCond.wait(mMyLock);
1852            // caller will check for exitPending()
1853            return true;
1854        }
1855        if (mIgnoreNextPausedInt) {
1856            mIgnoreNextPausedInt = false;
1857            mPausedInt = false;
1858        }
1859        if (mPausedInt) {
1860            if (mPausedNs > 0) {
1861                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1862            } else {
1863                mMyCond.wait(mMyLock);
1864            }
1865            mPausedInt = false;
1866            return true;
1867        }
1868    }
1869    nsecs_t ns = mReceiver.processAudioBuffer();
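    // processAudioBuffer() returns either a sentinel or a sleep duration:
    //   0           run again immediately
    //   NS_INACTIVE pause the thread until resume()
    //   NS_NEVER    exit the thread
    //   NS_WHENEVER no deadline; treated as a 1 s poll below
    //   > 0         sleep for that many nanoseconds before running again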
1870    switch (ns) {
1871    case 0:
1872        return true;
1873    case NS_INACTIVE:
1874        pauseInternal();
1875        return true;
1876    case NS_NEVER:
1877        return false;
1878    case NS_WHENEVER:
1879        // FIXME increase poll interval, or make event-driven
1880        ns = 1000000000LL;
1881        // fall through
1882    default:
1883        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
1884        pauseInternal(ns);
1885        return true;
1886    }
1887}
1888
1889void AudioTrack::AudioTrackThread::requestExit()
1890{
1891    // must be in this order: set the exit flag before waking, so the thread sees exitPending()
1892    Thread::requestExit();
1893    resume();
1894}
1895
1896void AudioTrack::AudioTrackThread::pause()
1897{
1898    AutoMutex _l(mMyLock);
1899    mPaused = true;
1900}
1901
1902void AudioTrack::AudioTrackThread::resume()
1903{
1904    AutoMutex _l(mMyLock);
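    // Cancel any pending internal pause as well: the next threadLoop() iteration will see
    // mIgnoreNextPausedInt, clear mPausedInt, and keep running instead of sleeping.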
1905    mIgnoreNextPausedInt = true;
1906    if (mPaused || mPausedInt) {
1907        mPaused = false;
1908        mPausedInt = false;
1909        mMyCond.signal();
1910    }
1911}
1912
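// A zero ns pauses until resume(); a positive ns bounds the pause, since threadLoop()
// then uses waitRelative() instead of an indefinite wait().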
1913void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1914{
1915    AutoMutex _l(mMyLock);
1916    mPausedInt = true;
1917    mPausedNs = ns;
1918}
1919
1920} // namespace android
1921