AudioTrack.cpp revision 398f21348e5100289f6e5be30c8b5257fa04aaf9
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <sys/resource.h>
23#include <audio_utils/primitives.h>
24#include <binder/IPCThreadState.h>
25#include <media/AudioTrack.h>
26#include <utils/Log.h>
27#include <private/media/AudioTrackShared.h>
28#include <media/IAudioFlinger.h>
29
30#define WAIT_PERIOD_MS                  10
31#define WAIT_STREAM_END_TIMEOUT_SEC     120
32
33
34namespace android {
35// ---------------------------------------------------------------------------
36
37// static
38status_t AudioTrack::getMinFrameCount(
39        size_t* frameCount,
40        audio_stream_type_t streamType,
41        uint32_t sampleRate)
42{
43    if (frameCount == NULL) {
44        return BAD_VALUE;
45    }
46
47    // default to 0 in case of error
48    *frameCount = 0;
49
50    // FIXME merge with similar code in createTrack_l(), except we're missing
51    //       some information here that is available in createTrack_l():
52    //          audio_io_handle_t output
53    //          audio_format_t format
54    //          audio_channel_mask_t channelMask
55    //          audio_output_flags_t flags
56    uint32_t afSampleRate;
57    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
58        return NO_INIT;
59    }
60    size_t afFrameCount;
61    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
62        return NO_INIT;
63    }
64    uint32_t afLatency;
65    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
66        return NO_INIT;
67    }
68
69    // Ensure that buffer depth covers at least audio hardware latency
70    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
71    if (minBufCount < 2) {
72        minBufCount = 2;
73    }
74
75    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
76            afFrameCount * minBufCount * sampleRate / afSampleRate;
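    // Illustrative arithmetic (hypothetical values): with afLatency = 75 ms, afFrameCount = 1024
    // and afSampleRate = 48000, one hardware buffer lasts (1000 * 1024) / 48000 = 21 ms, so
    // minBufCount = 75 / 21 = 3; a client at sampleRate = 44100 then needs at least
    // 1024 * 3 * 44100 / 48000 = 2822 frames.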
77    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
78            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
79    return NO_ERROR;
80}
81
82// ---------------------------------------------------------------------------
83
84AudioTrack::AudioTrack()
85    : mStatus(NO_INIT),
86      mIsTimed(false),
87      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
88      mPreviousSchedulingGroup(SP_DEFAULT)
89{
90}
91
92AudioTrack::AudioTrack(
93        audio_stream_type_t streamType,
94        uint32_t sampleRate,
95        audio_format_t format,
96        audio_channel_mask_t channelMask,
97        int frameCount,
98        audio_output_flags_t flags,
99        callback_t cbf,
100        void* user,
101        int notificationFrames,
102        int sessionId,
103        transfer_type transferType,
104        const audio_offload_info_t *offloadInfo,
105        int uid)
106    : mStatus(NO_INIT),
107      mIsTimed(false),
108      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
109      mPreviousSchedulingGroup(SP_DEFAULT)
110{
111    mStatus = set(streamType, sampleRate, format, channelMask,
112            frameCount, flags, cbf, user, notificationFrames,
113            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
114            offloadInfo, uid);
115}
116
117AudioTrack::AudioTrack(
118        audio_stream_type_t streamType,
119        uint32_t sampleRate,
120        audio_format_t format,
121        audio_channel_mask_t channelMask,
122        const sp<IMemory>& sharedBuffer,
123        audio_output_flags_t flags,
124        callback_t cbf,
125        void* user,
126        int notificationFrames,
127        int sessionId,
128        transfer_type transferType,
129        const audio_offload_info_t *offloadInfo,
130        int uid)
131    : mStatus(NO_INIT),
132      mIsTimed(false),
133      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
134      mPreviousSchedulingGroup(SP_DEFAULT)
135{
136    mStatus = set(streamType, sampleRate, format, channelMask,
137            0 /*frameCount*/, flags, cbf, user, notificationFrames,
138            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo, uid);
139}
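// Usage sketch (illustrative; myCallback and myUserData are placeholders): a streaming
// client typically constructs the track, checks initCheck(), then starts it:
//     sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
//             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
//             0 /*frameCount: let set() pick the minimum*/, AUDIO_OUTPUT_FLAG_NONE,
//             myCallback, myUserData);
//     if (track->initCheck() == NO_ERROR) {
//         track->start();
//     }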
140
141AudioTrack::~AudioTrack()
142{
143    if (mStatus == NO_ERROR) {
144        // Make sure that the callback function exits in the case where
145        // it is looping on a buffer-full condition in obtainBuffer().
146        // Otherwise the callback thread will never exit.
147        stop();
148        if (mAudioTrackThread != 0) {
149            mProxy->interrupt();
150            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
151            mAudioTrackThread->requestExitAndWait();
152            mAudioTrackThread.clear();
153        }
154        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
155        mAudioTrack.clear();
156        IPCThreadState::self()->flushCommands();
157        AudioSystem::releaseAudioSessionId(mSessionId);
158    }
159}
160
161status_t AudioTrack::set(
162        audio_stream_type_t streamType,
163        uint32_t sampleRate,
164        audio_format_t format,
165        audio_channel_mask_t channelMask,
166        int frameCountInt,
167        audio_output_flags_t flags,
168        callback_t cbf,
169        void* user,
170        int notificationFrames,
171        const sp<IMemory>& sharedBuffer,
172        bool threadCanCallJava,
173        int sessionId,
174        transfer_type transferType,
175        const audio_offload_info_t *offloadInfo,
176        int uid)
177{
178    switch (transferType) {
179    case TRANSFER_DEFAULT:
180        if (sharedBuffer != 0) {
181            transferType = TRANSFER_SHARED;
182        } else if (cbf == NULL || threadCanCallJava) {
183            transferType = TRANSFER_SYNC;
184        } else {
185            transferType = TRANSFER_CALLBACK;
186        }
187        break;
188    case TRANSFER_CALLBACK:
189        if (cbf == NULL || sharedBuffer != 0) {
190            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
191            return BAD_VALUE;
192        }
193        break;
194    case TRANSFER_OBTAIN:
195    case TRANSFER_SYNC:
196        if (sharedBuffer != 0) {
197            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
198            return BAD_VALUE;
199        }
200        break;
201    case TRANSFER_SHARED:
202        if (sharedBuffer == 0) {
203            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
204            return BAD_VALUE;
205        }
206        break;
207    default:
208        ALOGE("Invalid transfer type %d", transferType);
209        return BAD_VALUE;
210    }
211    mTransfer = transferType;
212
213    // FIXME "int" here is legacy and will be replaced by size_t later
214    if (frameCountInt < 0) {
215        ALOGE("Invalid frame count %d", frameCountInt);
216        return BAD_VALUE;
217    }
218    size_t frameCount = frameCountInt;
219
220    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
221            sharedBuffer->size());
222
223    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
224
225    AutoMutex lock(mLock);
226
227    // invariant that mAudioTrack != 0 is true only after set() returns successfully
228    if (mAudioTrack != 0) {
229        ALOGE("Track already in use");
230        return INVALID_OPERATION;
231    }
232
233    mOutput = 0;
234
235    // handle default values first.
236    if (streamType == AUDIO_STREAM_DEFAULT) {
237        streamType = AUDIO_STREAM_MUSIC;
238    }
239
240    if (sampleRate == 0) {
241        uint32_t afSampleRate;
242        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
243            return NO_INIT;
244        }
245        sampleRate = afSampleRate;
246    }
247    mSampleRate = sampleRate;
248
249    // these below should probably come from the audioFlinger too...
250    if (format == AUDIO_FORMAT_DEFAULT) {
251        format = AUDIO_FORMAT_PCM_16_BIT;
252    }
253    if (channelMask == 0) {
254        channelMask = AUDIO_CHANNEL_OUT_STEREO;
255    }
256
257    // validate parameters
258    if (!audio_is_valid_format(format)) {
259        ALOGE("Invalid format %d", format);
260        return BAD_VALUE;
261    }
262
263    // AudioFlinger does not currently support 8-bit data in shared memory
264    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
265        ALOGE("8-bit data in shared memory is not supported");
266        return BAD_VALUE;
267    }
268
269    // force direct flag if format is not linear PCM
270    // or offload was requested
271    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
272            || !audio_is_linear_pcm(format)) {
273        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
274                    ? "Offload request, forcing to Direct Output"
275                    : "Not linear PCM, forcing to Direct Output");
276        flags = (audio_output_flags_t)
277                // FIXME why can't we allow direct AND fast?
278                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
279    }
280    // only allow deep buffering for music stream type
281    if (streamType != AUDIO_STREAM_MUSIC) {
282        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
283    }
284
285    if (!audio_is_output_channel(channelMask)) {
286        ALOGE("Invalid channel mask %#x", channelMask);
287        return BAD_VALUE;
288    }
289    mChannelMask = channelMask;
290    uint32_t channelCount = popcount(channelMask);
291    mChannelCount = channelCount;
292
293    if (audio_is_linear_pcm(format)) {
294        mFrameSize = channelCount * audio_bytes_per_sample(format);
295        mFrameSizeAF = channelCount * sizeof(int16_t);
296    } else {
297        mFrameSize = sizeof(uint8_t);
298        mFrameSizeAF = sizeof(uint8_t);
299    }
300
301    audio_io_handle_t output = AudioSystem::getOutput(
302                                    streamType,
303                                    sampleRate, format, channelMask,
304                                    flags,
305                                    offloadInfo);
306
307    if (output == 0) {
308        ALOGE("Could not get audio output for stream type %d", streamType);
309        return BAD_VALUE;
310    }
311
312    mVolume[LEFT] = 1.0f;
313    mVolume[RIGHT] = 1.0f;
314    mSendLevel = 0.0f;
315    mFrameCount = frameCount;
316    mReqFrameCount = frameCount;
317    mNotificationFramesReq = notificationFrames;
318    mNotificationFramesAct = 0;
319    mSessionId = sessionId;
320    if (uid == -1 || (IPCThreadState::self()->getCallingPid() != getpid())) {
321        mClientUid = IPCThreadState::self()->getCallingUid();
322    } else {
323        mClientUid = uid;
324    }
325    mAuxEffectId = 0;
326    mFlags = flags;
327    mCbf = cbf;
328
329    if (cbf != NULL) {
330        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
331        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
332    }
333
334    // create the IAudioTrack
335    status_t status = createTrack_l(streamType,
336                                  sampleRate,
337                                  format,
338                                  frameCount,
339                                  flags,
340                                  sharedBuffer,
341                                  output,
342                                  0 /*epoch*/);
343
344    if (status != NO_ERROR) {
345        if (mAudioTrackThread != 0) {
346            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
347            mAudioTrackThread->requestExitAndWait();
348            mAudioTrackThread.clear();
349        }
350        // Use of direct and offloaded output streams is ref counted by the audio policy manager.
351        // As getOutput() was called above and resulted in an output stream being opened,
352        // we need to release it.
353        AudioSystem::releaseOutput(output);
354        return status;
355    }
356
357    mStatus = NO_ERROR;
358    mStreamType = streamType;
359    mFormat = format;
360    mSharedBuffer = sharedBuffer;
361    mState = STATE_STOPPED;
362    mUserData = user;
363    mLoopPeriod = 0;
364    mMarkerPosition = 0;
365    mMarkerReached = false;
366    mNewPosition = 0;
367    mUpdatePeriod = 0;
368    AudioSystem::acquireAudioSessionId(mSessionId);
369    mSequence = 1;
370    mObservedSequence = mSequence;
371    mInUnderrun = false;
372    mOutput = output;
373
374    return NO_ERROR;
375}
376
377// -------------------------------------------------------------------------
378
379status_t AudioTrack::start()
380{
381    AutoMutex lock(mLock);
382
383    if (mState == STATE_ACTIVE) {
384        return INVALID_OPERATION;
385    }
386
387    mInUnderrun = true;
388
389    State previousState = mState;
390    if (previousState == STATE_PAUSED_STOPPING) {
391        mState = STATE_STOPPING;
392    } else {
393        mState = STATE_ACTIVE;
394    }
395    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
396        // reset current position as seen by client to 0
397        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
398        // force refresh of remaining frames by processAudioBuffer() as last
399        // write before stop could be partial.
400        mRefreshRemaining = true;
401    }
402    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
403    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
404
405    sp<AudioTrackThread> t = mAudioTrackThread;
406    if (t != 0) {
407        if (previousState == STATE_STOPPING) {
408            mProxy->interrupt();
409        } else {
410            t->resume();
411        }
412    } else {
413        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
414        get_sched_policy(0, &mPreviousSchedulingGroup);
415        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
416    }
417
418    status_t status = NO_ERROR;
419    if (!(flags & CBLK_INVALID)) {
420        status = mAudioTrack->start();
421        if (status == DEAD_OBJECT) {
422            flags |= CBLK_INVALID;
423        }
424    }
425    if (flags & CBLK_INVALID) {
426        status = restoreTrack_l("start");
427    }
428
429    if (status != NO_ERROR) {
430        ALOGE("start() status %d", status);
431        mState = previousState;
432        if (t != 0) {
433            if (previousState != STATE_STOPPING) {
434                t->pause();
435            }
436        } else {
437            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
438            set_sched_policy(0, mPreviousSchedulingGroup);
439        }
440    }
441
442    return status;
443}
444
445void AudioTrack::stop()
446{
447    AutoMutex lock(mLock);
448    // FIXME pause then stop should not be a nop
449    if (mState != STATE_ACTIVE) {
450        return;
451    }
452
453    if (isOffloaded()) {
454        mState = STATE_STOPPING;
455    } else {
456        mState = STATE_STOPPED;
457    }
458
459    mProxy->interrupt();
460    mAudioTrack->stop();
461    // the playback head position will reset to 0, so if a marker is set, we need
462    // to activate it again
463    mMarkerReached = false;
464#if 0
465    // Force a flush if a shared buffer is used, otherwise audioflinger
466    // will not stop before the end of the buffer is reached.
467    // This may be needed to make sure that we stop playback, particularly when looping is on.
468    if (mSharedBuffer != 0) {
469        flush_l();
470    }
471#endif
472
473    sp<AudioTrackThread> t = mAudioTrackThread;
474    if (t != 0) {
475        if (!isOffloaded()) {
476            t->pause();
477        }
478    } else {
479        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
480        set_sched_policy(0, mPreviousSchedulingGroup);
481    }
482}
483
484bool AudioTrack::stopped() const
485{
486    AutoMutex lock(mLock);
487    return mState != STATE_ACTIVE;
488}
489
490void AudioTrack::flush()
491{
492    if (mSharedBuffer != 0) {
493        return;
494    }
495    AutoMutex lock(mLock);
496    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
497        return;
498    }
499    flush_l();
500}
501
502void AudioTrack::flush_l()
503{
504    ALOG_ASSERT(mState != STATE_ACTIVE);
505
506    // clear playback marker and periodic update counter
507    mMarkerPosition = 0;
508    mMarkerReached = false;
509    mUpdatePeriod = 0;
510    mRefreshRemaining = true;
511
512    mState = STATE_FLUSHED;
513    if (isOffloaded()) {
514        mProxy->interrupt();
515    }
516    mProxy->flush();
517    mAudioTrack->flush();
518}
519
520void AudioTrack::pause()
521{
522    AutoMutex lock(mLock);
523    if (mState == STATE_ACTIVE) {
524        mState = STATE_PAUSED;
525    } else if (mState == STATE_STOPPING) {
526        mState = STATE_PAUSED_STOPPING;
527    } else {
528        return;
529    }
530    mProxy->interrupt();
531    mAudioTrack->pause();
532}
533
534status_t AudioTrack::setVolume(float left, float right)
535{
536    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
537        return BAD_VALUE;
538    }
539
540    AutoMutex lock(mLock);
541    mVolume[LEFT] = left;
542    mVolume[RIGHT] = right;
543
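    // Gains are passed to the proxy as U4.12 fixed point (0x1000 == 1.0):
    // right gain in the upper 16 bits, left gain in the lower 16 bits.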
544    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
545
546    if (isOffloaded()) {
547        mAudioTrack->signal();
548    }
549    return NO_ERROR;
550}
551
552status_t AudioTrack::setVolume(float volume)
553{
554    return setVolume(volume, volume);
555}
556
557status_t AudioTrack::setAuxEffectSendLevel(float level)
558{
559    if (level < 0.0f || level > 1.0f) {
560        return BAD_VALUE;
561    }
562
563    AutoMutex lock(mLock);
564    mSendLevel = level;
565    mProxy->setSendLevel(level);
566
567    return NO_ERROR;
568}
569
570void AudioTrack::getAuxEffectSendLevel(float* level) const
571{
572    if (level != NULL) {
573        *level = mSendLevel;
574    }
575}
576
577status_t AudioTrack::setSampleRate(uint32_t rate)
578{
579    if (mIsTimed || isOffloaded()) {
580        return INVALID_OPERATION;
581    }
582
583    uint32_t afSamplingRate;
584    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
585        return NO_INIT;
586    }
587    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
588    if (rate == 0 || rate > afSamplingRate*2 ) {
589        return BAD_VALUE;
590    }
591
592    AutoMutex lock(mLock);
593    mSampleRate = rate;
594    mProxy->setSampleRate(rate);
595
596    return NO_ERROR;
597}
598
599uint32_t AudioTrack::getSampleRate() const
600{
601    if (mIsTimed) {
602        return 0;
603    }
604
605    AutoMutex lock(mLock);
606
607    // sample rate can be updated during playback by the offloaded decoder so we need to
608    // query the HAL and update if needed.
609// FIXME use Proxy return channel to update the rate from server and avoid polling here
610    if (isOffloaded()) {
611        if (mOutput != 0) {
612            uint32_t sampleRate = 0;
613            status_t status = AudioSystem::getSamplingRate(mOutput, mStreamType, &sampleRate);
614            if (status == NO_ERROR) {
615                mSampleRate = sampleRate;
616            }
617        }
618    }
619    return mSampleRate;
620}
621
622status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
623{
624    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
625        return INVALID_OPERATION;
626    }
627
628    if (loopCount == 0) {
629        ;
630    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
631            loopEnd - loopStart >= MIN_LOOP) {
632        ;
633    } else {
634        return BAD_VALUE;
635    }
636
637    AutoMutex lock(mLock);
638    // See setPosition() regarding setting parameters such as loop points or position while active
639    if (mState == STATE_ACTIVE) {
640        return INVALID_OPERATION;
641    }
642    setLoop_l(loopStart, loopEnd, loopCount);
643    return NO_ERROR;
644}
645
646void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
647{
648    // FIXME If setting a loop also sets position to start of loop, then
649    //       this is correct.  Otherwise it should be removed.
650    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
651    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
652    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
653}
654
655status_t AudioTrack::setMarkerPosition(uint32_t marker)
656{
657    // The only purpose of setting marker position is to get a callback
658    if (mCbf == NULL || isOffloaded()) {
659        return INVALID_OPERATION;
660    }
661
662    AutoMutex lock(mLock);
663    mMarkerPosition = marker;
664    mMarkerReached = false;
665
666    return NO_ERROR;
667}
668
669status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
670{
671    if (isOffloaded()) {
672        return INVALID_OPERATION;
673    }
674    if (marker == NULL) {
675        return BAD_VALUE;
676    }
677
678    AutoMutex lock(mLock);
679    *marker = mMarkerPosition;
680
681    return NO_ERROR;
682}
683
684status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
685{
686    // The only purpose of setting position update period is to get a callback
687    if (mCbf == NULL || isOffloaded()) {
688        return INVALID_OPERATION;
689    }
690
691    AutoMutex lock(mLock);
692    mNewPosition = mProxy->getPosition() + updatePeriod;
693    mUpdatePeriod = updatePeriod;
694    return NO_ERROR;
695}
696
697status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
698{
699    if (isOffloaded()) {
700        return INVALID_OPERATION;
701    }
702    if (updatePeriod == NULL) {
703        return BAD_VALUE;
704    }
705
706    AutoMutex lock(mLock);
707    *updatePeriod = mUpdatePeriod;
708
709    return NO_ERROR;
710}
711
712status_t AudioTrack::setPosition(uint32_t position)
713{
714    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
715        return INVALID_OPERATION;
716    }
717    if (position > mFrameCount) {
718        return BAD_VALUE;
719    }
720
721    AutoMutex lock(mLock);
722    // Currently we require that the player is inactive before setting parameters such as position
723    // or loop points.  Otherwise, there could be a race condition: the application could read the
724    // current position, compute a new position or loop parameters, and then set that position or
725    // loop parameters, but it would do the "wrong" thing since the position has continued to advance
726    // in the meantime.  If we ever provide a sequencer in the server, we could allow a way for the app
727    // to specify how it wants to handle such scenarios.
728    if (mState == STATE_ACTIVE) {
729        return INVALID_OPERATION;
730    }
731    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
732    mLoopPeriod = 0;
733    // FIXME Check whether loops and setting position are incompatible in old code.
734    // If we use setLoop for both purposes we lose the capability to set the position while looping.
735    mStaticProxy->setLoop(position, mFrameCount, 0);
736
737    return NO_ERROR;
738}
739
740status_t AudioTrack::getPosition(uint32_t *position) const
741{
742    if (position == NULL) {
743        return BAD_VALUE;
744    }
745
746    AutoMutex lock(mLock);
747    if (isOffloaded()) {
748        uint32_t dspFrames = 0;
749
750        if (mOutput != 0) {
751            uint32_t halFrames;
752            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
753        }
754        *position = dspFrames;
755    } else {
756        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
757        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
758                mProxy->getPosition();
759    }
760    return NO_ERROR;
761}
762
763status_t AudioTrack::getBufferPosition(size_t *position)
764{
765    if (mSharedBuffer == 0 || mIsTimed) {
766        return INVALID_OPERATION;
767    }
768    if (position == NULL) {
769        return BAD_VALUE;
770    }
771
772    AutoMutex lock(mLock);
773    *position = mStaticProxy->getBufferPosition();
774    return NO_ERROR;
775}
776
777status_t AudioTrack::reload()
778{
779    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
780        return INVALID_OPERATION;
781    }
782
783    AutoMutex lock(mLock);
784    // See setPosition() regarding setting parameters such as loop points or position while active
785    if (mState == STATE_ACTIVE) {
786        return INVALID_OPERATION;
787    }
788    mNewPosition = mUpdatePeriod;
789    mLoopPeriod = 0;
790    // FIXME The new code cannot reload while keeping a loop specified.
791    // Need to check how the old code handled this, and whether it's a significant change.
792    mStaticProxy->setLoop(0, mFrameCount, 0);
793    return NO_ERROR;
794}
795
796audio_io_handle_t AudioTrack::getOutput()
797{
798    AutoMutex lock(mLock);
799    return mOutput;
800}
801
802// must be called with mLock held
803audio_io_handle_t AudioTrack::getOutput_l()
804{
805    if (mOutput) {
806        return mOutput;
807    } else {
808        return AudioSystem::getOutput(mStreamType,
809                                      mSampleRate, mFormat, mChannelMask, mFlags);
810    }
811}
812
813status_t AudioTrack::attachAuxEffect(int effectId)
814{
815    AutoMutex lock(mLock);
816    status_t status = mAudioTrack->attachAuxEffect(effectId);
817    if (status == NO_ERROR) {
818        mAuxEffectId = effectId;
819    }
820    return status;
821}
822
823// -------------------------------------------------------------------------
824
825// must be called with mLock held
826status_t AudioTrack::createTrack_l(
827        audio_stream_type_t streamType,
828        uint32_t sampleRate,
829        audio_format_t format,
830        size_t frameCount,
831        audio_output_flags_t flags,
832        const sp<IMemory>& sharedBuffer,
833        audio_io_handle_t output,
834        size_t epoch)
835{
836    status_t status;
837    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
838    if (audioFlinger == 0) {
839        ALOGE("Could not get audioflinger");
840        return NO_INIT;
841    }
842
843    // Not all of these values are needed under all conditions, but it is easier to get them all
844
845    uint32_t afLatency;
846    status = AudioSystem::getLatency(output, streamType, &afLatency);
847    if (status != NO_ERROR) {
848        ALOGE("getLatency(%d) failed status %d", output, status);
849        return NO_INIT;
850    }
851
852    size_t afFrameCount;
853    status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
854    if (status != NO_ERROR) {
855        ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
856        return NO_INIT;
857    }
858
859    uint32_t afSampleRate;
860    status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
861    if (status != NO_ERROR) {
862        ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType, status);
863        return NO_INIT;
864    }
865
866    // Client decides whether the track is TIMED (see below), but can only express a preference
867    // for FAST.  Server will perform additional tests.
868    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
869            // either of these use cases:
870            // use case 1: shared buffer
871            (sharedBuffer != 0) ||
872            // use case 2: callback handler
873            (mCbf != NULL))) {
874        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
875        // once denied, do not request again if IAudioTrack is re-created
876        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
877        mFlags = flags;
878    }
879    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
880
881    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && sampleRate != afSampleRate) {
882        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client due to mismatching sample rate (%d vs %d)",
883              sampleRate, afSampleRate);
884        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
885    }
886
887    // The client's AudioTrack buffer is divided into n parts for purposes of wakeup by the server, where
888    //  n = 1   fast track with single buffering; nBuffering is ignored
889    //  n = 2   fast track with double buffering
890    //  n = 2   normal track, no sample rate conversion
891    //  n = 3   normal track, with sample rate conversion
892    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
893    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
894    const uint32_t nBuffering = (sampleRate == afSampleRate) ? 2 : 3;
895
896    mNotificationFramesAct = mNotificationFramesReq;
897
898    if (!audio_is_linear_pcm(format)) {
899
900        if (sharedBuffer != 0) {
901            // Same comment as below about ignoring frameCount parameter for set()
902            frameCount = sharedBuffer->size();
903        } else if (frameCount == 0) {
904            frameCount = afFrameCount;
905        }
906        if (mNotificationFramesAct != frameCount) {
907            mNotificationFramesAct = frameCount;
908        }
909    } else if (sharedBuffer != 0) {
910
911        // Ensure that buffer alignment matches channel count
912        // 8-bit data in shared memory is not currently supported by AudioFlinger
913        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
914        if (mChannelCount > 1) {
915            // More than 2 channels does not require stronger alignment than stereo
916            alignment <<= 1;
917        }
918        if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
919            ALOGE("Invalid buffer alignment: address %p, channel count %u",
920                    sharedBuffer->pointer(), mChannelCount);
921            return BAD_VALUE;
922        }
923
924        // When initializing a shared buffer AudioTrack via constructors,
925        // there's no frameCount parameter.
926        // But when initializing a shared buffer AudioTrack via set(),
927        // there _is_ a frameCount parameter.  We silently ignore it.
928        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
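        // For example (hypothetical size), a 65536-byte stereo 16-bit shared buffer
        // yields 65536 / 2 / sizeof(int16_t) = 16384 frames.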
929
930    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
931
932        // FIXME move these calculations and associated checks to server
933
934        // Ensure that buffer depth covers at least audio hardware latency
935        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
936        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
937                afFrameCount, minBufCount, afSampleRate, afLatency);
938        if (minBufCount <= nBuffering) {
939            minBufCount = nBuffering;
940        }
941
942        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
943        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
944                ", afLatency=%d",
945                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
946
947        if (frameCount == 0) {
948            frameCount = minFrameCount;
949        } else if (frameCount < minFrameCount) {
950            // not ALOGW because it happens all the time when playing key clicks over A2DP
951            ALOGV("Minimum buffer size corrected from %d to %d",
952                     frameCount, minFrameCount);
953            frameCount = minFrameCount;
954        }
955        // Make sure that application is notified with sufficient margin before underrun
956        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
957            mNotificationFramesAct = frameCount/nBuffering;
958        }
959
960    } else {
961        // For fast tracks, the frame count calculations and checks are done by server
962    }
963
964    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
965    if (mIsTimed) {
966        trackFlags |= IAudioFlinger::TRACK_TIMED;
967    }
968
969    pid_t tid = -1;
970    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
971        trackFlags |= IAudioFlinger::TRACK_FAST;
972        if (mAudioTrackThread != 0) {
973            tid = mAudioTrackThread->getTid();
974        }
975    }
976
977    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
978        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
979    }
980
981    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
982                                                      sampleRate,
983                                                      // AudioFlinger only sees 16-bit PCM
984                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
985                                                              AUDIO_FORMAT_PCM_16_BIT : format,
986                                                      mChannelMask,
987                                                      frameCount,
988                                                      &trackFlags,
989                                                      sharedBuffer,
990                                                      output,
991                                                      tid,
992                                                      &mSessionId,
993                                                      mName,
994                                                      mClientUid,
995                                                      &status);
996
997    if (track == 0) {
998        ALOGE("AudioFlinger could not create track, status: %d", status);
999        return status;
1000    }
1001    sp<IMemory> iMem = track->getCblk();
1002    if (iMem == 0) {
1003        ALOGE("Could not get control block");
1004        return NO_INIT;
1005    }
1006    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1007    if (mAudioTrack != 0) {
1008        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1009        mDeathNotifier.clear();
1010    }
1011    mAudioTrack = track;
1012    mCblkMemory = iMem;
1013    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
1014    mCblk = cblk;
1015    size_t temp = cblk->frameCount_;
1016    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1017        // In the current design, the AudioTrack client checks and ensures frame count validity before
1018        // passing it to AudioFlinger, so AudioFlinger should not return a different value except
1019        // for fast tracks, which use a special method of assigning frame count.
1020        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
1021    }
1022    frameCount = temp;
1023    mAwaitBoost = false;
1024    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1025        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1026            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
1027            mAwaitBoost = true;
1028            if (sharedBuffer == 0) {
1029                // Theoretically double-buffering is not required for fast tracks,
1030                // due to tighter scheduling.  But in practice, to accommodate kernels with
1031                // scheduling jitter, and apps with computation jitter, we use double-buffering.
1032                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1033                    mNotificationFramesAct = frameCount/nBuffering;
1034                }
1035            }
1036        } else {
1037            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1038            // once denied, do not request again if IAudioTrack is re-created
1039            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
1040            mFlags = flags;
1041            if (sharedBuffer == 0) {
1042                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1043                    mNotificationFramesAct = frameCount/nBuffering;
1044                }
1045            }
1046        }
1047    }
1048    if (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1049        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1050            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1051        } else {
1052            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1053            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1054            mFlags = flags;
1055            return NO_INIT;
1056        }
1057    }
1058
1059    mRefreshRemaining = true;
1060
1061    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1062    // is the value of pointer() for the shared buffer, otherwise buffers points
1063    // immediately after the control block.  This address is for the mapping within client
1064    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1065    void* buffers;
1066    if (sharedBuffer == 0) {
1067        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1068    } else {
1069        buffers = sharedBuffer->pointer();
1070    }
1071
1072    mAudioTrack->attachAuxEffect(mAuxEffectId);
1073    // FIXME don't believe this lie
1074    mLatency = afLatency + (1000*frameCount) / sampleRate;
1075    mFrameCount = frameCount;
1076    // If IAudioTrack is re-created, don't let the requested frameCount
1077    // decrease.  This can confuse clients that cache frameCount().
1078    if (frameCount > mReqFrameCount) {
1079        mReqFrameCount = frameCount;
1080    }
1081
1082    // update proxy
1083    if (sharedBuffer == 0) {
1084        mStaticProxy.clear();
1085        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1086    } else {
1087        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1088        mProxy = mStaticProxy;
1089    }
1090    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
1091            uint16_t(mVolume[LEFT] * 0x1000));
1092    mProxy->setSendLevel(mSendLevel);
1093    mProxy->setSampleRate(mSampleRate);
1094    mProxy->setEpoch(epoch);
1095    mProxy->setMinimum(mNotificationFramesAct);
1096
1097    mDeathNotifier = new DeathNotifier(this);
1098    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1099
1100    return NO_ERROR;
1101}
1102
1103status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1104{
1105    if (audioBuffer == NULL) {
1106        return BAD_VALUE;
1107    }
1108    if (mTransfer != TRANSFER_OBTAIN) {
1109        audioBuffer->frameCount = 0;
1110        audioBuffer->size = 0;
1111        audioBuffer->raw = NULL;
1112        return INVALID_OPERATION;
1113    }
1114
1115    const struct timespec *requested;
1116    if (waitCount == -1) {
1117        requested = &ClientProxy::kForever;
1118    } else if (waitCount == 0) {
1119        requested = &ClientProxy::kNonBlocking;
1120    } else if (waitCount > 0) {
1121        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1122        struct timespec timeout;
1123        timeout.tv_sec = ms / 1000;
1124        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
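        // For example (hypothetical waitCount), waitCount = 250 with WAIT_PERIOD_MS = 10
        // gives ms = 2500, i.e. timeout.tv_sec = 2 and timeout.tv_nsec = 500000000.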
1125        requested = &timeout;
1126    } else {
1127        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1128        requested = NULL;
1129    }
1130    return obtainBuffer(audioBuffer, requested);
1131}
1132
1133status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1134        struct timespec *elapsed, size_t *nonContig)
1135{
1136    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1137    uint32_t oldSequence = 0;
1138    uint32_t newSequence;
1139
1140    Proxy::Buffer buffer;
1141    status_t status = NO_ERROR;
1142
1143    static const int32_t kMaxTries = 5;
1144    int32_t tryCounter = kMaxTries;
1145
1146    do {
1147        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1148        // keep them from going away if another thread re-creates the track during obtainBuffer()
1149        sp<AudioTrackClientProxy> proxy;
1150        sp<IMemory> iMem;
1151
1152        {   // start of lock scope
1153            AutoMutex lock(mLock);
1154
1155            newSequence = mSequence;
1156            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1157            if (status == DEAD_OBJECT) {
1158                // re-create track, unless someone else has already done so
1159                if (newSequence == oldSequence) {
1160                    status = restoreTrack_l("obtainBuffer");
1161                    if (status != NO_ERROR) {
1162                        buffer.mFrameCount = 0;
1163                        buffer.mRaw = NULL;
1164                        buffer.mNonContig = 0;
1165                        break;
1166                    }
1167                }
1168            }
1169            oldSequence = newSequence;
1170
1171            // Keep the extra references
1172            proxy = mProxy;
1173            iMem = mCblkMemory;
1174
1175            if (mState == STATE_STOPPING) {
1176                status = -EINTR;
1177                buffer.mFrameCount = 0;
1178                buffer.mRaw = NULL;
1179                buffer.mNonContig = 0;
1180                break;
1181            }
1182
1183            // Non-blocking if track is stopped or paused
1184            if (mState != STATE_ACTIVE) {
1185                requested = &ClientProxy::kNonBlocking;
1186            }
1187
1188        }   // end of lock scope
1189
1190        buffer.mFrameCount = audioBuffer->frameCount;
1191        // FIXME starts the requested timeout and elapsed over from scratch
1192        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1193
1194    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1195
1196    audioBuffer->frameCount = buffer.mFrameCount;
1197    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1198    audioBuffer->raw = buffer.mRaw;
1199    if (nonContig != NULL) {
1200        *nonContig = buffer.mNonContig;
1201    }
1202    return status;
1203}
1204
1205void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1206{
1207    if (mTransfer == TRANSFER_SHARED) {
1208        return;
1209    }
1210
1211    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1212    if (stepCount == 0) {
1213        return;
1214    }
1215
1216    Proxy::Buffer buffer;
1217    buffer.mFrameCount = stepCount;
1218    buffer.mRaw = audioBuffer->raw;
1219
1220    AutoMutex lock(mLock);
1221    mInUnderrun = false;
1222    mProxy->releaseBuffer(&buffer);
1223
1224    // restart track if it was disabled by audioflinger due to previous underrun
1225    if (mState == STATE_ACTIVE) {
1226        audio_track_cblk_t* cblk = mCblk;
1227        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1228            ALOGW("releaseBuffer() track %p name=%s disabled due to previous underrun, restarting",
1229                    this, mName.string());
1230            // FIXME ignoring status
1231            mAudioTrack->start();
1232        }
1233    }
1234}
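// Usage sketch (illustrative, valid only for a track created with TRANSFER_OBTAIN;
// source and framesWanted are placeholders):
//     AudioTrack::Buffer buf;
//     buf.frameCount = framesWanted;
//     if (track->obtainBuffer(&buf, -1 /*wait forever*/) == NO_ERROR) {
//         memcpy(buf.raw, source, buf.size);
//         track->releaseBuffer(&buf);
//     }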
1235
1236// -------------------------------------------------------------------------
1237
1238ssize_t AudioTrack::write(const void* buffer, size_t userSize)
1239{
1240    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1241        return INVALID_OPERATION;
1242    }
1243
1244    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1245        // Sanity-check: user is most likely passing an error code, and it would
1246        // make the return value ambiguous (actualSize vs error).
1247        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d))", buffer, userSize, userSize);
1248        return BAD_VALUE;
1249    }
1250
1251    size_t written = 0;
1252    Buffer audioBuffer;
1253
1254    while (userSize >= mFrameSize) {
1255        audioBuffer.frameCount = userSize / mFrameSize;
1256
1257        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
1258        if (err < 0) {
1259            if (written > 0) {
1260                break;
1261            }
1262            return ssize_t(err);
1263        }
1264
1265        size_t toWrite;
1266        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1267            // Divide capacity by 2 to take expansion into account
1268            toWrite = audioBuffer.size >> 1;
1269            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
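            // toWrite counts source (8-bit) bytes, which equals the number of samples
            // expanded; the destination receives 2 * toWrite bytes.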
1270        } else {
1271            toWrite = audioBuffer.size;
1272            memcpy(audioBuffer.i8, buffer, toWrite);
1273        }
1274        buffer = ((const char *) buffer) + toWrite;
1275        userSize -= toWrite;
1276        written += toWrite;
1277
1278        releaseBuffer(&audioBuffer);
1279    }
1280
1281    return written;
1282}
1283
1284// -------------------------------------------------------------------------
1285
1286TimedAudioTrack::TimedAudioTrack() {
1287    mIsTimed = true;
1288}
1289
1290status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1291{
1292    AutoMutex lock(mLock);
1293    status_t result = UNKNOWN_ERROR;
1294
1295#if 1
1296    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1297    // while we are accessing the cblk
1298    sp<IAudioTrack> audioTrack = mAudioTrack;
1299    sp<IMemory> iMem = mCblkMemory;
1300#endif
1301
1302    // If the track is not invalid already, try to allocate a buffer.  If the alloc
1303    // fails, indicating that the server is dead, flag the track as invalid so
1304    // we can attempt to restore it in just a bit.
1305    audio_track_cblk_t* cblk = mCblk;
1306    if (!(cblk->mFlags & CBLK_INVALID)) {
1307        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1308        if (result == DEAD_OBJECT) {
1309            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1310        }
1311    }
1312
1313    // If the track is invalid at this point, attempt to restore it and try the
1314    // allocation one more time.
1315    if (cblk->mFlags & CBLK_INVALID) {
1316        result = restoreTrack_l("allocateTimedBuffer");
1317
1318        if (result == NO_ERROR) {
1319            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1320        }
1321    }
1322
1323    return result;
1324}
1325
1326status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1327                                           int64_t pts)
1328{
1329    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1330    {
1331        AutoMutex lock(mLock);
1332        audio_track_cblk_t* cblk = mCblk;
1333        // restart track if it was disabled by audioflinger due to previous underrun
1334        if (buffer->size() != 0 && status == NO_ERROR &&
1335                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1336            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1337            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1338            // FIXME ignoring status
1339            mAudioTrack->start();
1340        }
1341    }
1342    return status;
1343}
1344
1345status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1346                                                TargetTimeline target)
1347{
1348    return mAudioTrack->setMediaTimeTransform(xform, target);
1349}
1350
1351// -------------------------------------------------------------------------
1352
1353nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
1354{
1355    // Currently the AudioTrack thread is not created if there are no callbacks.
1356    // Would it ever make sense to run the thread, even without callbacks?
1357    // If so, then replace this by checks at each use for mCbf != NULL.
1358    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1359
1360    mLock.lock();
1361    if (mAwaitBoost) {
1362        mAwaitBoost = false;
1363        mLock.unlock();
1364        static const int32_t kMaxTries = 5;
1365        int32_t tryCounter = kMaxTries;
1366        uint32_t pollUs = 10000;
1367        do {
1368            int policy = sched_getscheduler(0);
1369            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1370                break;
1371            }
1372            usleep(pollUs);
1373            pollUs <<= 1;
1374        } while (tryCounter-- > 0);
1375        if (tryCounter < 0) {
1376            ALOGE("did not receive expected priority boost on time");
1377        }
1378        // Run again immediately
1379        return 0;
1380    }
1381
1382    // Can only reference mCblk while locked
1383    int32_t flags = android_atomic_and(
1384        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1385
1386    // Check for track invalidation
1387    if (flags & CBLK_INVALID) {
1388        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear
1389        // the AudioSystem cache. We should not exit here, but after calling the callback, so
1390        // that the upper layers can recreate the track.
1391        if (!isOffloaded() || (mSequence == mObservedSequence)) {
1392            status_t status = restoreTrack_l("processAudioBuffer");
1393            mLock.unlock();
1394            // Run again immediately, but with a new IAudioTrack
1395            return 0;
1396        }
1397    }
1398
1399    bool waitStreamEnd = mState == STATE_STOPPING;
1400    bool active = mState == STATE_ACTIVE;
1401
1402    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1403    bool newUnderrun = false;
1404    if (flags & CBLK_UNDERRUN) {
1405#if 0
1406        // Currently in shared buffer mode, when the server reaches the end of buffer,
1407        // the track stays active in continuous underrun state.  It's up to the application
1408        // to pause or stop the track, or set the position to a new offset within buffer.
1409        // This was some experimental code to auto-pause on underrun.   Keeping it here
1410        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1411        if (mTransfer == TRANSFER_SHARED) {
1412            mState = STATE_PAUSED;
1413            active = false;
1414        }
1415#endif
1416        if (!mInUnderrun) {
1417            mInUnderrun = true;
1418            newUnderrun = true;
1419        }
1420    }
1421
1422    // Get current position of server
1423    size_t position = mProxy->getPosition();
1424
1425    // Manage marker callback
1426    bool markerReached = false;
1427    size_t markerPosition = mMarkerPosition;
1428    // FIXME fails for wraparound, need 64 bits
1429    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1430        mMarkerReached = markerReached = true;
1431    }
1432
1433    // Determine number of new position callback(s) that will be needed, while locked
1434    size_t newPosCount = 0;
1435    size_t newPosition = mNewPosition;
1436    size_t updatePeriod = mUpdatePeriod;
1437    // FIXME fails for wraparound, need 64 bits
1438    if (updatePeriod > 0 && position >= newPosition) {
1439        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1440        mNewPosition += updatePeriod * newPosCount;
1441    }
1442
1443    // Cache other fields that will be needed soon
1444    uint32_t loopPeriod = mLoopPeriod;
1445    uint32_t sampleRate = mSampleRate;
1446    size_t notificationFrames = mNotificationFramesAct;
1447    if (mRefreshRemaining) {
1448        mRefreshRemaining = false;
1449        mRemainingFrames = notificationFrames;
1450        mRetryOnPartialBuffer = false;
1451    }
1452    size_t misalignment = mProxy->getMisalignment();
1453    uint32_t sequence = mSequence;
1454
1455    // These fields don't need to be cached, because they are assigned only by set():
1456    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1457    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1458
1459    mLock.unlock();
1460
1461    if (waitStreamEnd) {
1462        AutoMutex lock(mLock);
1463
1464        sp<AudioTrackClientProxy> proxy = mProxy;
1465        sp<IMemory> iMem = mCblkMemory;
1466
1467        struct timespec timeout;
1468        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1469        timeout.tv_nsec = 0;
1470
1471        mLock.unlock();
1472        status_t status = mProxy->waitStreamEndDone(&timeout);
1473        mLock.lock();
1474        switch (status) {
1475        case NO_ERROR:
1476        case DEAD_OBJECT:
1477        case TIMED_OUT:
1478            mLock.unlock();
1479            mCbf(EVENT_STREAM_END, mUserData, NULL);
1480            mLock.lock();
1481            if (mState == STATE_STOPPING) {
1482                mState = STATE_STOPPED;
1483                if (status != DEAD_OBJECT) {
1484                   return NS_INACTIVE;
1485                }
1486            }
1487            return 0;
1488        default:
1489            return 0;
1490        }
1491    }
1492
1493    // perform callbacks while unlocked
1494    if (newUnderrun) {
1495        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1496    }
1497    // FIXME we will miss loops if loop cycle was signaled several times since last call
1498    //       to processAudioBuffer()
1499    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1500        mCbf(EVENT_LOOP_END, mUserData, NULL);
1501    }
1502    if (flags & CBLK_BUFFER_END) {
1503        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1504    }
1505    if (markerReached) {
1506        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1507    }
1508    while (newPosCount > 0) {
1509        size_t temp = newPosition;
1510        mCbf(EVENT_NEW_POS, mUserData, &temp);
1511        newPosition += updatePeriod;
1512        newPosCount--;
1513    }
1514
1515    if (mObservedSequence != sequence) {
1516        mObservedSequence = sequence;
1517        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1518        // for offloaded tracks, just wait for the upper layers to recreate the track
1519        if (isOffloaded()) {
1520            return NS_INACTIVE;
1521        }
1522    }
1523
1524    // if inactive, then don't run me again until re-started
1525    if (!active) {
1526        return NS_INACTIVE;
1527    }
1528
1529    // Compute the estimated time until the next timed event (position, markers, loops)
1530    // FIXME only for non-compressed audio
1531    uint32_t minFrames = ~0;
1532    if (!markerReached && position < markerPosition) {
1533        minFrames = markerPosition - position;
1534    }
1535    if (loopPeriod > 0 && loopPeriod < minFrames) {
1536        minFrames = loopPeriod;
1537    }
1538    if (updatePeriod > 0 && updatePeriod < minFrames) {
1539        minFrames = updatePeriod;
1540    }
1541
1542    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1543    static const uint32_t kPoll = 0;
1544    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1545        minFrames = kPoll * notificationFrames;
1546    }
1547
1548    // Convert frame units to time units
1549    nsecs_t ns = NS_WHENEVER;
1550    if (minFrames != (uint32_t) ~0) {
1551        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1552        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1553        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
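        // For example (hypothetical values), minFrames = 4410 at sampleRate = 44100 represents
        // 100 ms of audio, so ns = 100000000 + 10000000 = 110000000 ns (110 ms).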
1554    }
1555
1556    // If not supplying data by EVENT_MORE_DATA, then we're done
1557    if (mTransfer != TRANSFER_CALLBACK) {
1558        return ns;
1559    }
1560
1561    struct timespec timeout;
1562    const struct timespec *requested = &ClientProxy::kForever;
1563    if (ns != NS_WHENEVER) {
1564        timeout.tv_sec = ns / 1000000000LL;
1565        timeout.tv_nsec = ns % 1000000000LL;
1566        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1567        requested = &timeout;
1568    }
1569
1570    while (mRemainingFrames > 0) {
1571
1572        Buffer audioBuffer;
1573        audioBuffer.frameCount = mRemainingFrames;
1574        size_t nonContig;
1575        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1576        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1577                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1578        requested = &ClientProxy::kNonBlocking;
1579        size_t avail = audioBuffer.frameCount + nonContig;
1580        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1581                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1582        if (err != NO_ERROR) {
1583            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1584                    (isOffloaded() && (err == DEAD_OBJECT))) {
1585                return 0;
1586            }
1587            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1588            return NS_NEVER;
1589        }
1590
1591        if (mRetryOnPartialBuffer && !isOffloaded()) {
1592            mRetryOnPartialBuffer = false;
1593            if (avail < mRemainingFrames) {
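                // 1.1 seconds per second of missing frames: presumably a ~10% margin over the
                // exact time for the server to make (mRemainingFrames - avail) frames available.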
1594                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1595                if (ns < 0 || myns < ns) {
1596                    ns = myns;
1597                }
1598                return ns;
1599            }
1600        }
1601
1602        // Divide buffer size by 2 to take into account the expansion
1603        // due to 8 to 16 bit conversion: the callback must fill only half
1604        // of the destination buffer
1605        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1606            audioBuffer.size >>= 1;
1607        }
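        // Illustrative example (hypothetical sizes): a 1024-byte buffer sized for 16-bit frames
        // is offered to the callback as 512 bytes of 8-bit samples; after the in-place expansion
        // below, audioBuffer.size is doubled back to 1024 bytes before the frames are released.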
1608
1609        size_t reqSize = audioBuffer.size;
1610        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1611        size_t writtenSize = audioBuffer.size;
1612        size_t writtenFrames = writtenSize / mFrameSize;
1613
1614        // Sanity check on returned size
1615        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1616            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1617                    reqSize, (int) writtenSize);
1618            return NS_NEVER;
1619        }
1620
1621        if (writtenSize == 0) {
1622            // The callback is done filling buffers.
1623            // Keep this thread going to handle timed events and
1624            // still try to get more data at intervals of WAIT_PERIOD_MS,
1625            // but don't busy-loop and hog the CPU, so wait.
1626            return WAIT_PERIOD_MS * 1000000LL;
1627        }
1628
1629        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1630            // 8 to 16 bit conversion, note that source and destination are the same address
1631            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1632            audioBuffer.size <<= 1;
1633        }
1634
1635        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1636        audioBuffer.frameCount = releasedFrames;
1637        mRemainingFrames -= releasedFrames;
1638        if (misalignment >= releasedFrames) {
1639            misalignment -= releasedFrames;
1640        } else {
1641            misalignment = 0;
1642        }
1643
1644        releaseBuffer(&audioBuffer);
1645
1646        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1647        // if the callback doesn't accept the full chunk
1648        if (writtenSize < reqSize) {
1649            continue;
1650        }
1651
1652        // There could be enough non-contiguous frames available to satisfy the remaining request
1653        if (mRemainingFrames <= nonContig) {
1654            continue;
1655        }
1656
1657#if 0
1658        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1659        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1660        // that total to a sum == notificationFrames.
1661        if (0 < misalignment && misalignment <= mRemainingFrames) {
1662            mRemainingFrames = misalignment;
1663            return (mRemainingFrames * 1100000000LL) / sampleRate;
1664        }
1665#endif
1666
1667    }
1668    mRemainingFrames = notificationFrames;
1669    mRetryOnPartialBuffer = true;
1670
1671    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1672    return 0;
1673}
1674
1675status_t AudioTrack::restoreTrack_l(const char *from)
1676{
1677    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1678          isOffloaded() ? "Offloaded" : "PCM", from);
1679    ++mSequence;
1680    status_t result;
1681
1682    // refresh the audio configuration cache in this process to make sure we get new
1683    // output parameters in getOutput_l() and createTrack_l()
1684    AudioSystem::clearAudioConfigCache();
1685
1686    if (isOffloaded()) {
1687        return DEAD_OBJECT;
1688    }
1689
1690    // force a new output query from the audio policy manager
1691    mOutput = 0;
1692    audio_io_handle_t output = getOutput_l();
1693
1694    // If a new IAudioTrack is created, createTrack_l() will modify the
1695    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1696    // It will also delete the strong references on the previous IAudioTrack and IMemory.
1697
1698    // take the frames that will be lost by track recreation into account in the saved position
1699    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1700    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
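    // The saved position is passed to createTrack_l() as the epoch, presumably so that the
    // position reported for the new IAudioTrack continues from where the old track left off
    // (getTimestamp() adds the proxy epoch back in).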
1701    result = createTrack_l(mStreamType,
1702                           mSampleRate,
1703                           mFormat,
1704                           mReqFrameCount,  // so that frame count never goes down
1705                           mFlags,
1706                           mSharedBuffer,
1707                           output,
1708                           position /*epoch*/);
1709
1710    if (result == NO_ERROR) {
1711        // continue playback from last known position, but
1712        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1713        if (mStaticProxy != NULL) {
1714            mLoopPeriod = 0;
1715            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1716        }
1717        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1718        //       track destruction have been played? This is critical for the SoundPool implementation.
1719        //       This must be broken, and needs to be tested/debugged.
1720#if 0
1721        // restore write index and set other indexes to reflect empty buffer status
1722        if (!strcmp(from, "start")) {
1723            // Make sure that a client relying on callback events indicating underrun or
1724            // the actual number of audio frames played (e.g. SoundPool) receives them.
1725            if (mSharedBuffer == 0) {
1726                // restart playback even if buffer is not completely filled.
1727                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1728            }
1729        }
1730#endif
1731        if (mState == STATE_ACTIVE) {
1732            result = mAudioTrack->start();
1733        }
1734    }
1735    if (result != NO_ERROR) {
1736        // Use of direct and offloaded output streams is ref counted by the audio policy manager.
1737        // Since getOutput() was called above and caused an output stream to be opened,
1738        // we need to release it.
1739        AudioSystem::releaseOutput(output);
1740        ALOGW("restoreTrack_l() failed status %d", result);
1741        mState = STATE_STOPPED;
1742    }
1743
1744    return result;
1745}
1746
1747status_t AudioTrack::setParameters(const String8& keyValuePairs)
1748{
1749    AutoMutex lock(mLock);
1750    return mAudioTrack->setParameters(keyValuePairs);
1751}
1752
1753status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1754{
1755    AutoMutex lock(mLock);
1756    // FIXME not implemented for fast tracks; should use proxy and SSQ
1757    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1758        return INVALID_OPERATION;
1759    }
1760    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1761        return INVALID_OPERATION;
1762    }
1763    status_t status = mAudioTrack->getTimestamp(timestamp);
1764    if (status == NO_ERROR) {
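        // Add the proxy epoch so the reported position is relative to the client's stream
        // rather than to the current IAudioTrack, which presumably starts counting from zero
        // after restoreTrack_l().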
1765        timestamp.mPosition += mProxy->getEpoch();
1766    }
1767    return status;
1768}
1769
1770String8 AudioTrack::getParameters(const String8& keys)
1771{
1772    if (mOutput) {
1773        return AudioSystem::getParameters(mOutput, keys);
1774    } else {
1775        return String8::empty();
1776    }
1777}
1778
1779status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
1780{
1781
1782    const size_t SIZE = 256;
1783    char buffer[SIZE];
1784    String8 result;
1785
1786    result.append(" AudioTrack::dump\n");
1787    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1788            mVolume[0], mVolume[1]);
1789    result.append(buffer);
1790    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
1791            mChannelCount, mFrameCount);
1792    result.append(buffer);
1793    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1794    result.append(buffer);
1795    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1796    result.append(buffer);
1797    ::write(fd, result.string(), result.size());
1798    return NO_ERROR;
1799}
1800
1801uint32_t AudioTrack::getUnderrunFrames() const
1802{
1803    AutoMutex lock(mLock);
1804    return mProxy->getUnderrunFrames();
1805}
1806
1807// =========================================================================
1808
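// Called when the server-side IAudioTrack binder dies.  Forwarding to the proxy presumably
// wakes any thread blocked on the control block so the dead server is noticed promptly.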
1809void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
1810{
1811    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1812    if (audioTrack != 0) {
1813        AutoMutex lock(audioTrack->mLock);
1814        audioTrack->mProxy->binderDied();
1815    }
1816}
1817
1818// =========================================================================
1819
1820AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1821    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
1822      mIgnoreNextPausedInt(false)
1823{
1824}
1825
1826AudioTrack::AudioTrackThread::~AudioTrackThread()
1827{
1828}
1829
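// threadLoop() interprets the value returned by processAudioBuffer() as follows:
//   0            run again immediately
//   NS_INACTIVE  pause the thread until resume() is called
//   NS_NEVER     exit the thread loop
//   NS_WHENEVER  no deadline; currently treated as a 1 s poll (see the FIXME below)
//   > 0          nanoseconds to sleep before the next run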
1830bool AudioTrack::AudioTrackThread::threadLoop()
1831{
1832    {
1833        AutoMutex _l(mMyLock);
1834        if (mPaused) {
1835            mMyCond.wait(mMyLock);
1836            // caller will check for exitPending()
1837            return true;
1838        }
1839        if (mIgnoreNextPausedInt) {
1840            mIgnoreNextPausedInt = false;
1841            mPausedInt = false;
1842        }
1843        if (mPausedInt) {
1844            if (mPausedNs > 0) {
1845                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
1846            } else {
1847                mMyCond.wait(mMyLock);
1848            }
1849            mPausedInt = false;
1850            return true;
1851        }
1852    }
1853    nsecs_t ns = mReceiver.processAudioBuffer(this);
1854    switch (ns) {
1855    case 0:
1856        return true;
1857    case NS_INACTIVE:
1858        pauseInternal();
1859        return true;
1860    case NS_NEVER:
1861        return false;
1862    case NS_WHENEVER:
1863        // FIXME increase poll interval, or make event-driven
1864        ns = 1000000000LL;
1865        // fall through
1866    default:
1867        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
1868        pauseInternal(ns);
1869        return true;
1870    }
1871}
1872
1873void AudioTrack::AudioTrackThread::requestExit()
1874{
1875    // must be in this order to avoid a race condition
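    // (If resume() ran first, threadLoop() could wake, see exitPending() still false, and go
    // back to waiting before the exit flag is set, missing the wakeup.)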
1876    Thread::requestExit();
1877    resume();
1878}
1879
1880void AudioTrack::AudioTrackThread::pause()
1881{
1882    AutoMutex _l(mMyLock);
1883    mPaused = true;
1884}
1885
1886void AudioTrack::AudioTrackThread::resume()
1887{
1888    AutoMutex _l(mMyLock);
1889    mIgnoreNextPausedInt = true;
1890    if (mPaused || mPausedInt) {
1891        mPaused = false;
1892        mPausedInt = false;
1893        mMyCond.signal();
1894    }
1895}
1896
1897void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
1898{
1899    AutoMutex _l(mMyLock);
1900    mPausedInt = true;
1901    mPausedNs = ns;
1902}
1903
1904} // namespace android
1905