AudioTrack.cpp revision 200092b7f21d2b98f30b800e79d152636f9ba225
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/primitives.h>
26#include <binder/IPCThreadState.h>
27#include <media/AudioTrack.h>
28#include <utils/Log.h>
29#include <private/media/AudioTrackShared.h>
30#include <media/IAudioFlinger.h>
31#include <media/AudioResamplerPublic.h>
32
33#define WAIT_PERIOD_MS                  10
34#define WAIT_STREAM_END_TIMEOUT_SEC     120
35
36
37namespace android {
38// ---------------------------------------------------------------------------
39
40// static
41status_t AudioTrack::getMinFrameCount(
42        size_t* frameCount,
43        audio_stream_type_t streamType,
44        uint32_t sampleRate)
45{
46    if (frameCount == NULL) {
47        return BAD_VALUE;
48    }
49
50    // FIXME merge with similar code in createTrack_l(), except we're missing
51    //       some information here that is available in createTrack_l():
52    //          audio_io_handle_t output
53    //          audio_format_t format
54    //          audio_channel_mask_t channelMask
55    //          audio_output_flags_t flags
56    uint32_t afSampleRate;
57    status_t status;
58    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
59    if (status != NO_ERROR) {
60        ALOGE("Unable to query output sample rate for stream type %d; status %d",
61                streamType, status);
62        return status;
63    }
64    size_t afFrameCount;
65    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
66    if (status != NO_ERROR) {
67        ALOGE("Unable to query output frame count for stream type %d; status %d",
68                streamType, status);
69        return status;
70    }
71    uint32_t afLatency;
72    status = AudioSystem::getOutputLatency(&afLatency, streamType);
73    if (status != NO_ERROR) {
74        ALOGE("Unable to query output latency for stream type %d; status %d",
75                streamType, status);
76        return status;
77    }
78
79    // Ensure that buffer depth covers at least audio hardware latency
80    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
81    if (minBufCount < 2) {
82        minBufCount = 2;
83    }
84
85    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
86            afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
87    // The formula above should always produce a non-zero value, but return an error
88    // in the unlikely event that it does not, as that's part of the API contract.
89    if (*frameCount == 0) {
90        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
91                streamType, sampleRate);
92        return BAD_VALUE;
93    }
94    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
95            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
96    return NO_ERROR;
97}
98
99// ---------------------------------------------------------------------------
100
101AudioTrack::AudioTrack()
102    : mStatus(NO_INIT),
103      mIsTimed(false),
104      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
105      mPreviousSchedulingGroup(SP_DEFAULT),
106      mPausedPosition(0)
107{
108    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
109    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
110    mAttributes.flags = 0x0;
111    strcpy(mAttributes.tags, "");
112}
113
114AudioTrack::AudioTrack(
115        audio_stream_type_t streamType,
116        uint32_t sampleRate,
117        audio_format_t format,
118        audio_channel_mask_t channelMask,
119        size_t frameCount,
120        audio_output_flags_t flags,
121        callback_t cbf,
122        void* user,
123        uint32_t notificationFrames,
124        int sessionId,
125        transfer_type transferType,
126        const audio_offload_info_t *offloadInfo,
127        int uid,
128        pid_t pid,
129        const audio_attributes_t* pAttributes)
130    : mStatus(NO_INIT),
131      mIsTimed(false),
132      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
133      mPreviousSchedulingGroup(SP_DEFAULT),
134      mPausedPosition(0)
135{
136    mStatus = set(streamType, sampleRate, format, channelMask,
137            frameCount, flags, cbf, user, notificationFrames,
138            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
139            offloadInfo, uid, pid, pAttributes);
140}
141
142AudioTrack::AudioTrack(
143        audio_stream_type_t streamType,
144        uint32_t sampleRate,
145        audio_format_t format,
146        audio_channel_mask_t channelMask,
147        const sp<IMemory>& sharedBuffer,
148        audio_output_flags_t flags,
149        callback_t cbf,
150        void* user,
151        uint32_t notificationFrames,
152        int sessionId,
153        transfer_type transferType,
154        const audio_offload_info_t *offloadInfo,
155        int uid,
156        pid_t pid,
157        const audio_attributes_t* pAttributes)
158    : mStatus(NO_INIT),
159      mIsTimed(false),
160      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
161      mPreviousSchedulingGroup(SP_DEFAULT),
162      mPausedPosition(0)
163{
164    mStatus = set(streamType, sampleRate, format, channelMask,
165            0 /*frameCount*/, flags, cbf, user, notificationFrames,
166            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
167            uid, pid, pAttributes);
168}
169
170AudioTrack::~AudioTrack()
171{
172    if (mStatus == NO_ERROR) {
173        // Make sure that callback function exits in the case where
174        // it is looping on buffer full condition in obtainBuffer().
175        // Otherwise the callback thread will never exit.
176        stop();
177        if (mAudioTrackThread != 0) {
178            mProxy->interrupt();
179            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
180            mAudioTrackThread->requestExitAndWait();
181            mAudioTrackThread.clear();
182        }
183        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
184        mAudioTrack.clear();
185        mCblkMemory.clear();
186        mSharedBuffer.clear();
187        IPCThreadState::self()->flushCommands();
188        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
189                IPCThreadState::self()->getCallingPid(), mClientPid);
190        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
191    }
192}
193
194status_t AudioTrack::set(
195        audio_stream_type_t streamType,
196        uint32_t sampleRate,
197        audio_format_t format,
198        audio_channel_mask_t channelMask,
199        size_t frameCount,
200        audio_output_flags_t flags,
201        callback_t cbf,
202        void* user,
203        uint32_t notificationFrames,
204        const sp<IMemory>& sharedBuffer,
205        bool threadCanCallJava,
206        int sessionId,
207        transfer_type transferType,
208        const audio_offload_info_t *offloadInfo,
209        int uid,
210        pid_t pid,
211        const audio_attributes_t* pAttributes)
212{
213    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
214          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
215          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
216          sessionId, transferType);
217
218    switch (transferType) {
219    case TRANSFER_DEFAULT:
220        if (sharedBuffer != 0) {
221            transferType = TRANSFER_SHARED;
222        } else if (cbf == NULL || threadCanCallJava) {
223            transferType = TRANSFER_SYNC;
224        } else {
225            transferType = TRANSFER_CALLBACK;
226        }
227        break;
228    case TRANSFER_CALLBACK:
229        if (cbf == NULL || sharedBuffer != 0) {
230            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
231            return BAD_VALUE;
232        }
233        break;
234    case TRANSFER_OBTAIN:
235    case TRANSFER_SYNC:
236        if (sharedBuffer != 0) {
237            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
238            return BAD_VALUE;
239        }
240        break;
241    case TRANSFER_SHARED:
242        if (sharedBuffer == 0) {
243            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
244            return BAD_VALUE;
245        }
246        break;
247    default:
248        ALOGE("Invalid transfer type %d", transferType);
249        return BAD_VALUE;
250    }
251    mSharedBuffer = sharedBuffer;
252    mTransfer = transferType;
253
254    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
255            sharedBuffer->size());
256
257    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
258
259    AutoMutex lock(mLock);
260
261    // invariant that mAudioTrack != 0 is true only after set() returns successfully
262    if (mAudioTrack != 0) {
263        ALOGE("Track already in use");
264        return INVALID_OPERATION;
265    }
266
267    // handle default values first.
268    if (streamType == AUDIO_STREAM_DEFAULT) {
269        streamType = AUDIO_STREAM_MUSIC;
270    }
271
272    if (pAttributes == NULL) {
273        if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
274            ALOGE("Invalid stream type %d", streamType);
275            return BAD_VALUE;
276        }
277        setAttributesFromStreamType(streamType);
278        mStreamType = streamType;
279    } else {
280        if (!isValidAttributes(pAttributes)) {
281            ALOGE("Invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
282                pAttributes->usage, pAttributes->content_type, pAttributes->flags,
283                pAttributes->tags);
284        }
285        // stream type shouldn't be looked at, this track has audio attributes
286        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
287        setStreamTypeFromAttributes(mAttributes);
288        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
289                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
290    }
291
292    status_t status;
293    if (sampleRate == 0) {
294        status = AudioSystem::getOutputSamplingRateForAttr(&sampleRate, &mAttributes);
295        if (status != NO_ERROR) {
296            ALOGE("Could not get output sample rate for stream type %d; status %d",
297                    mStreamType, status);
298            return status;
299        }
300    }
301    mSampleRate = sampleRate;
302
303    // these below should probably come from the audioFlinger too...
304    if (format == AUDIO_FORMAT_DEFAULT) {
305        format = AUDIO_FORMAT_PCM_16_BIT;
306    }
307
308    // validate parameters
309    if (!audio_is_valid_format(format)) {
310        ALOGE("Invalid format %#x", format);
311        return BAD_VALUE;
312    }
313    mFormat = format;
314
315    if (!audio_is_output_channel(channelMask)) {
316        ALOGE("Invalid channel mask %#x", channelMask);
317        return BAD_VALUE;
318    }
319    mChannelMask = channelMask;
320    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
321    mChannelCount = channelCount;
322
323    // AudioFlinger does not currently support 8-bit data in shared memory
324    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
325        ALOGE("8-bit data in shared memory is not supported");
326        return BAD_VALUE;
327    }
328
329    // force direct flag if format is not linear PCM
330    // or offload was requested
331    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
332            || !audio_is_linear_pcm(format)) {
333        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
334                    ? "Offload request, forcing to Direct Output"
335                    : "Not linear PCM, forcing to Direct Output");
336        flags = (audio_output_flags_t)
337                // FIXME why can't we allow direct AND fast?
338                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
339    }
340    // only allow deep buffering for music stream type
341    if (mStreamType != AUDIO_STREAM_MUSIC) {
342        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
343    }
344
345    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
346        if (audio_is_linear_pcm(format)) {
347            mFrameSize = channelCount * audio_bytes_per_sample(format);
348        } else {
349            mFrameSize = sizeof(uint8_t);
350        }
351        mFrameSizeAF = mFrameSize;
352    } else {
353        ALOG_ASSERT(audio_is_linear_pcm(format));
354        mFrameSize = channelCount * audio_bytes_per_sample(format);
355        mFrameSizeAF = channelCount * audio_bytes_per_sample(
356                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
357        // createTrack will return an error if PCM format is not supported by server,
358        // so no need to check for specific PCM formats here
359    }
360
361    // Make copy of input parameter offloadInfo so that in the future:
362    //  (a) createTrack_l doesn't need it as an input parameter
363    //  (b) we can support re-creation of offloaded tracks
364    if (offloadInfo != NULL) {
365        mOffloadInfoCopy = *offloadInfo;
366        mOffloadInfo = &mOffloadInfoCopy;
367    } else {
368        mOffloadInfo = NULL;
369    }
370
371    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
372    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
373    mSendLevel = 0.0f;
374    // mFrameCount is initialized in createTrack_l
375    mReqFrameCount = frameCount;
376    mNotificationFramesReq = notificationFrames;
377    mNotificationFramesAct = 0;
378    mSessionId = sessionId;
379    int callingpid = IPCThreadState::self()->getCallingPid();
380    int mypid = getpid();
381    if (uid == -1 || (callingpid != mypid)) {
382        mClientUid = IPCThreadState::self()->getCallingUid();
383    } else {
384        mClientUid = uid;
385    }
386    if (pid == -1 || (callingpid != mypid)) {
387        mClientPid = callingpid;
388    } else {
389        mClientPid = pid;
390    }
391    mAuxEffectId = 0;
392    mFlags = flags;
393    mCbf = cbf;
394
395    if (cbf != NULL) {
396        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
397        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
398    }
399
400    // create the IAudioTrack
401    status = createTrack_l();
402
403    if (status != NO_ERROR) {
404        if (mAudioTrackThread != 0) {
405            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
406            mAudioTrackThread->requestExitAndWait();
407            mAudioTrackThread.clear();
408        }
409        return status;
410    }
411
412    mStatus = NO_ERROR;
413    mState = STATE_STOPPED;
414    mUserData = user;
415    mLoopPeriod = 0;
416    mMarkerPosition = 0;
417    mMarkerReached = false;
418    mNewPosition = 0;
419    mUpdatePeriod = 0;
420    mServer = 0;
421    mPosition = 0;
422    mReleased = 0;
423    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
424    mSequence = 1;
425    mObservedSequence = mSequence;
426    mInUnderrun = false;
427
428    return NO_ERROR;
429}
430
431// -------------------------------------------------------------------------
432
433status_t AudioTrack::start()
434{
435    AutoMutex lock(mLock);
436
437    if (mState == STATE_ACTIVE) {
438        return INVALID_OPERATION;
439    }
440
441    mInUnderrun = true;
442
443    State previousState = mState;
444    if (previousState == STATE_PAUSED_STOPPING) {
445        mState = STATE_STOPPING;
446    } else {
447        mState = STATE_ACTIVE;
448    }
449    (void) updateAndGetPosition_l();
450    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
451        // reset current position as seen by client to 0
452        mPosition = 0;
453        mReleased = 0;
454        // force refresh of remaining frames by processAudioBuffer() as last
455        // write before stop could be partial.
456        mRefreshRemaining = true;
457    }
458    mNewPosition = mPosition + mUpdatePeriod;
459    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
460
461    sp<AudioTrackThread> t = mAudioTrackThread;
462    if (t != 0) {
463        if (previousState == STATE_STOPPING) {
464            mProxy->interrupt();
465        } else {
466            t->resume();
467        }
468    } else {
469        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
470        get_sched_policy(0, &mPreviousSchedulingGroup);
471        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
472    }
473
474    status_t status = NO_ERROR;
475    if (!(flags & CBLK_INVALID)) {
476        status = mAudioTrack->start();
477        if (status == DEAD_OBJECT) {
478            flags |= CBLK_INVALID;
479        }
480    }
481    if (flags & CBLK_INVALID) {
482        status = restoreTrack_l("start");
483    }
484
485    if (status != NO_ERROR) {
486        ALOGE("start() status %d", status);
487        mState = previousState;
488        if (t != 0) {
489            if (previousState != STATE_STOPPING) {
490                t->pause();
491            }
492        } else {
493            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
494            set_sched_policy(0, mPreviousSchedulingGroup);
495        }
496    }
497
498    return status;
499}
500
501void AudioTrack::stop()
502{
503    AutoMutex lock(mLock);
504    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
505        return;
506    }
507
508    if (isOffloaded_l()) {
509        mState = STATE_STOPPING;
510    } else {
511        mState = STATE_STOPPED;
512    }
513
514    mProxy->interrupt();
515    mAudioTrack->stop();
516    // the playback head position will reset to 0, so if a marker is set, we need
517    // to activate it again
518    mMarkerReached = false;
519#if 0
520    // Force flush if a shared buffer is used otherwise audioflinger
521    // will not stop before end of buffer is reached.
522    // It may be needed to make sure that we stop playback, likely in case looping is on.
523    if (mSharedBuffer != 0) {
524        flush_l();
525    }
526#endif
527
528    sp<AudioTrackThread> t = mAudioTrackThread;
529    if (t != 0) {
530        if (!isOffloaded_l()) {
531            t->pause();
532        }
533    } else {
534        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
535        set_sched_policy(0, mPreviousSchedulingGroup);
536    }
537}
538
539bool AudioTrack::stopped() const
540{
541    AutoMutex lock(mLock);
542    return mState != STATE_ACTIVE;
543}
544
545void AudioTrack::flush()
546{
547    if (mSharedBuffer != 0) {
548        return;
549    }
550    AutoMutex lock(mLock);
551    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
552        return;
553    }
554    flush_l();
555}
556
557void AudioTrack::flush_l()
558{
559    ALOG_ASSERT(mState != STATE_ACTIVE);
560
561    // clear playback marker and periodic update counter
562    mMarkerPosition = 0;
563    mMarkerReached = false;
564    mUpdatePeriod = 0;
565    mRefreshRemaining = true;
566
567    mState = STATE_FLUSHED;
568    if (isOffloaded_l()) {
569        mProxy->interrupt();
570    }
571    mProxy->flush();
572    mAudioTrack->flush();
573}
574
575void AudioTrack::pause()
576{
577    AutoMutex lock(mLock);
578    if (mState == STATE_ACTIVE) {
579        mState = STATE_PAUSED;
580    } else if (mState == STATE_STOPPING) {
581        mState = STATE_PAUSED_STOPPING;
582    } else {
583        return;
584    }
585    mProxy->interrupt();
586    mAudioTrack->pause();
587
588    if (isOffloaded_l()) {
589        if (mOutput != AUDIO_IO_HANDLE_NONE) {
590            uint32_t halFrames;
591            // OffloadThread sends HAL pause in its threadLoop.. time saved
592            // here can be slightly off
593            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
594            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
595        }
596    }
597}
598
599status_t AudioTrack::setVolume(float left, float right)
600{
601    // This duplicates a test by AudioTrack JNI, but that is not the only caller
602    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
603            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
604        return BAD_VALUE;
605    }
606
607    AutoMutex lock(mLock);
608    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
609    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
610
611    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
612
613    if (isOffloaded_l()) {
614        mAudioTrack->signal();
615    }
616    return NO_ERROR;
617}
618
619status_t AudioTrack::setVolume(float volume)
620{
621    return setVolume(volume, volume);
622}
623
624status_t AudioTrack::setAuxEffectSendLevel(float level)
625{
626    // This duplicates a test by AudioTrack JNI, but that is not the only caller
627    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
628        return BAD_VALUE;
629    }
630
631    AutoMutex lock(mLock);
632    mSendLevel = level;
633    mProxy->setSendLevel(level);
634
635    return NO_ERROR;
636}
637
638void AudioTrack::getAuxEffectSendLevel(float* level) const
639{
640    if (level != NULL) {
641        *level = mSendLevel;
642    }
643}
644
645status_t AudioTrack::setSampleRate(uint32_t rate)
646{
647    if (mIsTimed || isOffloadedOrDirect()) {
648        return INVALID_OPERATION;
649    }
650
651    uint32_t afSamplingRate;
652    if (AudioSystem::getOutputSamplingRateForAttr(&afSamplingRate, &mAttributes) != NO_ERROR) {
653        return NO_INIT;
654    }
655    if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
656        return BAD_VALUE;
657    }
658
659    AutoMutex lock(mLock);
660    mSampleRate = rate;
661    mProxy->setSampleRate(rate);
662
663    return NO_ERROR;
664}
665
666uint32_t AudioTrack::getSampleRate() const
667{
668    if (mIsTimed) {
669        return 0;
670    }
671
672    AutoMutex lock(mLock);
673
674    // sample rate can be updated during playback by the offloaded decoder so we need to
675    // query the HAL and update if needed.
676// FIXME use Proxy return channel to update the rate from server and avoid polling here
677    if (isOffloadedOrDirect_l()) {
678        if (mOutput != AUDIO_IO_HANDLE_NONE) {
679            uint32_t sampleRate = 0;
680            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
681            if (status == NO_ERROR) {
682                mSampleRate = sampleRate;
683            }
684        }
685    }
686    return mSampleRate;
687}
688
689status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
690{
691    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
692        return INVALID_OPERATION;
693    }
694
695    if (loopCount == 0) {
696        ;
697    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
698            loopEnd - loopStart >= MIN_LOOP) {
699        ;
700    } else {
701        return BAD_VALUE;
702    }
703
704    AutoMutex lock(mLock);
705    // See setPosition() regarding setting parameters such as loop points or position while active
706    if (mState == STATE_ACTIVE) {
707        return INVALID_OPERATION;
708    }
709    setLoop_l(loopStart, loopEnd, loopCount);
710    return NO_ERROR;
711}
712
713void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
714{
715    // FIXME If setting a loop also sets position to start of loop, then
716    //       this is correct.  Otherwise it should be removed.
717    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
718    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
719    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
720}
721
722status_t AudioTrack::setMarkerPosition(uint32_t marker)
723{
724    // The only purpose of setting marker position is to get a callback
725    if (mCbf == NULL || isOffloadedOrDirect()) {
726        return INVALID_OPERATION;
727    }
728
729    AutoMutex lock(mLock);
730    mMarkerPosition = marker;
731    mMarkerReached = false;
732
733    return NO_ERROR;
734}
735
736status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
737{
738    if (isOffloadedOrDirect()) {
739        return INVALID_OPERATION;
740    }
741    if (marker == NULL) {
742        return BAD_VALUE;
743    }
744
745    AutoMutex lock(mLock);
746    *marker = mMarkerPosition;
747
748    return NO_ERROR;
749}
750
751status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
752{
753    // The only purpose of setting position update period is to get a callback
754    if (mCbf == NULL || isOffloadedOrDirect()) {
755        return INVALID_OPERATION;
756    }
757
758    AutoMutex lock(mLock);
759    mNewPosition = updateAndGetPosition_l() + updatePeriod;
760    mUpdatePeriod = updatePeriod;
761
762    return NO_ERROR;
763}
764
765status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
766{
767    if (isOffloadedOrDirect()) {
768        return INVALID_OPERATION;
769    }
770    if (updatePeriod == NULL) {
771        return BAD_VALUE;
772    }
773
774    AutoMutex lock(mLock);
775    *updatePeriod = mUpdatePeriod;
776
777    return NO_ERROR;
778}
779
780status_t AudioTrack::setPosition(uint32_t position)
781{
782    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
783        return INVALID_OPERATION;
784    }
785    if (position > mFrameCount) {
786        return BAD_VALUE;
787    }
788
789    AutoMutex lock(mLock);
790    // Currently we require that the player is inactive before setting parameters such as position
791    // or loop points.  Otherwise, there could be a race condition: the application could read the
792    // current position, compute a new position or loop parameters, and then set that position or
793    // loop parameters but it would do the "wrong" thing since the position has continued to advance
794    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
795    // to specify how it wants to handle such scenarios.
796    if (mState == STATE_ACTIVE) {
797        return INVALID_OPERATION;
798    }
799    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
800    mLoopPeriod = 0;
801    // FIXME Check whether loops and setting position are incompatible in old code.
802    // If we use setLoop for both purposes we lose the capability to set the position while looping.
803    mStaticProxy->setLoop(position, mFrameCount, 0);
804
805    return NO_ERROR;
806}
807
808status_t AudioTrack::getPosition(uint32_t *position)
809{
810    if (position == NULL) {
811        return BAD_VALUE;
812    }
813
814    AutoMutex lock(mLock);
815    if (isOffloadedOrDirect_l()) {
816        uint32_t dspFrames = 0;
817
818        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
819            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
820            *position = mPausedPosition;
821            return NO_ERROR;
822        }
823
824        if (mOutput != AUDIO_IO_HANDLE_NONE) {
825            uint32_t halFrames;
826            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
827        }
828        *position = dspFrames;
829    } else {
830        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
831        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
832                0 : updateAndGetPosition_l();
833    }
834    return NO_ERROR;
835}
836
837status_t AudioTrack::getBufferPosition(uint32_t *position)
838{
839    if (mSharedBuffer == 0 || mIsTimed) {
840        return INVALID_OPERATION;
841    }
842    if (position == NULL) {
843        return BAD_VALUE;
844    }
845
846    AutoMutex lock(mLock);
847    *position = mStaticProxy->getBufferPosition();
848    return NO_ERROR;
849}
850
851status_t AudioTrack::reload()
852{
853    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
854        return INVALID_OPERATION;
855    }
856
857    AutoMutex lock(mLock);
858    // See setPosition() regarding setting parameters such as loop points or position while active
859    if (mState == STATE_ACTIVE) {
860        return INVALID_OPERATION;
861    }
862    mNewPosition = mUpdatePeriod;
863    mLoopPeriod = 0;
864    // FIXME The new code cannot reload while keeping a loop specified.
865    // Need to check how the old code handled this, and whether it's a significant change.
866    mStaticProxy->setLoop(0, mFrameCount, 0);
867    return NO_ERROR;
868}
869
870audio_io_handle_t AudioTrack::getOutput() const
871{
872    AutoMutex lock(mLock);
873    return mOutput;
874}
875
876status_t AudioTrack::attachAuxEffect(int effectId)
877{
878    AutoMutex lock(mLock);
879    status_t status = mAudioTrack->attachAuxEffect(effectId);
880    if (status == NO_ERROR) {
881        mAuxEffectId = effectId;
882    }
883    return status;
884}
885
886// -------------------------------------------------------------------------
887
888// must be called with mLock held
889status_t AudioTrack::createTrack_l()
890{
891    status_t status;
892    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
893    if (audioFlinger == 0) {
894        ALOGE("Could not get audioflinger");
895        return NO_INIT;
896    }
897
898    audio_io_handle_t output = AudioSystem::getOutputForAttr(&mAttributes, mSampleRate, mFormat,
899            mChannelMask, mFlags, mOffloadInfo);
900    if (output == AUDIO_IO_HANDLE_NONE) {
901        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
902              " channel mask %#x, flags %#x",
903              mStreamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
904        return BAD_VALUE;
905    }
906    {
907    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
908    // we must release it ourselves if anything goes wrong.
909
910    // Not all of these values are needed under all conditions, but it is easier to get them all
911
912    uint32_t afLatency;
913    status = AudioSystem::getLatency(output, &afLatency);
914    if (status != NO_ERROR) {
915        ALOGE("getLatency(%d) failed status %d", output, status);
916        goto release;
917    }
918
919    size_t afFrameCount;
920    status = AudioSystem::getFrameCount(output, &afFrameCount);
921    if (status != NO_ERROR) {
922        ALOGE("getFrameCount(output=%d) status %d", output, status);
923        goto release;
924    }
925
926    uint32_t afSampleRate;
927    status = AudioSystem::getSamplingRate(output, &afSampleRate);
928    if (status != NO_ERROR) {
929        ALOGE("getSamplingRate(output=%d) status %d", output, status);
930        goto release;
931    }
932
933    // Client decides whether the track is TIMED (see below), but can only express a preference
934    // for FAST.  Server will perform additional tests.
935    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
936            // either of these use cases:
937            // use case 1: shared buffer
938            (mSharedBuffer != 0) ||
939            // use case 2: callback transfer mode
940            (mTransfer == TRANSFER_CALLBACK)) &&
941            // matching sample rate
942            (mSampleRate == afSampleRate))) {
943        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
944        // once denied, do not request again if IAudioTrack is re-created
945        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
946    }
947    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
948
949    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
950    //  n = 1   fast track with single buffering; nBuffering is ignored
951    //  n = 2   fast track with double buffering
952    //  n = 2   normal track, no sample rate conversion
953    //  n = 3   normal track, with sample rate conversion
954    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
955    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
956    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
957
958    mNotificationFramesAct = mNotificationFramesReq;
959
960    size_t frameCount = mReqFrameCount;
961    if (!audio_is_linear_pcm(mFormat)) {
962
963        if (mSharedBuffer != 0) {
964            // Same comment as below about ignoring frameCount parameter for set()
965            frameCount = mSharedBuffer->size();
966        } else if (frameCount == 0) {
967            frameCount = afFrameCount;
968        }
969        if (mNotificationFramesAct != frameCount) {
970            mNotificationFramesAct = frameCount;
971        }
972    } else if (mSharedBuffer != 0) {
973
974        // Ensure that buffer alignment matches channel count
975        // 8-bit data in shared memory is not currently supported by AudioFlinger
976        size_t alignment = audio_bytes_per_sample(
977                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
978        if (alignment & 1) {
979            alignment = 1;
980        }
981        if (mChannelCount > 1) {
982            // More than 2 channels does not require stronger alignment than stereo
983            alignment <<= 1;
984        }
985        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
986            ALOGE("Invalid buffer alignment: address %p, channel count %u",
987                    mSharedBuffer->pointer(), mChannelCount);
988            status = BAD_VALUE;
989            goto release;
990        }
991
992        // When initializing a shared buffer AudioTrack via constructors,
993        // there's no frameCount parameter.
994        // But when initializing a shared buffer AudioTrack via set(),
995        // there _is_ a frameCount parameter.  We silently ignore it.
996        frameCount = mSharedBuffer->size() / mFrameSizeAF;
997
998    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
999
1000        // FIXME move these calculations and associated checks to server
1001
1002        // Ensure that buffer depth covers at least audio hardware latency
1003        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
1004        ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
1005                afFrameCount, minBufCount, afSampleRate, afLatency);
1006        if (minBufCount <= nBuffering) {
1007            minBufCount = nBuffering;
1008        }
1009
1010        size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
1011        ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
1012                ", afLatency=%d",
1013                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
1014
1015        if (frameCount == 0) {
1016            frameCount = minFrameCount;
1017        } else if (frameCount < minFrameCount) {
1018            // not ALOGW because it happens all the time when playing key clicks over A2DP
1019            ALOGV("Minimum buffer size corrected from %zu to %zu",
1020                     frameCount, minFrameCount);
1021            frameCount = minFrameCount;
1022        }
1023        // Make sure that application is notified with sufficient margin before underrun
1024        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1025            mNotificationFramesAct = frameCount/nBuffering;
1026        }
1027
1028    } else {
1029        // For fast tracks, the frame count calculations and checks are done by server
1030    }
1031
1032    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1033    if (mIsTimed) {
1034        trackFlags |= IAudioFlinger::TRACK_TIMED;
1035    }
1036
1037    pid_t tid = -1;
1038    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1039        trackFlags |= IAudioFlinger::TRACK_FAST;
1040        if (mAudioTrackThread != 0) {
1041            tid = mAudioTrackThread->getTid();
1042        }
1043    }
1044
1045    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1046        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1047    }
1048
1049    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1050        trackFlags |= IAudioFlinger::TRACK_DIRECT;
1051    }
1052
1053    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1054                                // but we will still need the original value also
1055    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
1056                                                      mSampleRate,
1057                                                      // AudioFlinger only sees 16-bit PCM
1058                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
1059                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
1060                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
1061                                                      mChannelMask,
1062                                                      &temp,
1063                                                      &trackFlags,
1064                                                      mSharedBuffer,
1065                                                      output,
1066                                                      tid,
1067                                                      &mSessionId,
1068                                                      mClientUid,
1069                                                      &status);
1070
1071    if (status != NO_ERROR) {
1072        ALOGE("AudioFlinger could not create track, status: %d", status);
1073        goto release;
1074    }
1075    ALOG_ASSERT(track != 0);
1076
1077    // AudioFlinger now owns the reference to the I/O handle,
1078    // so we are no longer responsible for releasing it.
1079
1080    sp<IMemory> iMem = track->getCblk();
1081    if (iMem == 0) {
1082        ALOGE("Could not get control block");
1083        return NO_INIT;
1084    }
1085    void *iMemPointer = iMem->pointer();
1086    if (iMemPointer == NULL) {
1087        ALOGE("Could not get control block pointer");
1088        return NO_INIT;
1089    }
1090    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1091    if (mAudioTrack != 0) {
1092        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1093        mDeathNotifier.clear();
1094    }
1095    mAudioTrack = track;
1096    mCblkMemory = iMem;
1097    IPCThreadState::self()->flushCommands();
1098
1099    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1100    mCblk = cblk;
1101    // note that temp is the (possibly revised) value of frameCount
1102    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1103        // In current design, AudioTrack client checks and ensures frame count validity before
1104        // passing it to AudioFlinger so AudioFlinger should not return a different value except
1105        // for fast track as it uses a special method of assigning frame count.
1106        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1107    }
1108    frameCount = temp;
1109
1110    mAwaitBoost = false;
1111    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1112        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1113            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1114            mAwaitBoost = true;
1115            if (mSharedBuffer == 0) {
1116                // Theoretically double-buffering is not required for fast tracks,
1117                // due to tighter scheduling.  But in practice, to accommodate kernels with
1118                // scheduling jitter, and apps with computation jitter, we use double-buffering.
1119                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1120                    mNotificationFramesAct = frameCount/nBuffering;
1121                }
1122            }
1123        } else {
1124            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1125            // once denied, do not request again if IAudioTrack is re-created
1126            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1127            if (mSharedBuffer == 0) {
1128                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1129                    mNotificationFramesAct = frameCount/nBuffering;
1130                }
1131            }
1132        }
1133    }
1134    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1135        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1136            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1137        } else {
1138            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1139            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1140            // FIXME This is a warning, not an error, so don't return error status
1141            //return NO_INIT;
1142        }
1143    }
1144    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1145        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
1146            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
1147        } else {
1148            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
1149            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
1150            // FIXME This is a warning, not an error, so don't return error status
1151            //return NO_INIT;
1152        }
1153    }
1154
1155    // We retain a copy of the I/O handle, but don't own the reference
1156    mOutput = output;
1157    mRefreshRemaining = true;
1158
1159    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1160    // is the value of pointer() for the shared buffer, otherwise buffers points
1161    // immediately after the control block.  This address is for the mapping within client
1162    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1163    void* buffers;
1164    if (mSharedBuffer == 0) {
1165        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1166    } else {
1167        buffers = mSharedBuffer->pointer();
1168    }
1169
1170    mAudioTrack->attachAuxEffect(mAuxEffectId);
1171    // FIXME don't believe this lie
1172    mLatency = afLatency + (1000*frameCount) / mSampleRate;
1173
1174    mFrameCount = frameCount;
1175    // If IAudioTrack is re-created, don't let the requested frameCount
1176    // decrease.  This can confuse clients that cache frameCount().
1177    if (frameCount > mReqFrameCount) {
1178        mReqFrameCount = frameCount;
1179    }
1180
1181    // update proxy
1182    if (mSharedBuffer == 0) {
1183        mStaticProxy.clear();
1184        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1185    } else {
1186        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1187        mProxy = mStaticProxy;
1188    }
1189    mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
1190    mProxy->setSendLevel(mSendLevel);
1191    mProxy->setSampleRate(mSampleRate);
1192    mProxy->setMinimum(mNotificationFramesAct);
1193
1194    mDeathNotifier = new DeathNotifier(this);
1195    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1196
1197    return NO_ERROR;
1198    }
1199
1200release:
1201    AudioSystem::releaseOutput(output);
1202    if (status == NO_ERROR) {
1203        status = NO_INIT;
1204    }
1205    return status;
1206}
1207
1208status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1209{
1210    if (audioBuffer == NULL) {
1211        return BAD_VALUE;
1212    }
1213    if (mTransfer != TRANSFER_OBTAIN) {
1214        audioBuffer->frameCount = 0;
1215        audioBuffer->size = 0;
1216        audioBuffer->raw = NULL;
1217        return INVALID_OPERATION;
1218    }
1219
1220    const struct timespec *requested;
1221    struct timespec timeout;
1222    if (waitCount == -1) {
1223        requested = &ClientProxy::kForever;
1224    } else if (waitCount == 0) {
1225        requested = &ClientProxy::kNonBlocking;
1226    } else if (waitCount > 0) {
1227        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1228        timeout.tv_sec = ms / 1000;
1229        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1230        requested = &timeout;
1231    } else {
1232        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1233        requested = NULL;
1234    }
1235    return obtainBuffer(audioBuffer, requested);
1236}
1237
1238status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1239        struct timespec *elapsed, size_t *nonContig)
1240{
1241    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1242    uint32_t oldSequence = 0;
1243    uint32_t newSequence;
1244
1245    Proxy::Buffer buffer;
1246    status_t status = NO_ERROR;
1247
1248    static const int32_t kMaxTries = 5;
1249    int32_t tryCounter = kMaxTries;
1250
1251    do {
1252        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1253        // keep them from going away if another thread re-creates the track during obtainBuffer()
1254        sp<AudioTrackClientProxy> proxy;
1255        sp<IMemory> iMem;
1256
1257        {   // start of lock scope
1258            AutoMutex lock(mLock);
1259
1260            newSequence = mSequence;
1261            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1262            if (status == DEAD_OBJECT) {
1263                // re-create track, unless someone else has already done so
1264                if (newSequence == oldSequence) {
1265                    status = restoreTrack_l("obtainBuffer");
1266                    if (status != NO_ERROR) {
1267                        buffer.mFrameCount = 0;
1268                        buffer.mRaw = NULL;
1269                        buffer.mNonContig = 0;
1270                        break;
1271                    }
1272                }
1273            }
1274            oldSequence = newSequence;
1275
1276            // Keep the extra references
1277            proxy = mProxy;
1278            iMem = mCblkMemory;
1279
1280            if (mState == STATE_STOPPING) {
1281                status = -EINTR;
1282                buffer.mFrameCount = 0;
1283                buffer.mRaw = NULL;
1284                buffer.mNonContig = 0;
1285                break;
1286            }
1287
1288            // Non-blocking if track is stopped or paused
1289            if (mState != STATE_ACTIVE) {
1290                requested = &ClientProxy::kNonBlocking;
1291            }
1292
1293        }   // end of lock scope
1294
1295        buffer.mFrameCount = audioBuffer->frameCount;
1296        // FIXME starts the requested timeout and elapsed over from scratch
1297        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1298
1299    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1300
1301    audioBuffer->frameCount = buffer.mFrameCount;
1302    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1303    audioBuffer->raw = buffer.mRaw;
1304    if (nonContig != NULL) {
1305        *nonContig = buffer.mNonContig;
1306    }
1307    return status;
1308}
1309
1310void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1311{
1312    if (mTransfer == TRANSFER_SHARED) {
1313        return;
1314    }
1315
1316    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1317    if (stepCount == 0) {
1318        return;
1319    }
1320
1321    Proxy::Buffer buffer;
1322    buffer.mFrameCount = stepCount;
1323    buffer.mRaw = audioBuffer->raw;
1324
1325    AutoMutex lock(mLock);
1326    mReleased += stepCount;
1327    mInUnderrun = false;
1328    mProxy->releaseBuffer(&buffer);
1329
1330    // restart track if it was disabled by audioflinger due to previous underrun
1331    if (mState == STATE_ACTIVE) {
1332        audio_track_cblk_t* cblk = mCblk;
1333        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1334            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1335            // FIXME ignoring status
1336            mAudioTrack->start();
1337        }
1338    }
1339}
1340
1341// -------------------------------------------------------------------------
1342
1343ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1344{
1345    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1346        return INVALID_OPERATION;
1347    }
1348
1349    if (isDirect()) {
1350        AutoMutex lock(mLock);
1351        int32_t flags = android_atomic_and(
1352                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1353                            &mCblk->mFlags);
1354        if (flags & CBLK_INVALID) {
1355            return DEAD_OBJECT;
1356        }
1357    }
1358
1359    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1360        // Sanity-check: user is most-likely passing an error code, and it would
1361        // make the return value ambiguous (actualSize vs error).
1362        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
1363        return BAD_VALUE;
1364    }
1365
1366    size_t written = 0;
1367    Buffer audioBuffer;
1368
1369    while (userSize >= mFrameSize) {
1370        audioBuffer.frameCount = userSize / mFrameSize;
1371
1372        status_t err = obtainBuffer(&audioBuffer,
1373                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1374        if (err < 0) {
1375            if (written > 0) {
1376                break;
1377            }
1378            return ssize_t(err);
1379        }
1380
1381        size_t toWrite;
1382        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1383            // Divide capacity by 2 to take expansion into account
1384            toWrite = audioBuffer.size >> 1;
1385            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1386        } else {
1387            toWrite = audioBuffer.size;
1388            memcpy(audioBuffer.i8, buffer, toWrite);
1389        }
1390        buffer = ((const char *) buffer) + toWrite;
1391        userSize -= toWrite;
1392        written += toWrite;
1393
1394        releaseBuffer(&audioBuffer);
1395    }
1396
1397    return written;
1398}
1399
1400// -------------------------------------------------------------------------
1401
1402TimedAudioTrack::TimedAudioTrack() {
1403    mIsTimed = true;
1404}
1405
1406status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1407{
1408    AutoMutex lock(mLock);
1409    status_t result = UNKNOWN_ERROR;
1410
1411#if 1
1412    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1413    // while we are accessing the cblk
1414    sp<IAudioTrack> audioTrack = mAudioTrack;
1415    sp<IMemory> iMem = mCblkMemory;
1416#endif
1417
1418    // If the track is not invalid already, try to allocate a buffer.  alloc
1419    // fails indicating that the server is dead, flag the track as invalid so
1420    // we can attempt to restore in just a bit.
1421    audio_track_cblk_t* cblk = mCblk;
1422    if (!(cblk->mFlags & CBLK_INVALID)) {
1423        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1424        if (result == DEAD_OBJECT) {
1425            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1426        }
1427    }
1428
1429    // If the track is invalid at this point, attempt to restore it. and try the
1430    // allocation one more time.
1431    if (cblk->mFlags & CBLK_INVALID) {
1432        result = restoreTrack_l("allocateTimedBuffer");
1433
1434        if (result == NO_ERROR) {
1435            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1436        }
1437    }
1438
1439    return result;
1440}
1441
1442status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1443                                           int64_t pts)
1444{
1445    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1446    {
1447        AutoMutex lock(mLock);
1448        audio_track_cblk_t* cblk = mCblk;
1449        // restart track if it was disabled by audioflinger due to previous underrun
1450        if (buffer->size() != 0 && status == NO_ERROR &&
1451                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1452            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1453            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1454            // FIXME ignoring status
1455            mAudioTrack->start();
1456        }
1457    }
1458    return status;
1459}
1460
1461status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1462                                                TargetTimeline target)
1463{
1464    return mAudioTrack->setMediaTimeTransform(xform, target);
1465}
1466
1467// -------------------------------------------------------------------------
1468
1469nsecs_t AudioTrack::processAudioBuffer()
1470{
1471    // Currently the AudioTrack thread is not created if there are no callbacks.
1472    // Would it ever make sense to run the thread, even without callbacks?
1473    // If so, then replace this by checks at each use for mCbf != NULL.
1474    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1475
1476    mLock.lock();
1477    if (mAwaitBoost) {
1478        mAwaitBoost = false;
1479        mLock.unlock();
1480        static const int32_t kMaxTries = 5;
1481        int32_t tryCounter = kMaxTries;
1482        uint32_t pollUs = 10000;
1483        do {
1484            int policy = sched_getscheduler(0);
1485            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1486                break;
1487            }
1488            usleep(pollUs);
1489            pollUs <<= 1;
1490        } while (tryCounter-- > 0);
1491        if (tryCounter < 0) {
1492            ALOGE("did not receive expected priority boost on time");
1493        }
1494        // Run again immediately
1495        return 0;
1496    }
1497
1498    // Can only reference mCblk while locked
1499    int32_t flags = android_atomic_and(
1500        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1501
1502    // Check for track invalidation
1503    if (flags & CBLK_INVALID) {
1504        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1505        // AudioSystem cache. We should not exit here but after calling the callback so
1506        // that the upper layers can recreate the track
1507        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1508            status_t status = restoreTrack_l("processAudioBuffer");
1509            mLock.unlock();
1510            // Run again immediately, but with a new IAudioTrack
1511            return 0;
1512        }
1513    }
1514
1515    bool waitStreamEnd = mState == STATE_STOPPING;
1516    bool active = mState == STATE_ACTIVE;
1517
1518    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1519    bool newUnderrun = false;
1520    if (flags & CBLK_UNDERRUN) {
1521#if 0
1522        // Currently in shared buffer mode, when the server reaches the end of the buffer,
1523        // the track stays active in a continuous underrun state.  It's up to the application
1524        // to pause or stop the track, or to set the position to a new offset within the buffer.
1525        // This was some experimental code to auto-pause on underrun.  Keeping it here
1526        // in "if 0" so we can revisit this if we add a real sequencer for shared memory content.
1527        if (mTransfer == TRANSFER_SHARED) {
1528            mState = STATE_PAUSED;
1529            active = false;
1530        }
1531#endif
1532        if (!mInUnderrun) {
1533            mInUnderrun = true;
1534            newUnderrun = true;
1535        }
1536    }
1537
1538    // Get current position of server
1539    size_t position = updateAndGetPosition_l();
1540
1541    // Manage marker callback
1542    bool markerReached = false;
1543    size_t markerPosition = mMarkerPosition;
1544    // FIXME fails for wraparound, need 64 bits
1545    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1546        mMarkerReached = markerReached = true;
1547    }
1548
1549    // Determine number of new position callback(s) that will be needed, while locked
1550    size_t newPosCount = 0;
1551    size_t newPosition = mNewPosition;
1552    size_t updatePeriod = mUpdatePeriod;
1553    // FIXME fails for wraparound, need 64 bits
1554    if (updatePeriod > 0 && position >= newPosition) {
1555        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1556        mNewPosition += updatePeriod * newPosCount;
1557    }
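    // Example: with updatePeriod == 1000, newPosition == 5000 and position == 7350,
    // newPosCount becomes 3 and mNewPosition advances to 8000.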
1558
1559    // Cache other fields that will be needed soon
1560    uint32_t loopPeriod = mLoopPeriod;
1561    uint32_t sampleRate = mSampleRate;
1562    uint32_t notificationFrames = mNotificationFramesAct;
1563    if (mRefreshRemaining) {
1564        mRefreshRemaining = false;
1565        mRemainingFrames = notificationFrames;
1566        mRetryOnPartialBuffer = false;
1567    }
1568    size_t misalignment = mProxy->getMisalignment();
1569    uint32_t sequence = mSequence;
1570    sp<AudioTrackClientProxy> proxy = mProxy;
1571
1572    // These fields don't need to be cached, because they are assigned only by set():
1573    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1574    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1575
1576    mLock.unlock();
1577
1578    if (waitStreamEnd) {
1579        struct timespec timeout;
1580        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1581        timeout.tv_nsec = 0;
1582
1583        status_t status = proxy->waitStreamEndDone(&timeout);
1584        switch (status) {
1585        case NO_ERROR:
1586        case DEAD_OBJECT:
1587        case TIMED_OUT:
1588            mCbf(EVENT_STREAM_END, mUserData, NULL);
1589            {
1590                AutoMutex lock(mLock);
1591                // The previously assigned value of waitStreamEnd is no longer valid,
1592                // since the mutex has been unlocked and either the callback handler
1593                // or another thread could have re-started the AudioTrack during that time.
1594                waitStreamEnd = mState == STATE_STOPPING;
1595                if (waitStreamEnd) {
1596                    mState = STATE_STOPPED;
1597                }
1598            }
1599            if (waitStreamEnd && status != DEAD_OBJECT) {
1600                return NS_INACTIVE;
1601            }
1602            break;
1603        }
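        // Getting here means the IAudioTrack died (DEAD_OBJECT), the track was restarted
        // while the lock was dropped, or waitStreamEndDone() returned an unexpected status;
        // run again immediately so the new state (and any CBLK_INVALID) is re-evaluated.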
1604        return 0;
1605    }
1606
1607    // perform callbacks while unlocked
1608    if (newUnderrun) {
1609        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1610    }
1611    // FIXME we will miss loops if loop cycle was signaled several times since last call
1612    //       to processAudioBuffer()
1613    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1614        mCbf(EVENT_LOOP_END, mUserData, NULL);
1615    }
1616    if (flags & CBLK_BUFFER_END) {
1617        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1618    }
1619    if (markerReached) {
1620        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1621    }
1622    while (newPosCount > 0) {
1623        size_t temp = newPosition;
1624        mCbf(EVENT_NEW_POS, mUserData, &temp);
1625        newPosition += updatePeriod;
1626        newPosCount--;
1627    }
1628
1629    if (mObservedSequence != sequence) {
1630        mObservedSequence = sequence;
1631        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1632        // for offloaded tracks, just wait for the upper layers to recreate the track
1633        if (isOffloadedOrDirect()) {
1634            return NS_INACTIVE;
1635        }
1636    }
1637
1638    // if inactive, then don't run me again until re-started
1639    if (!active) {
1640        return NS_INACTIVE;
1641    }
1642
1643    // Compute the estimated time until the next timed event (position, markers, loops)
1644    // FIXME only for non-compressed audio
1645    uint32_t minFrames = ~0;
1646    if (!markerReached && position < markerPosition) {
1647        minFrames = markerPosition - position;
1648    }
1649    if (loopPeriod > 0 && loopPeriod < minFrames) {
1650        minFrames = loopPeriod;
1651    }
1652    if (updatePeriod > 0 && updatePeriod < minFrames) {
1653        minFrames = updatePeriod;
1654    }
1655
1656    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1657    static const uint32_t kPoll = 0;
1658    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1659        minFrames = kPoll * notificationFrames;
1660    }
1661
1662    // Convert frame units to time units
1663    nsecs_t ns = NS_WHENEVER;
1664    if (minFrames != (uint32_t) ~0) {
1665        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1666        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1667        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1668    }
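    // For example, minFrames == 480 at 48000 Hz gives a nominal 10 ms, so ns == 20 ms
    // once the fudge factor is added.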
1669
1670    // If not supplying data by EVENT_MORE_DATA, then we're done
1671    if (mTransfer != TRANSFER_CALLBACK) {
1672        return ns;
1673    }
1674
1675    struct timespec timeout;
1676    const struct timespec *requested = &ClientProxy::kForever;
1677    if (ns != NS_WHENEVER) {
1678        timeout.tv_sec = ns / 1000000000LL;
1679        timeout.tv_nsec = ns % 1000000000LL;
1680        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1681        requested = &timeout;
1682    }
1683
1684    while (mRemainingFrames > 0) {
1685
1686        Buffer audioBuffer;
1687        audioBuffer.frameCount = mRemainingFrames;
1688        size_t nonContig;
1689        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1690        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1691                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1692        requested = &ClientProxy::kNonBlocking;
1693        size_t avail = audioBuffer.frameCount + nonContig;
1694        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1695                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1696        if (err != NO_ERROR) {
1697            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1698                    (isOffloaded() && (err == DEAD_OBJECT))) {
1699                return 0;
1700            }
1701            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1702            return NS_NEVER;
1703        }
1704
1705        if (mRetryOnPartialBuffer && !isOffloaded()) {
1706            mRetryOnPartialBuffer = false;
1707            if (avail < mRemainingFrames) {
1708                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1709                if (ns < 0 || myns < ns) {
1710                    ns = myns;
1711                }
1712                return ns;
1713            }
1714        }
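        // Note: 1100000000LL above is 1.1e9 ns per second of missing frames, i.e. the
        // nominal duration of the shortfall plus (presumably) a ~10% margin before retrying.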
1715
1716        // Divide buffer size by 2 to take into account the expansion
1717        // due to 8 to 16 bit conversion: the callback must fill only half
1718        // of the destination buffer
1719        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1720            audioBuffer.size >>= 1;
1721        }
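        // For example, a 4096-byte buffer of 16-bit frames is presented to the callback as
        // 2048 bytes; the 8-bit samples written by the callback are expanded in place to
        // 16-bit PCM below, and the reported size is doubled before the buffer is released.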
1722
1723        size_t reqSize = audioBuffer.size;
1724        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1725        size_t writtenSize = audioBuffer.size;
1726
1727        // Sanity check on returned size
1728        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1729            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1730                    reqSize, ssize_t(writtenSize));
1731            return NS_NEVER;
1732        }
1733
1734        if (writtenSize == 0) {
1735            // The callback is done filling buffers
1736            // Keep this thread going to handle timed events and
1737            // still try to get more data in intervals of WAIT_PERIOD_MS
1738            // but don't just loop and block the CPU, so wait
1739            return WAIT_PERIOD_MS * 1000000LL;
1740        }
1741
1742        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1743            // 8 to 16 bit conversion, note that source and destination are the same address
1744            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1745            audioBuffer.size <<= 1;
1746        }
1747
1748        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1749        audioBuffer.frameCount = releasedFrames;
1750        mRemainingFrames -= releasedFrames;
1751        if (misalignment >= releasedFrames) {
1752            misalignment -= releasedFrames;
1753        } else {
1754            misalignment = 0;
1755        }
1756
1757        releaseBuffer(&audioBuffer);
1758
1759        // FIXME this is where we would repeat EVENT_MORE_DATA on the same (advanced) buffer
1760        // if the callback did not accept the full chunk
1761        if (writtenSize < reqSize) {
1762            continue;
1763        }
1764
1765        // There could be enough non-contiguous frames available to satisfy the remaining request
1766        if (mRemainingFrames <= nonContig) {
1767            continue;
1768        }
1769
1770#if 0
1771        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1772        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1773        // that total to a sum == notificationFrames.
1774        if (0 < misalignment && misalignment <= mRemainingFrames) {
1775            mRemainingFrames = misalignment;
1776            return (mRemainingFrames * 1100000000LL) / sampleRate;
1777        }
1778#endif
1779
1780    }
1781    mRemainingFrames = notificationFrames;
1782    mRetryOnPartialBuffer = true;
1783
1784    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1785    return 0;
1786}
1787
1788status_t AudioTrack::restoreTrack_l(const char *from)
1789{
1790    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1791          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
1792    ++mSequence;
1793    status_t result;
1794
1795    // refresh the audio configuration cache in this process to make sure we get new
1796    // output parameters in createTrack_l()
1797    AudioSystem::clearAudioConfigCache();
1798
1799    if (isOffloadedOrDirect_l()) {
1800        // FIXME re-creation of offloaded tracks is not yet implemented
1801        return DEAD_OBJECT;
1802    }
1803
1804    // save the old static buffer position
1805    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1806
1807    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
1808    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1809    // It will also delete the strong references on previous IAudioTrack and IMemory.
1810    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
1811    result = createTrack_l();
1812
1813    // take the frames that will be lost by track recreation into account in saved position
1814    (void) updateAndGetPosition_l();
1815    mPosition = mReleased;
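    // Advance the client position to the released count: frames handed to the dead track
    // but not yet played cannot be recovered, so they are accounted for as consumed.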
1816
1817    if (result == NO_ERROR) {
1818        // continue playback from last known position, but
1819        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1820        if (mStaticProxy != NULL) {
1821            mLoopPeriod = 0;
1822            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1823        }
1824        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1825        //       track destruction have been played? This is critical for the SoundPool implementation.
1826        //       This must be broken, and needs to be tested/debugged.
1827#if 0
1828        // restore write index and set other indexes to reflect empty buffer status
1829        if (!strcmp(from, "start")) {
1830            // Make sure that a client relying on callback events indicating underrun or
1831            // the actual number of audio frames played (e.g. SoundPool) receives them.
1832            if (mSharedBuffer == 0) {
1833                // restart playback even if buffer is not completely filled.
1834                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1835            }
1836        }
1837#endif
1838        if (mState == STATE_ACTIVE) {
1839            result = mAudioTrack->start();
1840        }
1841    }
1842    if (result != NO_ERROR) {
1843        ALOGW("restoreTrack_l() failed status %d", result);
1844        mState = STATE_STOPPED;
1845    }
1846
1847    return result;
1848}
1849
1850uint32_t AudioTrack::updateAndGetPosition_l()
1851{
1852    // This is the sole place to read server consumed frames
1853    uint32_t newServer = mProxy->getPosition();
1854    int32_t delta = newServer - mServer;
1855    mServer = newServer;
1856    // TODO There is controversy about whether there can be "negative jitter" in server position.
1857    //      This should be investigated further, and if possible, it should be addressed.
1858    //      A more definite failure mode is infrequent polling by the client.
1859    //      One could call (void) updateAndGetPosition_l() in releaseBuffer(),
1860    //      so that mReleased and mPosition stay in lock-step as closely as possible.
1861    //      That should ensure delta never goes negative for infrequent polling
1862    //      unless the server has more than 2^31 frames in its buffer,
1863    //      in which case the use of uint32_t for these counters has bigger issues.
1864    if (delta < 0) {
1865        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
1866        delta = 0;
1867    }
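    // Both counters are uint32_t, so newServer - mServer is computed modulo 2^32 and the
    // delta stays correct across wraparound, provided the server advances by fewer than
    // 2^31 frames between polls (see the comment above).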
1868    return mPosition += (uint32_t) delta;
1869}
1870
1871status_t AudioTrack::setParameters(const String8& keyValuePairs)
1872{
1873    AutoMutex lock(mLock);
1874    return mAudioTrack->setParameters(keyValuePairs);
1875}
1876
1877status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1878{
1879    AutoMutex lock(mLock);
1880    // FIXME not implemented for fast tracks; should use proxy and SSQ
1881    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1882        return INVALID_OPERATION;
1883    }
1884    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1885        return INVALID_OPERATION;
1886    }
1887    // The presented frame count must always lag behind the consumed frame count.
1888    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
1889    status_t status = mAudioTrack->getTimestamp(timestamp);
1890    if (status == NO_ERROR) {
1891        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
1892        (void) updateAndGetPosition_l();
1893        // Server consumed (mServer) and presented both use the same server time base,
1894        // and server consumed is always >= presented.
1895        // The delta between these represents the number of frames in the buffer pipeline.
1896        // If this delta is greater than the client position, it means that the
1897        // presented position is still stuck at the starting line (figuratively speaking),
1898        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
1899        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
1900            return INVALID_OPERATION;
1901        }
1902        // Convert timestamp position from server time base to client time base.
1903        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
1904        // But if we change it to 64-bit then this could fail.
1905        // If (mPosition - mServer) can be negative then should use:
1906        //   (int32_t)(mPosition - mServer)
1907        timestamp.mPosition += mPosition - mServer;
1908        // Immediately after a call to updateAndGetPosition_l(), mPosition and
1909        // mServer both represent the same frame position.  mPosition is
1910        // in client's point of view, and mServer is in server's point of
1911        // view.  So the difference between them is the "fudge factor"
1912        // between client and server views due to stop() and/or new
1913        // IAudioTrack.  And timestamp.mPosition is initially in server's
1914        // point of view, so we need to apply the same fudge factor to it.
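        // For example, if mPosition == 10000, mServer == 7000 and the server reports
        // 6500 frames presented, the client-visible timestamp position becomes
        // 6500 + (10000 - 7000) == 9500.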
1915    }
1916    return status;
1917}
1918
1919String8 AudioTrack::getParameters(const String8& keys)
1920{
1921    audio_io_handle_t output = getOutput();
1922    if (output != AUDIO_IO_HANDLE_NONE) {
1923        return AudioSystem::getParameters(output, keys);
1924    } else {
1925        return String8::empty();
1926    }
1927}
1928
1929bool AudioTrack::isOffloaded() const
1930{
1931    AutoMutex lock(mLock);
1932    return isOffloaded_l();
1933}
1934
1935bool AudioTrack::isDirect() const
1936{
1937    AutoMutex lock(mLock);
1938    return isDirect_l();
1939}
1940
1941bool AudioTrack::isOffloadedOrDirect() const
1942{
1943    AutoMutex lock(mLock);
1944    return isOffloadedOrDirect_l();
1945}
1946
1947
1948status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
1949{
1950
1951    const size_t SIZE = 256;
1952    char buffer[SIZE];
1953    String8 result;
1954
1955    result.append(" AudioTrack::dump\n");
1956    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1957            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
1958    result.append(buffer);
1959    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
1960            mChannelCount, mFrameCount);
1961    result.append(buffer);
1962    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1963    result.append(buffer);
1964    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1965    result.append(buffer);
1966    ::write(fd, result.string(), result.size());
1967    return NO_ERROR;
1968}
1969
1970uint32_t AudioTrack::getUnderrunFrames() const
1971{
1972    AutoMutex lock(mLock);
1973    return mProxy->getUnderrunFrames();
1974}
1975
1976void AudioTrack::setAttributesFromStreamType(audio_stream_type_t streamType) {
1977    mAttributes.flags = 0x0;
1978
1979    switch(streamType) {
1980    case AUDIO_STREAM_DEFAULT:
1981    case AUDIO_STREAM_MUSIC:
1982        mAttributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
1983        mAttributes.usage = AUDIO_USAGE_MEDIA;
1984        break;
1985    case AUDIO_STREAM_VOICE_CALL:
1986        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1987        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1988        break;
1989    case AUDIO_STREAM_ENFORCED_AUDIBLE:
1990        mAttributes.flags  |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
1991        // intended fall through, attributes in common with STREAM_SYSTEM
1992    case AUDIO_STREAM_SYSTEM:
1993        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1994        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
1995        break;
1996    case AUDIO_STREAM_RING:
1997        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1998        mAttributes.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
1999        break;
2000    case AUDIO_STREAM_ALARM:
2001        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2002        mAttributes.usage = AUDIO_USAGE_ALARM;
2003        break;
2004    case AUDIO_STREAM_NOTIFICATION:
2005        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2006        mAttributes.usage = AUDIO_USAGE_NOTIFICATION;
2007        break;
2008    case AUDIO_STREAM_BLUETOOTH_SCO:
2009        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
2010        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
2011        mAttributes.flags |= AUDIO_FLAG_SCO;
2012        break;
2013    case AUDIO_STREAM_DTMF:
2014        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2015        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
2016        break;
2017    case AUDIO_STREAM_TTS:
2018        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
2019        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
2020        break;
2021    default:
2022        ALOGE("invalid stream type %d when converting to attributes", streamType);
2023    }
2024}
2025
2026void AudioTrack::setStreamTypeFromAttributes(audio_attributes_t& aa) {
2027    // flags to stream type mapping
2028    if ((aa.flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
2029        mStreamType = AUDIO_STREAM_ENFORCED_AUDIBLE;
2030        return;
2031    }
2032    if ((aa.flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
2033        mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
2034        return;
2035    }
2036
2037    // usage to stream type mapping
2038    switch (aa.usage) {
2039    case AUDIO_USAGE_MEDIA:
2040    case AUDIO_USAGE_GAME:
2041    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
2042    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
2043        mStreamType = AUDIO_STREAM_MUSIC;
2044        return;
2045    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
2046        mStreamType = AUDIO_STREAM_SYSTEM;
2047        return;
2048    case AUDIO_USAGE_VOICE_COMMUNICATION:
2049        mStreamType = AUDIO_STREAM_VOICE_CALL;
2050        return;
2051
2052    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
2053        mStreamType = AUDIO_STREAM_DTMF;
2054        return;
2055
2056    case AUDIO_USAGE_ALARM:
2057        mStreamType = AUDIO_STREAM_ALARM;
2058        return;
2059    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
2060        mStreamType = AUDIO_STREAM_RING;
2061        return;
2062
2063    case AUDIO_USAGE_NOTIFICATION:
2064    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
2065    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
2066    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2067    case AUDIO_USAGE_NOTIFICATION_EVENT:
2068        mStreamType = AUDIO_STREAM_NOTIFICATION;
2069        return;
2070
2071    case AUDIO_USAGE_UNKNOWN:
2072    default:
2073        mStreamType = AUDIO_STREAM_MUSIC;
2074    }
2075}
2076
2077bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
2078    // has flags that map to a strategy?
2079    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) {
2080        return true;
2081    }
2082
2083    // has known usage?
2084    switch (paa->usage) {
2085    case AUDIO_USAGE_UNKNOWN:
2086    case AUDIO_USAGE_MEDIA:
2087    case AUDIO_USAGE_VOICE_COMMUNICATION:
2088    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
2089    case AUDIO_USAGE_ALARM:
2090    case AUDIO_USAGE_NOTIFICATION:
2091    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
2092    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
2093    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
2094    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2095    case AUDIO_USAGE_NOTIFICATION_EVENT:
2096    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
2097    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
2098    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
2099    case AUDIO_USAGE_GAME:
2100        break;
2101    default:
2102        return false;
2103    }
2104    return true;
2105}
2106// =========================================================================
2107
2108void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2109{
2110    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2111    if (audioTrack != 0) {
2112        AutoMutex lock(audioTrack->mLock);
2113        audioTrack->mProxy->binderDied();
2114    }
2115}
2116
2117// =========================================================================
2118
2119AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2120    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2121      mIgnoreNextPausedInt(false)
2122{
2123}
2124
2125AudioTrack::AudioTrackThread::~AudioTrackThread()
2126{
2127}
2128
2129bool AudioTrack::AudioTrackThread::threadLoop()
2130{
2131    {
2132        AutoMutex _l(mMyLock);
2133        if (mPaused) {
2134            mMyCond.wait(mMyLock);
2135            // caller will check for exitPending()
2136            return true;
2137        }
2138        if (mIgnoreNextPausedInt) {
2139            mIgnoreNextPausedInt = false;
2140            mPausedInt = false;
2141        }
2142        if (mPausedInt) {
2143            if (mPausedNs > 0) {
2144                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2145            } else {
2146                mMyCond.wait(mMyLock);
2147            }
2148            mPausedInt = false;
2149            return true;
2150        }
2151    }
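    // processAudioBuffer() returns 0 to run again immediately, NS_INACTIVE to pause until
    // resume() is called, NS_NEVER to exit the thread, and NS_WHENEVER or a positive value
    // as a sleep duration in nanoseconds before the next pass (see the switch below).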
2152    nsecs_t ns = mReceiver.processAudioBuffer();
2153    switch (ns) {
2154    case 0:
2155        return true;
2156    case NS_INACTIVE:
2157        pauseInternal();
2158        return true;
2159    case NS_NEVER:
2160        return false;
2161    case NS_WHENEVER:
2162        // FIXME increase poll interval, or make event-driven
2163        ns = 1000000000LL;
2164        // fall through
2165    default:
2166        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2167        pauseInternal(ns);
2168        return true;
2169    }
2170}
2171
2172void AudioTrack::AudioTrackThread::requestExit()
2173{
2174    // must be in this order to avoid a race condition
2175    Thread::requestExit();
2176    resume();
2177}
2178
2179void AudioTrack::AudioTrackThread::pause()
2180{
2181    AutoMutex _l(mMyLock);
2182    mPaused = true;
2183}
2184
2185void AudioTrack::AudioTrackThread::resume()
2186{
2187    AutoMutex _l(mMyLock);
2188    mIgnoreNextPausedInt = true;
2189    if (mPaused || mPausedInt) {
2190        mPaused = false;
2191        mPausedInt = false;
2192        mMyCond.signal();
2193    }
2194}
2195
2196void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2197{
2198    AutoMutex _l(mMyLock);
2199    mPausedInt = true;
2200    mPausedNs = ns;
2201}
2202
2203} // namespace android
2204