AudioTrack.cpp revision 5bd3f38638acab633d181359cc9ec27b80f84d43
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <math.h>
23#include <sys/resource.h>
24#include <audio_utils/primitives.h>
25#include <binder/IPCThreadState.h>
26#include <media/AudioTrack.h>
27#include <utils/Log.h>
28#include <private/media/AudioTrackShared.h>
29#include <media/IAudioFlinger.h>
30
31#define WAIT_PERIOD_MS                  10
32#define WAIT_STREAM_END_TIMEOUT_SEC     120
33
34
35namespace android {
36// ---------------------------------------------------------------------------
37
38// static
39status_t AudioTrack::getMinFrameCount(
40        size_t* frameCount,
41        audio_stream_type_t streamType,
42        uint32_t sampleRate)
43{
44    if (frameCount == NULL) {
45        return BAD_VALUE;
46    }
47
48    // FIXME merge with similar code in createTrack_l(), except we're missing
49    //       some information here that is available in createTrack_l():
50    //          audio_io_handle_t output
51    //          audio_format_t format
52    //          audio_channel_mask_t channelMask
53    //          audio_output_flags_t flags
54    uint32_t afSampleRate;
55    status_t status;
56    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
57    if (status != NO_ERROR) {
58        ALOGE("Unable to query output sample rate for stream type %d; status %d",
59                streamType, status);
60        return status;
61    }
62    size_t afFrameCount;
63    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
64    if (status != NO_ERROR) {
65        ALOGE("Unable to query output frame count for stream type %d; status %d",
66                streamType, status);
67        return status;
68    }
69    uint32_t afLatency;
70    status = AudioSystem::getOutputLatency(&afLatency, streamType);
71    if (status != NO_ERROR) {
72        ALOGE("Unable to query output latency for stream type %d; status %d",
73                streamType, status);
74        return status;
75    }
76
77    // Ensure that buffer depth covers at least audio hardware latency
78    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
79    if (minBufCount < 2) {
80        minBufCount = 2;
81    }
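    // For example (hypothetical HAL values): afFrameCount = 1024 frames, afSampleRate = 48000 Hz
    // and afLatency = 96 ms give a per-buffer duration of (1000 * 1024) / 48000 = 21 ms,
    // so minBufCount = 96 / 21 = 4.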
82
83    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
84            afFrameCount * minBufCount * sampleRate / afSampleRate;
85    // The formula above should always produce a non-zero value, but return an error
86    // in the unlikely event that it does not, as that's part of the API contract.
87    if (*frameCount == 0) {
88        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
89                streamType, sampleRate);
90        return BAD_VALUE;
91    }
92    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
93            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
94    return NO_ERROR;
95}
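// A typical client-side use of getMinFrameCount() (illustrative sketch; the stream type,
// sample rate and sizing policy below are assumptions, not requirements):
//
//     size_t minFrames;
//     if (AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 44100) == NO_ERROR) {
//         sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
//                 AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
//                 minFrames * 2 /* headroom */, AUDIO_OUTPUT_FLAG_NONE);
//     }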
96
97// ---------------------------------------------------------------------------
98
99AudioTrack::AudioTrack()
100    : mStatus(NO_INIT),
101      mIsTimed(false),
102      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
103      mPreviousSchedulingGroup(SP_DEFAULT),
104      mPausedPosition(0)
105{
106    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
107    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
108    mAttributes.flags = 0x0;
109    strcpy(mAttributes.tags, "");
110}
111
112AudioTrack::AudioTrack(
113        audio_stream_type_t streamType,
114        uint32_t sampleRate,
115        audio_format_t format,
116        audio_channel_mask_t channelMask,
117        size_t frameCount,
118        audio_output_flags_t flags,
119        callback_t cbf,
120        void* user,
121        uint32_t notificationFrames,
122        int sessionId,
123        transfer_type transferType,
124        const audio_offload_info_t *offloadInfo,
125        int uid,
126        pid_t pid)
127    : mStatus(NO_INIT),
128      mIsTimed(false),
129      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
130      mPreviousSchedulingGroup(SP_DEFAULT),
131      mPausedPosition(0)
132{
133    mStatus = set(streamType, sampleRate, format, channelMask,
134            frameCount, flags, cbf, user, notificationFrames,
135            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
136            offloadInfo, uid, pid, NULL /*no audio attributes*/);
137}
138
139AudioTrack::AudioTrack(
140        audio_stream_type_t streamType,
141        uint32_t sampleRate,
142        audio_format_t format,
143        audio_channel_mask_t channelMask,
144        const sp<IMemory>& sharedBuffer,
145        audio_output_flags_t flags,
146        callback_t cbf,
147        void* user,
148        uint32_t notificationFrames,
149        int sessionId,
150        transfer_type transferType,
151        const audio_offload_info_t *offloadInfo,
152        int uid,
153        pid_t pid)
154    : mStatus(NO_INIT),
155      mIsTimed(false),
156      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
157      mPreviousSchedulingGroup(SP_DEFAULT),
158      mPausedPosition(0)
159{
160    mStatus = set(streamType, sampleRate, format, channelMask,
161            0 /*frameCount*/, flags, cbf, user, notificationFrames,
162            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
163            uid, pid, NULL /*no audio attributes*/);
164}
165
166AudioTrack::~AudioTrack()
167{
168    if (mStatus == NO_ERROR) {
169        // Make sure that the callback function exits when it is looping
170        // on a buffer-full condition in obtainBuffer().
171        // Otherwise the callback thread will never exit.
172        stop();
173        if (mAudioTrackThread != 0) {
174            mProxy->interrupt();
175            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
176            mAudioTrackThread->requestExitAndWait();
177            mAudioTrackThread.clear();
178        }
179        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
180        mAudioTrack.clear();
181        mCblkMemory.clear();
182        mSharedBuffer.clear();
183        IPCThreadState::self()->flushCommands();
184        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
185                IPCThreadState::self()->getCallingPid(), mClientPid);
186        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
187    }
188}
189
190status_t AudioTrack::set(
191        audio_stream_type_t streamType,
192        uint32_t sampleRate,
193        audio_format_t format,
194        audio_channel_mask_t channelMask,
195        size_t frameCount,
196        audio_output_flags_t flags,
197        callback_t cbf,
198        void* user,
199        uint32_t notificationFrames,
200        const sp<IMemory>& sharedBuffer,
201        bool threadCanCallJava,
202        int sessionId,
203        transfer_type transferType,
204        const audio_offload_info_t *offloadInfo,
205        int uid,
206        pid_t pid,
207        audio_attributes_t* pAttributes)
208{
209    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
210          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
211          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
212          sessionId, transferType);
213
214    switch (transferType) {
215    case TRANSFER_DEFAULT:
216        if (sharedBuffer != 0) {
217            transferType = TRANSFER_SHARED;
218        } else if (cbf == NULL || threadCanCallJava) {
219            transferType = TRANSFER_SYNC;
220        } else {
221            transferType = TRANSFER_CALLBACK;
222        }
223        break;
224    case TRANSFER_CALLBACK:
225        if (cbf == NULL || sharedBuffer != 0) {
226            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
227            return BAD_VALUE;
228        }
229        break;
230    case TRANSFER_OBTAIN:
231    case TRANSFER_SYNC:
232        if (sharedBuffer != 0) {
233            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
234            return BAD_VALUE;
235        }
236        break;
237    case TRANSFER_SHARED:
238        if (sharedBuffer == 0) {
239            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
240            return BAD_VALUE;
241        }
242        break;
243    default:
244        ALOGE("Invalid transfer type %d", transferType);
245        return BAD_VALUE;
246    }
247    mSharedBuffer = sharedBuffer;
248    mTransfer = transferType;
249
250    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
251            sharedBuffer->size());
252
253    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
254
255    AutoMutex lock(mLock);
256
257    // invariant that mAudioTrack != 0 is true only after set() returns successfully
258    if (mAudioTrack != 0) {
259        ALOGE("Track already in use");
260        return INVALID_OPERATION;
261    }
262
263    // handle default values first.
264    if (streamType == AUDIO_STREAM_DEFAULT) {
265        streamType = AUDIO_STREAM_MUSIC;
266    }
267
268    if (pAttributes == NULL) {
269        if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
270            ALOGE("Invalid stream type %d", streamType);
271            return BAD_VALUE;
272        }
273        setAttributesFromStreamType(streamType);
274        mStreamType = streamType;
275    } else {
276        if (!isValidAttributes(pAttributes)) {
277            ALOGE("Invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
278                pAttributes->usage, pAttributes->content_type, pAttributes->flags,
279                pAttributes->tags);
280        }
281        // the stream type is not used here; this track has audio attributes
282        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
283        setStreamTypeFromAttributes(mAttributes);
284        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
285                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
286    }
287
288    status_t status;
289    if (sampleRate == 0) {
290        status = AudioSystem::getOutputSamplingRateForAttr(&sampleRate, &mAttributes);
291        if (status != NO_ERROR) {
292            ALOGE("Could not get output sample rate for stream type %d; status %d",
293                    mStreamType, status);
294            return status;
295        }
296    }
297    mSampleRate = sampleRate;
298
299    // these below should probably come from the audioFlinger too...
300    if (format == AUDIO_FORMAT_DEFAULT) {
301        format = AUDIO_FORMAT_PCM_16_BIT;
302    }
303
304    // validate parameters
305    if (!audio_is_valid_format(format)) {
306        ALOGE("Invalid format %#x", format);
307        return BAD_VALUE;
308    }
309    mFormat = format;
310
311    if (!audio_is_output_channel(channelMask)) {
312        ALOGE("Invalid channel mask %#x", channelMask);
313        return BAD_VALUE;
314    }
315    mChannelMask = channelMask;
316    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
317    mChannelCount = channelCount;
318
319    // AudioFlinger does not currently support 8-bit data in shared memory
320    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
321        ALOGE("8-bit data in shared memory is not supported");
322        return BAD_VALUE;
323    }
324
325    // force direct flag if format is not linear PCM
326    // or offload was requested
327    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
328            || !audio_is_linear_pcm(format)) {
329        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
330                    ? "Offload request, forcing to Direct Output"
331                    : "Not linear PCM, forcing to Direct Output");
332        flags = (audio_output_flags_t)
333                // FIXME why can't we allow direct AND fast?
334                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
335    }
336    // only allow deep buffering for music stream type
337    if (mStreamType != AUDIO_STREAM_MUSIC) {
338        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
339    }
340
341    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
342        if (audio_is_linear_pcm(format)) {
343            mFrameSize = channelCount * audio_bytes_per_sample(format);
344        } else {
345            mFrameSize = sizeof(uint8_t);
346        }
347        mFrameSizeAF = mFrameSize;
348    } else {
349        ALOG_ASSERT(audio_is_linear_pcm(format));
350        mFrameSize = channelCount * audio_bytes_per_sample(format);
351        mFrameSizeAF = channelCount * audio_bytes_per_sample(
352                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
353        // createTrack will return an error if PCM format is not supported by server,
354        // so no need to check for specific PCM formats here
355    }
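    // For example (illustrative): stereo 16-bit PCM gives mFrameSize = mFrameSizeAF = 2 * 2 = 4
    // bytes; stereo 8-bit PCM on a mixed (non-direct) output gives mFrameSize = 2 but
    // mFrameSizeAF = 4, since the server mixes at 16 bits; a compressed direct stream uses a
    // one-byte "frame".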
356
357    // Make copy of input parameter offloadInfo so that in the future:
358    //  (a) createTrack_l doesn't need it as an input parameter
359    //  (b) we can support re-creation of offloaded tracks
360    if (offloadInfo != NULL) {
361        mOffloadInfoCopy = *offloadInfo;
362        mOffloadInfo = &mOffloadInfoCopy;
363    } else {
364        mOffloadInfo = NULL;
365    }
366
367    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
368    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
369    mSendLevel = 0.0f;
370    // mFrameCount is initialized in createTrack_l
371    mReqFrameCount = frameCount;
372    mNotificationFramesReq = notificationFrames;
373    mNotificationFramesAct = 0;
374    mSessionId = sessionId;
375    int callingpid = IPCThreadState::self()->getCallingPid();
376    int mypid = getpid();
377    if (uid == -1 || (callingpid != mypid)) {
378        mClientUid = IPCThreadState::self()->getCallingUid();
379    } else {
380        mClientUid = uid;
381    }
382    if (pid == -1 || (callingpid != mypid)) {
383        mClientPid = callingpid;
384    } else {
385        mClientPid = pid;
386    }
387    mAuxEffectId = 0;
388    mFlags = flags;
389    mCbf = cbf;
390
391    if (cbf != NULL) {
392        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
393        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
394    }
395
396    // create the IAudioTrack
397    status = createTrack_l(0 /*epoch*/);
398
399    if (status != NO_ERROR) {
400        if (mAudioTrackThread != 0) {
401            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
402            mAudioTrackThread->requestExitAndWait();
403            mAudioTrackThread.clear();
404        }
405        return status;
406    }
407
408    mStatus = NO_ERROR;
409    mState = STATE_STOPPED;
410    mUserData = user;
411    mLoopPeriod = 0;
412    mMarkerPosition = 0;
413    mMarkerReached = false;
414    mNewPosition = 0;
415    mUpdatePeriod = 0;
416    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
417    mSequence = 1;
418    mObservedSequence = mSequence;
419    mInUnderrun = false;
420
421    return NO_ERROR;
422}
423
424// -------------------------------------------------------------------------
425
426status_t AudioTrack::start()
427{
428    AutoMutex lock(mLock);
429
430    if (mState == STATE_ACTIVE) {
431        return INVALID_OPERATION;
432    }
433
434    mInUnderrun = true;
435
436    State previousState = mState;
437    if (previousState == STATE_PAUSED_STOPPING) {
438        mState = STATE_STOPPING;
439    } else {
440        mState = STATE_ACTIVE;
441    }
442    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
443        // reset current position as seen by client to 0
444        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
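        // (The proxy computes the client-visible position relative to an epoch, so lowering
        // the epoch by the current reading makes the next getPosition() report 0.)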
445        // force refresh of remaining frames by processAudioBuffer() as last
446        // write before stop could be partial.
447        mRefreshRemaining = true;
448    }
449    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
450    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
451
452    sp<AudioTrackThread> t = mAudioTrackThread;
453    if (t != 0) {
454        if (previousState == STATE_STOPPING) {
455            mProxy->interrupt();
456        } else {
457            t->resume();
458        }
459    } else {
460        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
461        get_sched_policy(0, &mPreviousSchedulingGroup);
462        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
463    }
464
465    status_t status = NO_ERROR;
466    if (!(flags & CBLK_INVALID)) {
467        status = mAudioTrack->start();
468        if (status == DEAD_OBJECT) {
469            flags |= CBLK_INVALID;
470        }
471    }
472    if (flags & CBLK_INVALID) {
473        status = restoreTrack_l("start");
474    }
475
476    if (status != NO_ERROR) {
477        ALOGE("start() status %d", status);
478        mState = previousState;
479        if (t != 0) {
480            if (previousState != STATE_STOPPING) {
481                t->pause();
482            }
483        } else {
484            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
485            set_sched_policy(0, mPreviousSchedulingGroup);
486        }
487    }
488
489    return status;
490}
491
492void AudioTrack::stop()
493{
494    AutoMutex lock(mLock);
495    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
496        return;
497    }
498
499    if (isOffloaded_l()) {
500        mState = STATE_STOPPING;
501    } else {
502        mState = STATE_STOPPED;
503    }
504
505    mProxy->interrupt();
506    mAudioTrack->stop();
507    // the playback head position will reset to 0, so if a marker is set, we need
508    // to activate it again
509    mMarkerReached = false;
510#if 0
511    // Force a flush if a shared buffer is used, otherwise AudioFlinger
512    // will not stop before the end of the buffer is reached.
513    // This may be needed to guarantee that playback stops, most likely when looping is enabled.
514    if (mSharedBuffer != 0) {
515        flush_l();
516    }
517#endif
518
519    sp<AudioTrackThread> t = mAudioTrackThread;
520    if (t != 0) {
521        if (!isOffloaded_l()) {
522            t->pause();
523        }
524    } else {
525        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
526        set_sched_policy(0, mPreviousSchedulingGroup);
527    }
528}
529
530bool AudioTrack::stopped() const
531{
532    AutoMutex lock(mLock);
533    return mState != STATE_ACTIVE;
534}
535
536void AudioTrack::flush()
537{
538    if (mSharedBuffer != 0) {
539        return;
540    }
541    AutoMutex lock(mLock);
542    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
543        return;
544    }
545    flush_l();
546}
547
548void AudioTrack::flush_l()
549{
550    ALOG_ASSERT(mState != STATE_ACTIVE);
551
552    // clear playback marker and periodic update counter
553    mMarkerPosition = 0;
554    mMarkerReached = false;
555    mUpdatePeriod = 0;
556    mRefreshRemaining = true;
557
558    mState = STATE_FLUSHED;
559    if (isOffloaded_l()) {
560        mProxy->interrupt();
561    }
562    mProxy->flush();
563    mAudioTrack->flush();
564}
565
566void AudioTrack::pause()
567{
568    AutoMutex lock(mLock);
569    if (mState == STATE_ACTIVE) {
570        mState = STATE_PAUSED;
571    } else if (mState == STATE_STOPPING) {
572        mState = STATE_PAUSED_STOPPING;
573    } else {
574        return;
575    }
576    mProxy->interrupt();
577    mAudioTrack->pause();
578
579    if (isOffloaded_l()) {
580        if (mOutput != AUDIO_IO_HANDLE_NONE) {
581            uint32_t halFrames;
582            // OffloadThread sends the HAL pause in its threadLoop; the time saved
583            // here can be slightly off.
584            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
585            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
586        }
587    }
588}
589
590status_t AudioTrack::setVolume(float left, float right)
591{
592    // This duplicates a test by AudioTrack JNI, but that is not the only caller
593    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
594            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
595        return BAD_VALUE;
596    }
597
598    AutoMutex lock(mLock);
599    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
600    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
601
602    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
603
604    if (isOffloaded_l()) {
605        mAudioTrack->signal();
606    }
607    return NO_ERROR;
608}
609
610status_t AudioTrack::setVolume(float volume)
611{
612    return setVolume(volume, volume);
613}
614
615status_t AudioTrack::setAuxEffectSendLevel(float level)
616{
617    // This duplicates a test by AudioTrack JNI, but that is not the only caller
618    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
619        return BAD_VALUE;
620    }
621
622    AutoMutex lock(mLock);
623    mSendLevel = level;
624    mProxy->setSendLevel(level);
625
626    return NO_ERROR;
627}
628
629void AudioTrack::getAuxEffectSendLevel(float* level) const
630{
631    if (level != NULL) {
632        *level = mSendLevel;
633    }
634}
635
636status_t AudioTrack::setSampleRate(uint32_t rate)
637{
638    if (mIsTimed || isOffloaded()) {
639        return INVALID_OPERATION;
640    }
641
642    uint32_t afSamplingRate;
643    if (AudioSystem::getOutputSamplingRateForAttr(&afSamplingRate, &mAttributes) != NO_ERROR) {
644        return NO_INIT;
645    }
646    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
647    if (rate == 0 || rate > afSamplingRate*2 ) {
648        return BAD_VALUE;
649    }
650
651    AutoMutex lock(mLock);
652    mSampleRate = rate;
653    mProxy->setSampleRate(rate);
654
655    return NO_ERROR;
656}
657
658uint32_t AudioTrack::getSampleRate() const
659{
660    if (mIsTimed) {
661        return 0;
662    }
663
664    AutoMutex lock(mLock);
665
666    // sample rate can be updated during playback by the offloaded decoder so we need to
667    // query the HAL and update if needed.
668// FIXME use Proxy return channel to update the rate from server and avoid polling here
669    if (isOffloaded_l()) {
670        if (mOutput != AUDIO_IO_HANDLE_NONE) {
671            uint32_t sampleRate = 0;
672            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
673            if (status == NO_ERROR) {
674                mSampleRate = sampleRate;
675            }
676        }
677    }
678    return mSampleRate;
679}
680
681status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
682{
683    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
684        return INVALID_OPERATION;
685    }
686
687    if (loopCount == 0) {
688        ;
689    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
690            loopEnd - loopStart >= MIN_LOOP) {
691        ;
692    } else {
693        return BAD_VALUE;
694    }
695
696    AutoMutex lock(mLock);
697    // See setPosition() regarding setting parameters such as loop points or position while active
698    if (mState == STATE_ACTIVE) {
699        return INVALID_OPERATION;
700    }
701    setLoop_l(loopStart, loopEnd, loopCount);
702    return NO_ERROR;
703}
704
705void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
706{
707    // FIXME If setting a loop also sets position to start of loop, then
708    //       this is correct.  Otherwise it should be removed.
709    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
710    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
711    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
712}
713
714status_t AudioTrack::setMarkerPosition(uint32_t marker)
715{
716    // The only purpose of setting marker position is to get a callback
717    if (mCbf == NULL || isOffloaded()) {
718        return INVALID_OPERATION;
719    }
720
721    AutoMutex lock(mLock);
722    mMarkerPosition = marker;
723    mMarkerReached = false;
724
725    return NO_ERROR;
726}
727
728status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
729{
730    if (isOffloaded()) {
731        return INVALID_OPERATION;
732    }
733    if (marker == NULL) {
734        return BAD_VALUE;
735    }
736
737    AutoMutex lock(mLock);
738    *marker = mMarkerPosition;
739
740    return NO_ERROR;
741}
742
743status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
744{
745    // The only purpose of setting position update period is to get a callback
746    if (mCbf == NULL || isOffloaded()) {
747        return INVALID_OPERATION;
748    }
749
750    AutoMutex lock(mLock);
751    mNewPosition = mProxy->getPosition() + updatePeriod;
752    mUpdatePeriod = updatePeriod;
753
754    return NO_ERROR;
755}
756
757status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
758{
759    if (isOffloaded()) {
760        return INVALID_OPERATION;
761    }
762    if (updatePeriod == NULL) {
763        return BAD_VALUE;
764    }
765
766    AutoMutex lock(mLock);
767    *updatePeriod = mUpdatePeriod;
768
769    return NO_ERROR;
770}
771
772status_t AudioTrack::setPosition(uint32_t position)
773{
774    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
775        return INVALID_OPERATION;
776    }
777    if (position > mFrameCount) {
778        return BAD_VALUE;
779    }
780
781    AutoMutex lock(mLock);
782    // Currently we require that the player is inactive before setting parameters such as position
783    // or loop points.  Otherwise, there could be a race condition: the application could read the
784    // current position, compute a new position or loop parameters, and then set that position or
785    // loop parameters but it would do the "wrong" thing since the position has continued to advance
786    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
787    // to specify how it wants to handle such scenarios.
788    if (mState == STATE_ACTIVE) {
789        return INVALID_OPERATION;
790    }
791    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
792    mLoopPeriod = 0;
793    // FIXME Check whether loops and setting position are incompatible in old code.
794    // If we use setLoop for both purposes we lose the capability to set the position while looping.
795    mStaticProxy->setLoop(position, mFrameCount, 0);
796
797    return NO_ERROR;
798}
799
800status_t AudioTrack::getPosition(uint32_t *position) const
801{
802    if (position == NULL) {
803        return BAD_VALUE;
804    }
805
806    AutoMutex lock(mLock);
807    if (isOffloaded_l()) {
808        uint32_t dspFrames = 0;
809
810        if ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING)) {
811            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
812            *position = mPausedPosition;
813            return NO_ERROR;
814        }
815
816        if (mOutput != AUDIO_IO_HANDLE_NONE) {
817            uint32_t halFrames;
818            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
819        }
820        *position = dspFrames;
821    } else {
822        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
823        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
824                mProxy->getPosition();
825    }
826    return NO_ERROR;
827}
828
829status_t AudioTrack::getBufferPosition(uint32_t *position)
830{
831    if (mSharedBuffer == 0 || mIsTimed) {
832        return INVALID_OPERATION;
833    }
834    if (position == NULL) {
835        return BAD_VALUE;
836    }
837
838    AutoMutex lock(mLock);
839    *position = mStaticProxy->getBufferPosition();
840    return NO_ERROR;
841}
842
843status_t AudioTrack::reload()
844{
845    if (mSharedBuffer == 0 || mIsTimed || isOffloaded()) {
846        return INVALID_OPERATION;
847    }
848
849    AutoMutex lock(mLock);
850    // See setPosition() regarding setting parameters such as loop points or position while active
851    if (mState == STATE_ACTIVE) {
852        return INVALID_OPERATION;
853    }
854    mNewPosition = mUpdatePeriod;
855    mLoopPeriod = 0;
856    // FIXME The new code cannot reload while keeping a loop specified.
857    // Need to check how the old code handled this, and whether it's a significant change.
858    mStaticProxy->setLoop(0, mFrameCount, 0);
859    return NO_ERROR;
860}
861
862audio_io_handle_t AudioTrack::getOutput() const
863{
864    AutoMutex lock(mLock);
865    return mOutput;
866}
867
868status_t AudioTrack::attachAuxEffect(int effectId)
869{
870    AutoMutex lock(mLock);
871    status_t status = mAudioTrack->attachAuxEffect(effectId);
872    if (status == NO_ERROR) {
873        mAuxEffectId = effectId;
874    }
875    return status;
876}
877
878// -------------------------------------------------------------------------
879
880// must be called with mLock held
881status_t AudioTrack::createTrack_l(size_t epoch)
882{
883    status_t status;
884    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
885    if (audioFlinger == 0) {
886        ALOGE("Could not get audioflinger");
887        return NO_INIT;
888    }
889
890    audio_io_handle_t output = AudioSystem::getOutputForAttr(&mAttributes, mSampleRate, mFormat,
891            mChannelMask, mFlags, mOffloadInfo);
892    if (output == AUDIO_IO_HANDLE_NONE) {
893        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
894              " channel mask %#x, flags %#x",
895              mStreamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
896        return BAD_VALUE;
897    }
898    {
899    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
900    // we must release it ourselves if anything goes wrong.
901
902    // Not all of these values are needed under all conditions, but it is easier to get them all
903
904    uint32_t afLatency;
905    status = AudioSystem::getLatency(output, &afLatency);
906    if (status != NO_ERROR) {
907        ALOGE("getLatency(%d) failed status %d", output, status);
908        goto release;
909    }
910
911    size_t afFrameCount;
912    status = AudioSystem::getFrameCount(output, &afFrameCount);
913    if (status != NO_ERROR) {
914        ALOGE("getFrameCount(output=%d) status %d", output, status);
915        goto release;
916    }
917
918    uint32_t afSampleRate;
919    status = AudioSystem::getSamplingRate(output, &afSampleRate);
920    if (status != NO_ERROR) {
921        ALOGE("getSamplingRate(output=%d) status %d", output, status);
922        goto release;
923    }
924
925    // Client decides whether the track is TIMED (see below), but can only express a preference
926    // for FAST.  Server will perform additional tests.
927    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
928            // either of these use cases:
929            // use case 1: shared buffer
930            (mSharedBuffer != 0) ||
931            // use case 2: callback transfer mode
932            (mTransfer == TRANSFER_CALLBACK)) &&
933            // matching sample rate
934            (mSampleRate == afSampleRate))) {
935        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
936        // once denied, do not request again if IAudioTrack is re-created
937        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
938    }
939    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
940
941    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
942    //  n = 1   fast track with single buffering; nBuffering is ignored
943    //  n = 2   fast track with double buffering
944    //  n = 2   normal track, no sample rate conversion
945    //  n = 3   normal track, with sample rate conversion
946    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
947    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
948    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
949
950    mNotificationFramesAct = mNotificationFramesReq;
951
952    size_t frameCount = mReqFrameCount;
953    if (!audio_is_linear_pcm(mFormat)) {
954
955        if (mSharedBuffer != 0) {
956            // Same comment as below about ignoring frameCount parameter for set()
957            frameCount = mSharedBuffer->size();
958        } else if (frameCount == 0) {
959            frameCount = afFrameCount;
960        }
961        if (mNotificationFramesAct != frameCount) {
962            mNotificationFramesAct = frameCount;
963        }
964    } else if (mSharedBuffer != 0) {
965
966        // Ensure that buffer alignment matches channel count
967        // 8-bit data in shared memory is not currently supported by AudioFlinger
968        size_t alignment = audio_bytes_per_sample(
969                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
970        if (alignment & 1) {
971            alignment = 1;
972        }
973        if (mChannelCount > 1) {
974            // More than 2 channels does not require stronger alignment than stereo
975            alignment <<= 1;
976        }
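        // For example (illustrative): a stereo 16-bit buffer (or 8-bit, which the server
        // expands to 16-bit) must start on a 4-byte boundary, while mono 16-bit only needs
        // 2-byte alignment.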
977        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
978            ALOGE("Invalid buffer alignment: address %p, channel count %u",
979                    mSharedBuffer->pointer(), mChannelCount);
980            status = BAD_VALUE;
981            goto release;
982        }
983
984        // When initializing a shared buffer AudioTrack via constructors,
985        // there's no frameCount parameter.
986        // But when initializing a shared buffer AudioTrack via set(),
987        // there _is_ a frameCount parameter.  We silently ignore it.
988        frameCount = mSharedBuffer->size() / mFrameSizeAF;
989
990    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
991
992        // FIXME move these calculations and associated checks to server
993
994        // Ensure that buffer depth covers at least audio hardware latency
995        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
996        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
997                afFrameCount, minBufCount, afSampleRate, afLatency);
998        if (minBufCount <= nBuffering) {
999            minBufCount = nBuffering;
1000        }
1001
1002        size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
1003        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
1004                ", afLatency=%d",
1005                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
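        // For example (illustrative): afFrameCount = 1024, afSampleRate = 48000, afLatency = 64 ms
        // and mSampleRate = 44100 give minBufCount = 64 / 21 = 3 (== nBuffering, so unchanged) and
        // minFrameCount = (1024 * 44100 * 3) / 48000 = 2822 frames.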
1006
1007        if (frameCount == 0) {
1008            frameCount = minFrameCount;
1009        } else if (frameCount < minFrameCount) {
1010            // not ALOGW because it happens all the time when playing key clicks over A2DP
1011            ALOGV("Minimum buffer size corrected from %d to %d",
1012                     frameCount, minFrameCount);
1013            frameCount = minFrameCount;
1014        }
1015        // Make sure that the application is notified with sufficient margin before underrun
1016        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1017            mNotificationFramesAct = frameCount/nBuffering;
1018        }
1019
1020    } else {
1021        // For fast tracks, the frame count calculations and checks are done by server
1022    }
1023
1024    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1025    if (mIsTimed) {
1026        trackFlags |= IAudioFlinger::TRACK_TIMED;
1027    }
1028
1029    pid_t tid = -1;
1030    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1031        trackFlags |= IAudioFlinger::TRACK_FAST;
1032        if (mAudioTrackThread != 0) {
1033            tid = mAudioTrackThread->getTid();
1034        }
1035    }
1036
1037    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1038        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1039    }
1040
1041    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1042                                // but we will still need the original value also
1043    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
1044                                                      mSampleRate,
1045                                                      // AudioFlinger only sees 16-bit PCM
1046                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
1047                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
1048                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
1049                                                      mChannelMask,
1050                                                      &temp,
1051                                                      &trackFlags,
1052                                                      mSharedBuffer,
1053                                                      output,
1054                                                      tid,
1055                                                      &mSessionId,
1056                                                      mClientUid,
1057                                                      &status);
1058
1059    if (status != NO_ERROR) {
1060        ALOGE("AudioFlinger could not create track, status: %d", status);
1061        goto release;
1062    }
1063    ALOG_ASSERT(track != 0);
1064
1065    // AudioFlinger now owns the reference to the I/O handle,
1066    // so we are no longer responsible for releasing it.
1067
1068    sp<IMemory> iMem = track->getCblk();
1069    if (iMem == 0) {
1070        ALOGE("Could not get control block");
1071        return NO_INIT;
1072    }
1073    void *iMemPointer = iMem->pointer();
1074    if (iMemPointer == NULL) {
1075        ALOGE("Could not get control block pointer");
1076        return NO_INIT;
1077    }
1078    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1079    if (mAudioTrack != 0) {
1080        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1081        mDeathNotifier.clear();
1082    }
1083    mAudioTrack = track;
1084    mCblkMemory = iMem;
1085    IPCThreadState::self()->flushCommands();
1086
1087    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1088    mCblk = cblk;
1089    // note that temp is the (possibly revised) value of frameCount
1090    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1091        // In the current design, the AudioTrack client checks and ensures frame count validity
1092        // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
1093        // except for a fast track, which uses a special method of assigning its frame count.
1094        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
1095    }
1096    frameCount = temp;
1097
1098    mAwaitBoost = false;
1099    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1100        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1101            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
1102            mAwaitBoost = true;
1103            if (mSharedBuffer == 0) {
1104                // Theoretically double-buffering is not required for fast tracks,
1105                // due to tighter scheduling.  But in practice, to accommodate kernels with
1106                // scheduling jitter, and apps with computation jitter, we use double-buffering.
1107                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1108                    mNotificationFramesAct = frameCount/nBuffering;
1109                }
1110            }
1111        } else {
1112            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1113            // once denied, do not request again if IAudioTrack is re-created
1114            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1115            if (mSharedBuffer == 0) {
1116                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1117                    mNotificationFramesAct = frameCount/nBuffering;
1118                }
1119            }
1120        }
1121    }
1122    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1123        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1124            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1125        } else {
1126            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1127            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1128            // FIXME This is a warning, not an error, so don't return error status
1129            //return NO_INIT;
1130        }
1131    }
1132
1133    // We retain a copy of the I/O handle, but don't own the reference
1134    mOutput = output;
1135    mRefreshRemaining = true;
1136
1137    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1138    // is the value of pointer() for the shared buffer, otherwise buffers points
1139    // immediately after the control block.  This address is for the mapping within client
1140    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1141    void* buffers;
1142    if (mSharedBuffer == 0) {
1143        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1144    } else {
1145        buffers = mSharedBuffer->pointer();
1146    }
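    // That is, for a streaming track the region mapped from the server is laid out roughly as
    // [ audio_track_cblk_t | frameCount * mFrameSizeAF bytes of audio data ], while a static
    // track keeps its audio data in the client-supplied shared IMemory.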
1147
1148    mAudioTrack->attachAuxEffect(mAuxEffectId);
1149    // FIXME don't believe this lie
1150    mLatency = afLatency + (1000*frameCount) / mSampleRate;
1151
1152    mFrameCount = frameCount;
1153    // If IAudioTrack is re-created, don't let the requested frameCount
1154    // decrease.  This can confuse clients that cache frameCount().
1155    if (frameCount > mReqFrameCount) {
1156        mReqFrameCount = frameCount;
1157    }
1158
1159    // update proxy
1160    if (mSharedBuffer == 0) {
1161        mStaticProxy.clear();
1162        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1163    } else {
1164        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1165        mProxy = mStaticProxy;
1166    }
1167    mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
1168    mProxy->setSendLevel(mSendLevel);
1169    mProxy->setSampleRate(mSampleRate);
1170    mProxy->setEpoch(epoch);
1171    mProxy->setMinimum(mNotificationFramesAct);
1172
1173    mDeathNotifier = new DeathNotifier(this);
1174    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1175
1176    return NO_ERROR;
1177    }
1178
1179release:
1180    AudioSystem::releaseOutput(output);
1181    if (status == NO_ERROR) {
1182        status = NO_INIT;
1183    }
1184    return status;
1185}
1186
1187status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1188{
1189    if (audioBuffer == NULL) {
1190        return BAD_VALUE;
1191    }
1192    if (mTransfer != TRANSFER_OBTAIN) {
1193        audioBuffer->frameCount = 0;
1194        audioBuffer->size = 0;
1195        audioBuffer->raw = NULL;
1196        return INVALID_OPERATION;
1197    }
1198
1199    const struct timespec *requested;
1200    struct timespec timeout;
1201    if (waitCount == -1) {
1202        requested = &ClientProxy::kForever;
1203    } else if (waitCount == 0) {
1204        requested = &ClientProxy::kNonBlocking;
1205    } else if (waitCount > 0) {
1206        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1207        timeout.tv_sec = ms / 1000;
1208        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1209        requested = &timeout;
1210    } else {
1211        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1212        requested = NULL;
1213    }
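    // For example (illustrative): with WAIT_PERIOD_MS = 10, waitCount = 250 requests a
    // 2500 ms timeout (timeout.tv_sec = 2, timeout.tv_nsec = 500000000).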
1214    return obtainBuffer(audioBuffer, requested);
1215}
1216
1217status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1218        struct timespec *elapsed, size_t *nonContig)
1219{
1220    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1221    uint32_t oldSequence = 0;
1222    uint32_t newSequence;
1223
1224    Proxy::Buffer buffer;
1225    status_t status = NO_ERROR;
1226
1227    static const int32_t kMaxTries = 5;
1228    int32_t tryCounter = kMaxTries;
1229
1230    do {
1231        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1232        // keep them from going away if another thread re-creates the track during obtainBuffer()
1233        sp<AudioTrackClientProxy> proxy;
1234        sp<IMemory> iMem;
1235
1236        {   // start of lock scope
1237            AutoMutex lock(mLock);
1238
1239            newSequence = mSequence;
1240            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1241            if (status == DEAD_OBJECT) {
1242                // re-create track, unless someone else has already done so
1243                if (newSequence == oldSequence) {
1244                    status = restoreTrack_l("obtainBuffer");
1245                    if (status != NO_ERROR) {
1246                        buffer.mFrameCount = 0;
1247                        buffer.mRaw = NULL;
1248                        buffer.mNonContig = 0;
1249                        break;
1250                    }
1251                }
1252            }
1253            oldSequence = newSequence;
1254
1255            // Keep the extra references
1256            proxy = mProxy;
1257            iMem = mCblkMemory;
1258
1259            if (mState == STATE_STOPPING) {
1260                status = -EINTR;
1261                buffer.mFrameCount = 0;
1262                buffer.mRaw = NULL;
1263                buffer.mNonContig = 0;
1264                break;
1265            }
1266
1267            // Non-blocking if track is stopped or paused
1268            if (mState != STATE_ACTIVE) {
1269                requested = &ClientProxy::kNonBlocking;
1270            }
1271
1272        }   // end of lock scope
1273
1274        buffer.mFrameCount = audioBuffer->frameCount;
1275        // FIXME starts the requested timeout and elapsed over from scratch
1276        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1277
1278    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1279
1280    audioBuffer->frameCount = buffer.mFrameCount;
1281    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1282    audioBuffer->raw = buffer.mRaw;
1283    if (nonContig != NULL) {
1284        *nonContig = buffer.mNonContig;
1285    }
1286    return status;
1287}
1288
1289void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1290{
1291    if (mTransfer == TRANSFER_SHARED) {
1292        return;
1293    }
1294
1295    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1296    if (stepCount == 0) {
1297        return;
1298    }
1299
1300    Proxy::Buffer buffer;
1301    buffer.mFrameCount = stepCount;
1302    buffer.mRaw = audioBuffer->raw;
1303
1304    AutoMutex lock(mLock);
1305    mInUnderrun = false;
1306    mProxy->releaseBuffer(&buffer);
1307
1308    // restart track if it was disabled by audioflinger due to previous underrun
1309    if (mState == STATE_ACTIVE) {
1310        audio_track_cblk_t* cblk = mCblk;
1311        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1312            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1313            // FIXME ignoring status
1314            mAudioTrack->start();
1315        }
1316    }
1317}
1318
1319// -------------------------------------------------------------------------
1320
1321ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1322{
1323    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1324        return INVALID_OPERATION;
1325    }
1326
1327    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1328        // Sanity-check: the user is most likely passing an error code, and it would
1329        // make the return value ambiguous (actualSize vs error).
1330        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1331        return BAD_VALUE;
1332    }
1333
1334    size_t written = 0;
1335    Buffer audioBuffer;
1336
1337    while (userSize >= mFrameSize) {
1338        audioBuffer.frameCount = userSize / mFrameSize;
1339
1340        status_t err = obtainBuffer(&audioBuffer,
1341                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1342        if (err < 0) {
1343            if (written > 0) {
1344                break;
1345            }
1346            return ssize_t(err);
1347        }
1348
1349        size_t toWrite;
1350        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1351            // Divide capacity by 2 to take expansion into account
1352            toWrite = audioBuffer.size >> 1;
1353            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1354        } else {
1355            toWrite = audioBuffer.size;
1356            memcpy(audioBuffer.i8, buffer, toWrite);
1357        }
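        // For example (illustrative): if obtainBuffer() returned a 4096-byte 16-bit server
        // buffer, the 8-bit path consumes toWrite = 2048 source bytes and expands them into
        // all 4096 bytes; the 16-bit path copies 4096 bytes directly.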
1358        buffer = ((const char *) buffer) + toWrite;
1359        userSize -= toWrite;
1360        written += toWrite;
1361
1362        releaseBuffer(&audioBuffer);
1363    }
1364
1365    return written;
1366}
1367
1368// -------------------------------------------------------------------------
1369
1370TimedAudioTrack::TimedAudioTrack() {
1371    mIsTimed = true;
1372}
1373
1374status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1375{
1376    AutoMutex lock(mLock);
1377    status_t result = UNKNOWN_ERROR;
1378
1379#if 1
1380    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1381    // while we are accessing the cblk
1382    sp<IAudioTrack> audioTrack = mAudioTrack;
1383    sp<IMemory> iMem = mCblkMemory;
1384#endif
1385
1386    // If the track is not already invalid, try to allocate a buffer.  If the allocation
1387    // fails, indicating that the server is dead, flag the track as invalid so
1388    // we can attempt to restore it in just a bit.
1389    audio_track_cblk_t* cblk = mCblk;
1390    if (!(cblk->mFlags & CBLK_INVALID)) {
1391        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1392        if (result == DEAD_OBJECT) {
1393            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1394        }
1395    }
1396
1397    // If the track is invalid at this point, attempt to restore it and try the
1398    // allocation one more time.
1399    if (cblk->mFlags & CBLK_INVALID) {
1400        result = restoreTrack_l("allocateTimedBuffer");
1401
1402        if (result == NO_ERROR) {
1403            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1404        }
1405    }
1406
1407    return result;
1408}
1409
1410status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1411                                           int64_t pts)
1412{
1413    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1414    {
1415        AutoMutex lock(mLock);
1416        audio_track_cblk_t* cblk = mCblk;
1417        // restart track if it was disabled by audioflinger due to previous underrun
1418        if (buffer->size() != 0 && status == NO_ERROR &&
1419                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1420            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1421            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1422            // FIXME ignoring status
1423            mAudioTrack->start();
1424        }
1425    }
1426    return status;
1427}
1428
1429status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1430                                                TargetTimeline target)
1431{
1432    return mAudioTrack->setMediaTimeTransform(xform, target);
1433}
1434
1435// -------------------------------------------------------------------------
1436
1437nsecs_t AudioTrack::processAudioBuffer()
1438{
1439    // Currently the AudioTrack thread is not created if there are no callbacks.
1440    // Would it ever make sense to run the thread, even without callbacks?
1441    // If so, then replace this by checks at each use for mCbf != NULL.
1442    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1443
1444    mLock.lock();
1445    if (mAwaitBoost) {
1446        mAwaitBoost = false;
1447        mLock.unlock();
1448        static const int32_t kMaxTries = 5;
1449        int32_t tryCounter = kMaxTries;
1450        uint32_t pollUs = 10000;
1451        do {
1452            int policy = sched_getscheduler(0);
1453            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1454                break;
1455            }
1456            usleep(pollUs);
1457            pollUs <<= 1;
1458        } while (tryCounter-- > 0);
1459        if (tryCounter < 0) {
1460            ALOGE("did not receive expected priority boost on time");
1461        }
1462        // Run again immediately
1463        return 0;
1464    }
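    // (Illustrative timing: pollUs starts at 10 ms and doubles each try, so the loop above
    // waits roughly 10 + 20 + 40 + ... ms, about 0.6 s in the worst case, for the server to
    // promote this thread to SCHED_FIFO/SCHED_RR.)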
1465
1466    // Can only reference mCblk while locked
1467    int32_t flags = android_atomic_and(
1468        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1469
1470    // Check for track invalidation
1471    if (flags & CBLK_INVALID) {
1472        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear the
1473        // AudioSystem cache. We should not exit here, but only after calling the callback, so
1474        // that the upper layers can recreate the track.
1475        if (!isOffloaded_l() || (mSequence == mObservedSequence)) {
1476            status_t status = restoreTrack_l("processAudioBuffer");
1477            mLock.unlock();
1478            // Run again immediately, but with a new IAudioTrack
1479            return 0;
1480        }
1481    }
1482
1483    bool waitStreamEnd = mState == STATE_STOPPING;
1484    bool active = mState == STATE_ACTIVE;
1485
1486    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1487    bool newUnderrun = false;
1488    if (flags & CBLK_UNDERRUN) {
1489#if 0
1490        // Currently in shared buffer mode, when the server reaches the end of buffer,
1491        // the track stays active in continuous underrun state.  It's up to the application
1492        // to pause or stop the track, or set the position to a new offset within buffer.
1493        // This was some experimental code to auto-pause on underrun.   Keeping it here
1494        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1495        if (mTransfer == TRANSFER_SHARED) {
1496            mState = STATE_PAUSED;
1497            active = false;
1498        }
1499#endif
1500        if (!mInUnderrun) {
1501            mInUnderrun = true;
1502            newUnderrun = true;
1503        }
1504    }
1505
1506    // Get current position of server
1507    size_t position = mProxy->getPosition();
1508
1509    // Manage marker callback
1510    bool markerReached = false;
1511    size_t markerPosition = mMarkerPosition;
1512    // FIXME fails for wraparound, need 64 bits
1513    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1514        mMarkerReached = markerReached = true;
1515    }
1516
1517    // Determine number of new position callback(s) that will be needed, while locked
1518    size_t newPosCount = 0;
1519    size_t newPosition = mNewPosition;
1520    size_t updatePeriod = mUpdatePeriod;
1521    // FIXME fails for wraparound, need 64 bits
1522    if (updatePeriod > 0 && position >= newPosition) {
1523        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1524        mNewPosition += updatePeriod * newPosCount;
1525    }
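    // For illustration (hypothetical numbers): with updatePeriod = 480 and newPosition = 960,
    // a server position of 2000 gives newPosCount = ((2000 - 960) / 480) + 1 = 3 callbacks
    // (for positions 960, 1440 and 1920), and mNewPosition advances to 960 + 3 * 480 = 2400.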
1526
1527    // Cache other fields that will be needed soon
1528    uint32_t loopPeriod = mLoopPeriod;
1529    uint32_t sampleRate = mSampleRate;
1530    uint32_t notificationFrames = mNotificationFramesAct;
1531    if (mRefreshRemaining) {
1532        mRefreshRemaining = false;
1533        mRemainingFrames = notificationFrames;
1534        mRetryOnPartialBuffer = false;
1535    }
1536    size_t misalignment = mProxy->getMisalignment();
1537    uint32_t sequence = mSequence;
1538    sp<AudioTrackClientProxy> proxy = mProxy;
1539
1540    // These fields don't need to be cached, because they are assigned only by set():
1541    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1542    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1543
1544    mLock.unlock();
1545
1546    if (waitStreamEnd) {
1547        struct timespec timeout;
1548        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1549        timeout.tv_nsec = 0;
1550
1551        status_t status = proxy->waitStreamEndDone(&timeout);
1552        switch (status) {
1553        case NO_ERROR:
1554        case DEAD_OBJECT:
1555        case TIMED_OUT:
1556            mCbf(EVENT_STREAM_END, mUserData, NULL);
1557            {
1558                AutoMutex lock(mLock);
1559                // The previously assigned value of waitStreamEnd is no longer valid,
1560                // since the mutex has been unlocked and either the callback handler
1561                // or another thread could have re-started the AudioTrack during that time.
1562                waitStreamEnd = mState == STATE_STOPPING;
1563                if (waitStreamEnd) {
1564                    mState = STATE_STOPPED;
1565                }
1566            }
1567            if (waitStreamEnd && status != DEAD_OBJECT) {
1568                return NS_INACTIVE;
1569            }
1570            break;
1571        }
1572        return 0;
1573    }
1574
1575    // perform callbacks while unlocked
1576    if (newUnderrun) {
1577        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1578    }
1579    // FIXME we will miss loops if loop cycle was signaled several times since last call
1580    //       to processAudioBuffer()
1581    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1582        mCbf(EVENT_LOOP_END, mUserData, NULL);
1583    }
1584    if (flags & CBLK_BUFFER_END) {
1585        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1586    }
1587    if (markerReached) {
1588        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1589    }
1590    while (newPosCount > 0) {
1591        size_t temp = newPosition;
1592        mCbf(EVENT_NEW_POS, mUserData, &temp);
1593        newPosition += updatePeriod;
1594        newPosCount--;
1595    }
1596
1597    if (mObservedSequence != sequence) {
1598        mObservedSequence = sequence;
1599        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1600        // for offloaded tracks, just wait for the upper layers to recreate the track
1601        if (isOffloaded()) {
1602            return NS_INACTIVE;
1603        }
1604    }
1605
1606    // If inactive, don't run this thread again until the track is re-started
1607    if (!active) {
1608        return NS_INACTIVE;
1609    }
1610
1611    // Compute the estimated time until the next timed event (position, markers, loops)
1612    // FIXME only for non-compressed audio
1613    uint32_t minFrames = ~0;
1614    if (!markerReached && position < markerPosition) {
1615        minFrames = markerPosition - position;
1616    }
1617    if (loopPeriod > 0 && loopPeriod < minFrames) {
1618        minFrames = loopPeriod;
1619    }
1620    if (updatePeriod > 0 && updatePeriod < minFrames) {
1621        minFrames = updatePeriod;
1622    }
1623
1624    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1625    static const uint32_t kPoll = 0;
1626    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1627        minFrames = kPoll * notificationFrames;
1628    }
1629
1630    // Convert frame units to time units
1631    nsecs_t ns = NS_WHENEVER;
1632    if (minFrames != (uint32_t) ~0) {
1633        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1634        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1635        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1636    }
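    // For example (hypothetical numbers): minFrames = 4800 at sampleRate = 48000 Hz is 100 ms,
    // plus the 10 ms fudge factor, so ns would be about 110 ms (110000000 ns).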
1637
1638    // If not supplying data by EVENT_MORE_DATA, then we're done
1639    if (mTransfer != TRANSFER_CALLBACK) {
1640        return ns;
1641    }
1642
1643    struct timespec timeout;
1644    const struct timespec *requested = &ClientProxy::kForever;
1645    if (ns != NS_WHENEVER) {
1646        timeout.tv_sec = ns / 1000000000LL;
1647        timeout.tv_nsec = ns % 1000000000LL;
1648        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1649        requested = &timeout;
1650    }
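    // Supply data to the callback in chunks.  Only the first obtainBuffer() in this loop may
    // block (for the timeout above, or indefinitely if there is none); later iterations use
    // kNonBlocking.  The loop runs until mRemainingFrames have been delivered or it returns early.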
1651
1652    while (mRemainingFrames > 0) {
1653
1654        Buffer audioBuffer;
1655        audioBuffer.frameCount = mRemainingFrames;
1656        size_t nonContig;
1657        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1658        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1659                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1660        requested = &ClientProxy::kNonBlocking;
1661        size_t avail = audioBuffer.frameCount + nonContig;
1662        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1663                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1664        if (err != NO_ERROR) {
1665            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1666                    (isOffloaded() && (err == DEAD_OBJECT))) {
1667                return 0;
1668            }
1669            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1670            return NS_NEVER;
1671        }
1672
1673        if (mRetryOnPartialBuffer && !isOffloaded()) {
1674            mRetryOnPartialBuffer = false;
1675            if (avail < mRemainingFrames) {
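                // 1100000000LL appears to be 1 second in ns plus a 10% margin, so we sleep
                // slightly longer than the nominal time for the server to free the missing frames.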
1676                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1677                if (ns < 0 || myns < ns) {
1678                    ns = myns;
1679                }
1680                return ns;
1681            }
1682        }
1683
1684        // Divide the buffer size by 2 to account for the expansion
1685        // due to 8 to 16 bit conversion: the callback must fill only half
1686        // of the destination buffer.
1687        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1688            audioBuffer.size >>= 1;
1689        }
1690
1691        size_t reqSize = audioBuffer.size;
1692        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1693        size_t writtenSize = audioBuffer.size;
1694
1695        // Sanity check on returned size
1696        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1697            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1698                    reqSize, (int) writtenSize);
1699            return NS_NEVER;
1700        }
1701
1702        if (writtenSize == 0) {
1703            // The callback is done filling buffers.
1704            // Keep this thread going to handle timed events and
1705            // still try to get more data at intervals of WAIT_PERIOD_MS,
1706            // but don't spin and hog the CPU, so wait instead.
1707            return WAIT_PERIOD_MS * 1000000LL;
1708        }
1709
1710        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1711            // 8 to 16 bit conversion, note that source and destination are the same address
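            // memcpy_to_i16_from_u8() processes samples from the end of the buffer backwards,
            // which is what makes this overlapping, in-place widening safe.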
1712            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1713            audioBuffer.size <<= 1;
1714        }
1715
1716        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1717        audioBuffer.frameCount = releasedFrames;
1718        mRemainingFrames -= releasedFrames;
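        // misalignment is consumed only by the disabled heuristic below; keep the local copy
        // in step with the frames consumed by this iteration.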
1719        if (misalignment >= releasedFrames) {
1720            misalignment -= releasedFrames;
1721        } else {
1722            misalignment = 0;
1723        }
1724
1725        releaseBuffer(&audioBuffer);
1726
1727        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1728        // if the callback does not accept the full chunk
1729        if (writtenSize < reqSize) {
1730            continue;
1731        }
1732
1733        // There could be enough non-contiguous frames available to satisfy the remaining request
1734        if (mRemainingFrames <= nonContig) {
1735            continue;
1736        }
1737
1738#if 0
1739        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1740        // sum <= notificationFrames.  It replaces that series with at most two EVENT_MORE_DATA
1741        // that total to a sum == notificationFrames.
1742        if (0 < misalignment && misalignment <= mRemainingFrames) {
1743            mRemainingFrames = misalignment;
1744            return (mRemainingFrames * 1100000000LL) / sampleRate;
1745        }
1746#endif
1747
1748    }
1749    mRemainingFrames = notificationFrames;
1750    mRetryOnPartialBuffer = true;
1751
1752    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1753    return 0;
1754}
1755
1756status_t AudioTrack::restoreTrack_l(const char *from)
1757{
1758    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1759          isOffloaded_l() ? "Offloaded" : "PCM", from);
1760    ++mSequence;
1761    status_t result;
1762
1763    // refresh the audio configuration cache in this process to make sure we get new
1764    // output parameters in createTrack_l()
1765    AudioSystem::clearAudioConfigCache();
1766
1767    if (isOffloaded_l()) {
1768        // FIXME re-creation of offloaded tracks is not yet implemented
1769        return DEAD_OBJECT;
1770    }
1771
1772    // If the new IAudioTrack is created, createTrack_l() will modify the
1773    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1774    // It will also release the strong references on the previous IAudioTrack and IMemory.
1775
1776    // Account for the frames that will be lost by track recreation in the saved position
1777    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1778    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1779    result = createTrack_l(position /*epoch*/);
1780
1781    if (result == NO_ERROR) {
1782        // continue playback from last known position, but
1783        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1784        if (mStaticProxy != NULL) {
1785            mLoopPeriod = 0;
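            // A loop count of 0 effectively just restores the buffer position without
            // re-arming a loop.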
1786            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1787        }
1788        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1789        //       track destruction have been played? This is critical for the SoundPool implementation.
1790        //       This must be broken, and needs to be tested/debugged.
1791#if 0
1792        // restore write index and set other indexes to reflect empty buffer status
1793        if (!strcmp(from, "start")) {
1794            // Make sure that a client relying on callback events indicating underrun or
1795            // the actual number of audio frames played (e.g. SoundPool) receives them.
1796            if (mSharedBuffer == 0) {
1797                // restart playback even if buffer is not completely filled.
1798                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1799            }
1800        }
1801#endif
1802        if (mState == STATE_ACTIVE) {
1803            result = mAudioTrack->start();
1804        }
1805    }
1806    if (result != NO_ERROR) {
1807        ALOGW("restoreTrack_l() failed status %d", result);
1808        mState = STATE_STOPPED;
1809    }
1810
1811    return result;
1812}
1813
1814status_t AudioTrack::setParameters(const String8& keyValuePairs)
1815{
1816    AutoMutex lock(mLock);
1817    return mAudioTrack->setParameters(keyValuePairs);
1818}
1819
1820status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1821{
1822    AutoMutex lock(mLock);
1823    // FIXME not implemented for fast tracks; should use proxy and SSQ
1824    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1825        return INVALID_OPERATION;
1826    }
1827    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1828        return INVALID_OPERATION;
1829    }
1830    status_t status = mAudioTrack->getTimestamp(timestamp);
1831    if (status == NO_ERROR) {
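        // The server reports a position relative to the current IAudioTrack; adding the proxy
        // epoch converts it to the client-side position that survives track restoration.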
1832        timestamp.mPosition += mProxy->getEpoch();
1833    }
1834    return status;
1835}
1836
1837String8 AudioTrack::getParameters(const String8& keys)
1838{
1839    audio_io_handle_t output = getOutput();
1840    if (output != AUDIO_IO_HANDLE_NONE) {
1841        return AudioSystem::getParameters(output, keys);
1842    } else {
1843        return String8::empty();
1844    }
1845}
1846
1847bool AudioTrack::isOffloaded() const
1848{
1849    AutoMutex lock(mLock);
1850    return isOffloaded_l();
1851}
1852
1853status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
1854{
1855
1856    const size_t SIZE = 256;
1857    char buffer[SIZE];
1858    String8 result;
1859
1860    result.append(" AudioTrack::dump\n");
1861    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1862            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
1863    result.append(buffer);
1864    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
1865            mChannelCount, mFrameCount);
1866    result.append(buffer);
1867    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1868    result.append(buffer);
1869    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1870    result.append(buffer);
1871    ::write(fd, result.string(), result.size());
1872    return NO_ERROR;
1873}
1874
1875uint32_t AudioTrack::getUnderrunFrames() const
1876{
1877    AutoMutex lock(mLock);
1878    return mProxy->getUnderrunFrames();
1879}
1880
1881void AudioTrack::setAttributesFromStreamType(audio_stream_type_t streamType) {
1882    mAttributes.flags = 0x0;
1883
1884    switch(streamType) {
1885    case AUDIO_STREAM_DEFAULT:
1886    case AUDIO_STREAM_MUSIC:
1887        mAttributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
1888        mAttributes.usage = AUDIO_USAGE_MEDIA;
1889        break;
1890    case AUDIO_STREAM_VOICE_CALL:
1891        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1892        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1893        break;
1894    case AUDIO_STREAM_ENFORCED_AUDIBLE:
1895        mAttributes.flags  |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
1896        // intended fall through, attributes in common with STREAM_SYSTEM
1897    case AUDIO_STREAM_SYSTEM:
1898        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1899        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
1900        break;
1901    case AUDIO_STREAM_RING:
1902        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1903        mAttributes.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
1904        break;
1905    case AUDIO_STREAM_ALARM:
1906        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1907        mAttributes.usage = AUDIO_USAGE_ALARM;
1908        break;
1909    case AUDIO_STREAM_NOTIFICATION:
1910        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1911        mAttributes.usage = AUDIO_USAGE_NOTIFICATION;
1912        break;
1913    case AUDIO_STREAM_BLUETOOTH_SCO:
1914        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1915        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1916        mAttributes.flags |= AUDIO_FLAG_SCO;
1917        break;
1918    case AUDIO_STREAM_DTMF:
1919        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1920        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
1921        break;
1922    case AUDIO_STREAM_TTS:
1923        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1924        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
1925        break;
1926    default:
1927        ALOGE("invalid stream type %d when converting to attributes", streamType);
1928    }
1929}
1930
1931void AudioTrack::setStreamTypeFromAttributes(audio_attributes_t& aa) {
1932    // flags to stream type mapping
1933    if ((aa.flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
1934        mStreamType = AUDIO_STREAM_ENFORCED_AUDIBLE;
1935        return;
1936    }
1937    if ((aa.flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
1938        mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
1939        return;
1940    }
1941
1942    // usage to stream type mapping
1943    switch (aa.usage) {
1944    case AUDIO_USAGE_MEDIA:
1945    case AUDIO_USAGE_GAME:
1946    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
1947    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
1948        mStreamType = AUDIO_STREAM_MUSIC;
1949        return;
1950    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
1951        mStreamType = AUDIO_STREAM_SYSTEM;
1952        return;
1953    case AUDIO_USAGE_VOICE_COMMUNICATION:
1954        mStreamType = AUDIO_STREAM_VOICE_CALL;
1955        return;
1956
1957    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
1958        mStreamType = AUDIO_STREAM_DTMF;
1959        return;
1960
1961    case AUDIO_USAGE_ALARM:
1962        mStreamType = AUDIO_STREAM_ALARM;
1963        return;
1964    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
1965        mStreamType = AUDIO_STREAM_RING;
1966        return;
1967
1968    case AUDIO_USAGE_NOTIFICATION:
1969    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
1970    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
1971    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
1972    case AUDIO_USAGE_NOTIFICATION_EVENT:
1973        mStreamType = AUDIO_STREAM_NOTIFICATION;
1974        return;
1975
1976    case AUDIO_USAGE_UNKNOWN:
1977    default:
1978        mStreamType = AUDIO_STREAM_MUSIC;
1979    }
1980}
1981
1982bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
1983    // has flags that map to a strategy?
1984    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) {
1985        return true;
1986    }
1987
1988    // has known usage?
1989    switch (paa->usage) {
1990    case AUDIO_USAGE_UNKNOWN:
1991    case AUDIO_USAGE_MEDIA:
1992    case AUDIO_USAGE_VOICE_COMMUNICATION:
1993    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
1994    case AUDIO_USAGE_ALARM:
1995    case AUDIO_USAGE_NOTIFICATION:
1996    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
1997    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
1998    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
1999    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2000    case AUDIO_USAGE_NOTIFICATION_EVENT:
2001    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
2002    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
2003    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
2004    case AUDIO_USAGE_GAME:
2005        break;
2006    default:
2007        return false;
2008    }
2009    return true;
2010}
2011// =========================================================================
2012
2013void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2014{
2015    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2016    if (audioTrack != 0) {
2017        AutoMutex lock(audioTrack->mLock);
2018        audioTrack->mProxy->binderDied();
2019    }
2020}
2021
2022// =========================================================================
2023
2024AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2025    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2026      mIgnoreNextPausedInt(false)
2027{
2028}
2029
2030AudioTrack::AudioTrackThread::~AudioTrackThread()
2031{
2032}
2033
2034bool AudioTrack::AudioTrackThread::threadLoop()
2035{
2036    {
2037        AutoMutex _l(mMyLock);
2038        if (mPaused) {
2039            mMyCond.wait(mMyLock);
2040            // caller will check for exitPending()
2041            return true;
2042        }
2043        if (mIgnoreNextPausedInt) {
2044            mIgnoreNextPausedInt = false;
2045            mPausedInt = false;
2046        }
2047        if (mPausedInt) {
2048            if (mPausedNs > 0) {
2049                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2050            } else {
2051                mMyCond.wait(mMyLock);
2052            }
2053            mPausedInt = false;
2054            return true;
2055        }
2056    }
2057    nsecs_t ns = mReceiver.processAudioBuffer();
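    // Interpret the return value: 0 = run again immediately, NS_INACTIVE = pause until
    // resume(), NS_NEVER = exit the thread, NS_WHENEVER = no deadline (poll after 1 s),
    // any other positive value = nanoseconds to sleep before running again.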
2058    switch (ns) {
2059    case 0:
2060        return true;
2061    case NS_INACTIVE:
2062        pauseInternal();
2063        return true;
2064    case NS_NEVER:
2065        return false;
2066    case NS_WHENEVER:
2067        // FIXME increase poll interval, or make event-driven
2068        ns = 1000000000LL;
2069        // fall through
2070    default:
2071        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
2072        pauseInternal(ns);
2073        return true;
2074    }
2075}
2076
2077void AudioTrack::AudioTrackThread::requestExit()
2078{
2079    // must be in this order to avoid a race condition
2080    Thread::requestExit();
2081    resume();
2082}
2083
2084void AudioTrack::AudioTrackThread::pause()
2085{
2086    AutoMutex _l(mMyLock);
2087    mPaused = true;
2088}
2089
2090void AudioTrack::AudioTrackThread::resume()
2091{
2092    AutoMutex _l(mMyLock);
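    // Force the next threadLoop() iteration to ignore a pending internal pause, so that a
    // racing pauseInternal() cannot put the thread back to sleep; requestExit() relies on
    // this resume() to wake the thread promptly.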
2093    mIgnoreNextPausedInt = true;
2094    if (mPaused || mPausedInt) {
2095        mPaused = false;
2096        mPausedInt = false;
2097        mMyCond.signal();
2098    }
2099}
2100
2101void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2102{
2103    AutoMutex _l(mMyLock);
2104    mPausedInt = true;
2105    mPausedNs = ns;
2106}
2107
2108} // namespace android
2109