// AudioTrack.cpp revision 34fb29696b0f3abf61b10f8d053b1f33d501de0a
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120


namespace android {
// ---------------------------------------------------------------------------

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME merge with similar code in createTrack_l(), except we're missing
    //       some information here that is available in createTrack_l():
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }

    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
            afFrameCount * minBufCount * sampleRate / afSampleRate;
    // The formula above should always produce a non-zero value, but return an error
    // in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
    return NO_ERROR;
}
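// Worked example for getMinFrameCount(), using hypothetical values (not taken
// from this file): afFrameCount = 1024, afSampleRate = 44100 and afLatency = 90 ms
// give one hardware buffer of (1000 * 1024) / 44100 = 23 ms, so minBufCount =
// 90 / 23 = 3.  A 44100 Hz client then gets 1024 * 3 = 3072 frames, while a
// 22050 Hz client gets 1024 * 3 * 22050 / 44100 = 1536 frames.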

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, NULL /*no audio attributes*/);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, NULL /*no audio attributes*/);
}
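// Usage sketch (illustrative only, not part of the original file; argument
// values are hypothetical): a typical streaming client constructs the track
// with a callback and no shared buffer, which set() resolves to
// TRANSFER_CALLBACK below:
//
//   sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
//           AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
//           0 /*frameCount: use minimum*/, AUDIO_OUTPUT_FLAG_NONE,
//           myCallback, myCookie /*user*/);
//   if (track->initCheck() == NO_ERROR) {
//       track->start();
//   }
//
// myCallback and myCookie are placeholders supplied by the application.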

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that the callback function exits in the case where
        // it is looping on a buffer-full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
                IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        audio_attributes_t* pAttributes)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType);

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
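    // For example (summary of the switch above): with TRANSFER_DEFAULT, a
    // non-NULL sharedBuffer selects TRANSFER_SHARED; otherwise a NULL cbf or a
    // callback thread that may call into Java selects blocking TRANSFER_SYNC
    // writes, and a plain native callback selects TRANSFER_CALLBACK.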
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }

    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        setAttributesFromStreamType(streamType);
        mStreamType = streamType;
    } else {
        if (!isValidAttributes(pAttributes)) {
            ALOGE("Invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                pAttributes->usage, pAttributes->content_type, pAttributes->flags,
                pAttributes->tags);
        }
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        setStreamTypeFromAttributes(mAttributes);
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
    }

    status_t status;
    if (sampleRate == 0) {
        status = AudioSystem::getOutputSamplingRateForAttr(&sampleRate, &mAttributes);
        if (status != NO_ERROR) {
            ALOGE("Could not get output sample rate for stream type %d; status %d",
                    mStreamType, status);
            return status;
        }
    }
    mSampleRate = sampleRate;

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // AudioFlinger does not currently support 8-bit data in shared memory
    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
        ALOGE("8-bit data in shared memory is not supported");
        return BAD_VALUE;
    }

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    // only allow deep buffering for music stream type
    if (mStreamType != AUDIO_STREAM_MUSIC) {
        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_is_linear_pcm(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
        mFrameSizeAF = mFrameSize;
    } else {
        ALOG_ASSERT(audio_is_linear_pcm(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        mFrameSizeAF = channelCount * audio_bytes_per_sample(
                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }
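    // Example (illustrative): stereo AUDIO_FORMAT_PCM_16_BIT gives mFrameSize =
    // 2 * 2 = 4 bytes.  For non-direct AUDIO_FORMAT_PCM_8_BIT the client frame is
    // 1 byte per channel but the server mixes 16-bit, so mFrameSizeAF is twice
    // mFrameSize and write() performs the 8- to 16-bit expansion.  For non-PCM
    // direct output, frames are counted in bytes (mFrameSize = 1).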

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    mSessionId = sessionId;
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status = createTrack_l(0 /*epoch*/);

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopPeriod = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;

    return NO_ERROR;
}

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (previousState == STATE_STOPPING) {
            mProxy->interrupt();
        } else {
            t->resume();
        }
    } else {
        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
        get_sched_policy(0, &mPreviousSchedulingGroup);
        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    }

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
    }

    mProxy->interrupt();
    mAudioTrack->stop();
    // the playback head position will reset to 0, so if a marker is set, we need
    // to activate it again
    mMarkerReached = false;
#if 0
    // Force flush if a shared buffer is used otherwise audioflinger
    // will not stop before end of buffer is reached.
    // It may be needed to make sure that we stop playback, likely in case looping is on.
    if (mSharedBuffer != 0) {
        flush_l();
    }
#endif

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames;
            // OffloadThread sends the HAL pause in its threadLoop, so the position
            // cached here can be slightly off
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    if (mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    uint32_t afSamplingRate;
    if (AudioSystem::getOutputSamplingRateForAttr(&afSamplingRate, &mAttributes) != NO_ERROR) {
        return NO_INIT;
    }
    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
    if (rate == 0 || rate > afSamplingRate*2 ) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSampleRate = rate;
    mProxy->setSampleRate(rate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    if (mIsTimed) {
        return 0;
    }

    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}
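// Example (illustrative values): for a static track whose buffer holds 10000
// frames, setLoop(0, 10000, -1) loops the whole clip indefinitely,
// setLoop(0, 10000, 3) goes back to frame 0 three times, and setLoop(0, 0, 0)
// clears the loop.  loopEnd may not exceed mFrameCount and the looped region
// must span at least MIN_LOOP frames.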

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // FIXME If setting a loop also sets position to start of loop, then
    //       this is correct.  Otherwise it should be removed.
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *marker = mMarkerPosition;

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = mProxy->getPosition() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME Check whether loops and setting position are incompatible in old code.
    // If we use setLoop for both purposes we lose the capability to set the position while looping.
    mStaticProxy->setLoop(position, mFrameCount, 0);

    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position) const
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    if (isOffloadedOrDirect_l()) {
        uint32_t dspFrames = 0;

        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
        }
        *position = dspFrames;
    } else {
        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
                mProxy->getPosition();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0 || mIsTimed) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME The new code cannot reload while keeping a loop specified.
    // Need to check how the old code handled this, and whether it's a significant change.
    mStaticProxy->setLoop(0, mFrameCount, 0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

// -------------------------------------------------------------------------

// must be called with mLock held
status_t AudioTrack::createTrack_l(size_t epoch)
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    audio_io_handle_t output = AudioSystem::getOutputForAttr(&mAttributes, mSampleRate, mFormat,
            mChannelMask, mFlags, mOffloadInfo);
    if (output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
              " channel mask %#x, flags %#x",
              mStreamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all

    uint32_t afLatency;
    status = AudioSystem::getLatency(output, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }

    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK)) &&
            // matching sample rate
            (mSampleRate == afSampleRate))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, no sample rate conversion
    //  n = 3   normal track, with sample rate conversion
    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_is_linear_pcm(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {

        // Ensure that buffer alignment matches channel count
        // 8-bit data in shared memory is not currently supported by AudioFlinger
        size_t alignment = audio_bytes_per_sample(
                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
        if (alignment & 1) {
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }
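        // Alignment example (hypothetical): 16-bit stereo requires the shared
        // buffer start address to be a multiple of 4 (2 bytes per sample,
        // doubled when there is more than one channel); 16-bit mono only needs
        // 2-byte alignment.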

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSizeAF;

    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {

        // FIXME move these calculations and associated checks to server

        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                afFrameCount, minBufCount, afSampleRate, afLatency);
        if (minBufCount <= nBuffering) {
            minBufCount = nBuffering;
        }

        size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
        ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                ", afLatency=%d",
                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);

        if (frameCount == 0) {
            frameCount = minFrameCount;
        } else if (frameCount < minFrameCount) {
            // not ALOGW because it happens all the time when playing key clicks over A2DP
            ALOGV("Minimum buffer size corrected from %zu to %zu",
                     frameCount, minFrameCount);
            frameCount = minFrameCount;
        }
        // Make sure that application is notified with sufficient margin before underrun
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
            mNotificationFramesAct = frameCount/nBuffering;
        }

    } else {
        // For fast tracks, the frame count calculations and checks are done by server
    }
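    // Worked example for the normal streaming path above, hypothetical values
    // only: afFrameCount = 1024, afSampleRate = 44100 and afLatency = 90 ms give
    // minBufCount = 90 / 23 = 3 (already >= nBuffering), and for a 48000 Hz
    // client minFrameCount = (1024 * 48000 * 3) / 44100 = 3343 frames.  A
    // requested frameCount of 0, or anything smaller, is raised to that minimum,
    // and the notification period defaults to frameCount / nBuffering.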

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        trackFlags |= IAudioFlinger::TRACK_DIRECT;
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
                                                      mSampleRate,
                                                      // AudioFlinger only sees 16-bit PCM
                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &trackFlags,
                                                      mSharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In the current design, the AudioTrack client checks and ensures frame count validity
        // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
        // except for a fast track, which uses a special method of assigning the frame count.
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
            mAwaitBoost = true;
            if (mSharedBuffer == 0) {
                // Theoretically double-buffering is not required for fast tracks,
                // due to tighter scheduling.  But in practice, to accommodate kernels with
                // scheduling jitter, and apps with computation jitter, we use double-buffering.
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        } else {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
            // once denied, do not request again if IAudioTrack is re-created
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
            if (mSharedBuffer == 0) {
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
    } else {
        buffers = mSharedBuffer->pointer();
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME don't believe this lie
    mLatency = afLatency + (1000*frameCount) / mSampleRate;
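    // For example (hypothetical values): with afLatency = 50 ms, frameCount = 3528
    // and mSampleRate = 44100, the client-side buffer adds (1000 * 3528) / 44100
    // = 80 ms, so mLatency is reported as 130 ms; as the FIXME above notes, this
    // is only a rough estimate.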

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
        mProxy = mStaticProxy;
    }
    mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
    mProxy->setSendLevel(mSendLevel);
    mProxy->setSampleRate(mSampleRate);
    mProxy->setEpoch(epoch);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
    }

release:
    AudioSystem::releaseOutput(output);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
{
    if (audioBuffer == NULL) {
        return BAD_VALUE;
    }
    if (mTransfer != TRANSFER_OBTAIN) {
        audioBuffer->frameCount = 0;
        audioBuffer->size = 0;
        audioBuffer->raw = NULL;
        return INVALID_OPERATION;
    }

    const struct timespec *requested;
    struct timespec timeout;
    if (waitCount == -1) {
        requested = &ClientProxy::kForever;
    } else if (waitCount == 0) {
        requested = &ClientProxy::kNonBlocking;
    } else if (waitCount > 0) {
        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
        timeout.tv_sec = ms / 1000;
        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
        requested = &timeout;
    } else {
        ALOGE("%s invalid waitCount %d", __func__, waitCount);
        requested = NULL;
    }
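    // Example (illustrative): waitCount == 10 converts to a 10 * WAIT_PERIOD_MS
    // = 100 ms timeout; -1 blocks indefinitely and 0 returns immediately if no
    // buffer is available.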
    return obtainBuffer(audioBuffer, requested);
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);

    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));

    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}

void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    if (mTransfer == TRANSFER_SHARED) {
        return;
    }

    size_t stepCount = audioBuffer->size / mFrameSizeAF;
    if (stepCount == 0) {
        return;
    }

    Proxy::Buffer buffer;
    buffer.mFrameCount = stepCount;
    buffer.mRaw = audioBuffer->raw;

    AutoMutex lock(mLock);
    mInUnderrun = false;
    mProxy->releaseBuffer(&buffer);

    // restart track if it was disabled by audioflinger due to previous underrun
    if (mState == STATE_ACTIVE) {
        audio_track_cblk_t* cblk = mCblk;
        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
}

// -------------------------------------------------------------------------

ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
{
    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
        return INVALID_OPERATION;
    }

    if (isDirect()) {
        AutoMutex lock(mLock);
        int32_t flags = android_atomic_and(
                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
                            &mCblk->mFlags);
        if (flags & CBLK_INVALID) {
            return DEAD_OBJECT;
        }
    }

    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
        // Sanity-check: the user is most likely passing an error code, and it would
        // make the return value ambiguous (actualSize vs. error).
        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
        return BAD_VALUE;
    }

    size_t written = 0;
    Buffer audioBuffer;

    while (userSize >= mFrameSize) {
        audioBuffer.frameCount = userSize / mFrameSize;

        status_t err = obtainBuffer(&audioBuffer,
                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
        if (err < 0) {
            if (written > 0) {
                break;
            }
            return ssize_t(err);
        }

        size_t toWrite;
        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
            // Divide capacity by 2 to take expansion into account
            toWrite = audioBuffer.size >> 1;
            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
        } else {
            toWrite = audioBuffer.size;
            memcpy(audioBuffer.i8, buffer, toWrite);
        }
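        // Example (hypothetical sizes): if obtainBuffer() returned
        // audioBuffer.size == 4096 bytes of 16-bit server-side frames, only
        // 2048 bytes of 8-bit source data are consumed here, since each source
        // byte expands to two destination bytes.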
        buffer = ((const char *) buffer) + toWrite;
        userSize -= toWrite;
        written += toWrite;

        releaseBuffer(&audioBuffer);
    }

    return written;
}

// -------------------------------------------------------------------------

TimedAudioTrack::TimedAudioTrack() {
    mIsTimed = true;
}

status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
{
    AutoMutex lock(mLock);
    status_t result = UNKNOWN_ERROR;

#if 1
    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
    // while we are accessing the cblk
    sp<IAudioTrack> audioTrack = mAudioTrack;
    sp<IMemory> iMem = mCblkMemory;
#endif

    // If the track is not already invalid, try to allocate a buffer.  If the allocation
    // fails, indicating that the server is dead, flag the track as invalid so
    // we can attempt to restore it in just a bit.
    audio_track_cblk_t* cblk = mCblk;
    if (!(cblk->mFlags & CBLK_INVALID)) {
        result = mAudioTrack->allocateTimedBuffer(size, buffer);
        if (result == DEAD_OBJECT) {
            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
        }
    }

    // If the track is invalid at this point, attempt to restore it and try the
    // allocation one more time.
1424    if (cblk->mFlags & CBLK_INVALID) {
1425        result = restoreTrack_l("allocateTimedBuffer");
1426
1427        if (result == NO_ERROR) {
1428            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1429        }
1430    }
1431
1432    return result;
1433}
1434
1435status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1436                                           int64_t pts)
1437{
1438    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1439    {
1440        AutoMutex lock(mLock);
1441        audio_track_cblk_t* cblk = mCblk;
1442        // restart track if it was disabled by audioflinger due to previous underrun
1443        if (buffer->size() != 0 && status == NO_ERROR &&
1444                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1445            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1446            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1447            // FIXME ignoring status
1448            mAudioTrack->start();
1449        }
1450    }
1451    return status;
1452}
1453
1454status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1455                                                TargetTimeline target)
1456{
1457    return mAudioTrack->setMediaTimeTransform(xform, target);
1458}
1459
1460// -------------------------------------------------------------------------
1461
1462nsecs_t AudioTrack::processAudioBuffer()
1463{
1464    // Currently the AudioTrack thread is not created if there are no callbacks.
1465    // Would it ever make sense to run the thread, even without callbacks?
1466    // If so, then replace this by checks at each use for mCbf != NULL.
1467    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1468
1469    mLock.lock();
1470    if (mAwaitBoost) {
1471        mAwaitBoost = false;
1472        mLock.unlock();
1473        static const int32_t kMaxTries = 5;
1474        int32_t tryCounter = kMaxTries;
1475        uint32_t pollUs = 10000;
1476        do {
1477            int policy = sched_getscheduler(0);
1478            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1479                break;
1480            }
1481            usleep(pollUs);
1482            pollUs <<= 1;
1483        } while (tryCounter-- > 0);
1484        if (tryCounter < 0) {
1485            ALOGE("did not receive expected priority boost on time");
1486        }
1487        // Run again immediately
1488        return 0;
1489    }
1490
1491    // Can only reference mCblk while locked
1492    int32_t flags = android_atomic_and(
1493        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1494
1495    // Check for track invalidation
1496    if (flags & CBLK_INVALID) {
1497        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1498        // AudioSystem cache. We should not exit here but after calling the callback so
1499        // that the upper layers can recreate the track
1500        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1501            status_t status = restoreTrack_l("processAudioBuffer");
1502            mLock.unlock();
1503            // Run again immediately, but with a new IAudioTrack
1504            return 0;
1505        }
1506    }
1507
1508    bool waitStreamEnd = mState == STATE_STOPPING;
1509    bool active = mState == STATE_ACTIVE;
1510
1511    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1512    bool newUnderrun = false;
1513    if (flags & CBLK_UNDERRUN) {
1514#if 0
1515        // Currently in shared buffer mode, when the server reaches the end of the buffer,
1516        // the track stays active in a continuous underrun state.  It's up to the application
1517        // to pause or stop the track, or to set the position to a new offset within the buffer.
1518        // This was some experimental code to auto-pause on underrun.  Keeping it here
1519        // in "if 0" so we can revisit this if we add a real sequencer for shared memory content.
1520        if (mTransfer == TRANSFER_SHARED) {
1521            mState = STATE_PAUSED;
1522            active = false;
1523        }
1524#endif
1525        if (!mInUnderrun) {
1526            mInUnderrun = true;
1527            newUnderrun = true;
1528        }
1529    }
1530
1531    // Get current position of server
1532    size_t position = mProxy->getPosition();
1533
1534    // Manage marker callback
1535    bool markerReached = false;
1536    size_t markerPosition = mMarkerPosition;
1537    // FIXME fails for wraparound, need 64 bits
1538    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1539        mMarkerReached = markerReached = true;
1540    }
1541
1542    // Determine number of new position callback(s) that will be needed, while locked
1543    size_t newPosCount = 0;
1544    size_t newPosition = mNewPosition;
1545    size_t updatePeriod = mUpdatePeriod;
1546    // FIXME fails for wraparound, need 64 bits
1547    if (updatePeriod > 0 && position >= newPosition) {
1548        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1549        mNewPosition += updatePeriod * newPosCount;
1550    }
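    // Illustrative values only: with updatePeriod == 200, newPosition == 400 and position == 1000,
    // newPosCount == 4 and mNewPosition advances to 1200, so EVENT_NEW_POS is delivered below
    // for positions 400, 600, 800 and 1000.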
1551
1552    // Cache other fields that will be needed soon
1553    uint32_t loopPeriod = mLoopPeriod;
1554    uint32_t sampleRate = mSampleRate;
1555    uint32_t notificationFrames = mNotificationFramesAct;
1556    if (mRefreshRemaining) {
1557        mRefreshRemaining = false;
1558        mRemainingFrames = notificationFrames;
1559        mRetryOnPartialBuffer = false;
1560    }
1561    size_t misalignment = mProxy->getMisalignment();
1562    uint32_t sequence = mSequence;
1563    sp<AudioTrackClientProxy> proxy = mProxy;
1564
1565    // These fields don't need to be cached, because they are assigned only by set():
1566    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1567    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1568
1569    mLock.unlock();
1570
1571    if (waitStreamEnd) {
1572        struct timespec timeout;
1573        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1574        timeout.tv_nsec = 0;
1575
1576        status_t status = proxy->waitStreamEndDone(&timeout);
1577        switch (status) {
1578        case NO_ERROR:
1579        case DEAD_OBJECT:
1580        case TIMED_OUT:
1581            mCbf(EVENT_STREAM_END, mUserData, NULL);
1582            {
1583                AutoMutex lock(mLock);
1584                // The previously assigned value of waitStreamEnd is no longer valid,
1585                // since the mutex has been unlocked and either the callback handler
1586                // or another thread could have re-started the AudioTrack during that time.
1587                waitStreamEnd = mState == STATE_STOPPING;
1588                if (waitStreamEnd) {
1589                    mState = STATE_STOPPED;
1590                }
1591            }
1592            if (waitStreamEnd && status != DEAD_OBJECT) {
1593                return NS_INACTIVE;
1594            }
1595            break;
1596        }
1597        return 0;
1598    }
1599
1600    // perform callbacks while unlocked
1601    if (newUnderrun) {
1602        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1603    }
1604    // FIXME we will miss loops if loop cycle was signaled several times since last call
1605    //       to processAudioBuffer()
1606    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1607        mCbf(EVENT_LOOP_END, mUserData, NULL);
1608    }
1609    if (flags & CBLK_BUFFER_END) {
1610        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1611    }
1612    if (markerReached) {
1613        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1614    }
1615    while (newPosCount > 0) {
1616        size_t temp = newPosition;
1617        mCbf(EVENT_NEW_POS, mUserData, &temp);
1618        newPosition += updatePeriod;
1619        newPosCount--;
1620    }
1621
1622    if (mObservedSequence != sequence) {
1623        mObservedSequence = sequence;
1624        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1625        // for offloaded tracks, just wait for the upper layers to recreate the track
1626        if (isOffloadedOrDirect()) {
1627            return NS_INACTIVE;
1628        }
1629    }
1630
1631    // if inactive, then don't run me again until re-started
1632    if (!active) {
1633        return NS_INACTIVE;
1634    }
1635
1636    // Compute the estimated time until the next timed event (position, markers, loops)
1637    // FIXME only for non-compressed audio
1638    uint32_t minFrames = ~0;
1639    if (!markerReached && position < markerPosition) {
1640        minFrames = markerPosition - position;
1641    }
1642    if (loopPeriod > 0 && loopPeriod < minFrames) {
1643        minFrames = loopPeriod;
1644    }
1645    if (updatePeriod > 0 && updatePeriod < minFrames) {
1646        minFrames = updatePeriod;
1647    }
1648
1649    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1650    static const uint32_t kPoll = 0;
1651    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1652        minFrames = kPoll * notificationFrames;
1653    }
1654
1655    // Convert frame units to time units
1656    nsecs_t ns = NS_WHENEVER;
1657    if (minFrames != (uint32_t) ~0) {
1658        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1659        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1660        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1661    }
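    // For example, minFrames == 480 at sampleRate == 48000 works out to 10 ms, or 20 ms
    // once the fudge factor is added.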
1662
1663    // If not supplying data by EVENT_MORE_DATA, then we're done
1664    if (mTransfer != TRANSFER_CALLBACK) {
1665        return ns;
1666    }
1667
1668    struct timespec timeout;
1669    const struct timespec *requested = &ClientProxy::kForever;
1670    if (ns != NS_WHENEVER) {
1671        timeout.tv_sec = ns / 1000000000LL;
1672        timeout.tv_nsec = ns % 1000000000LL;
1673        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1674        requested = &timeout;
1675    }
1676
1677    while (mRemainingFrames > 0) {
1678
1679        Buffer audioBuffer;
1680        audioBuffer.frameCount = mRemainingFrames;
1681        size_t nonContig;
1682        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1683        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1684                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1685        requested = &ClientProxy::kNonBlocking;
1686        size_t avail = audioBuffer.frameCount + nonContig;
1687        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1688                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1689        if (err != NO_ERROR) {
1690            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1691                    (isOffloaded() && (err == DEAD_OBJECT))) {
1692                return 0;
1693            }
1694            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1695            return NS_NEVER;
1696        }
1697
1698        if (mRetryOnPartialBuffer && !isOffloaded()) {
1699            mRetryOnPartialBuffer = false;
1700            if (avail < mRemainingFrames) {
1701                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1702                if (ns < 0 || myns < ns) {
1703                    ns = myns;
1704                }
1705                return ns;
1706            }
1707        }
1708
1709        // Divide buffer size by 2 to take into account the expansion
1710        // due to 8 to 16 bit conversion: the callback must fill only half
1711        // of the destination buffer
1712        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1713            audioBuffer.size >>= 1;
1714        }
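        // The halved size is restored after the callback returns: the 8-bit samples it wrote
        // are expanded in place to 16-bit below, and audioBuffer.size is doubled accordingly.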
1715
1716        size_t reqSize = audioBuffer.size;
1717        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1718        size_t writtenSize = audioBuffer.size;
1719
1720        // Sanity check on returned size
1721        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1722            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1723                    reqSize, ssize_t(writtenSize));
1724            return NS_NEVER;
1725        }
1726
1727        if (writtenSize == 0) {
1728            // The callback is done filling buffers
1729            // Keep this thread going to handle timed events and
1730            // still try to get more data in intervals of WAIT_PERIOD_MS
1731            // but don't just loop and block the CPU, so wait
1732            return WAIT_PERIOD_MS * 1000000LL;
1733        }
1734
1735        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1736            // 8 to 16 bit conversion, note that source and destination are the same address
1737            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1738            audioBuffer.size <<= 1;
1739        }
1740
1741        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1742        audioBuffer.frameCount = releasedFrames;
1743        mRemainingFrames -= releasedFrames;
1744        if (misalignment >= releasedFrames) {
1745            misalignment -= releasedFrames;
1746        } else {
1747            misalignment = 0;
1748        }
1749
1750        releaseBuffer(&audioBuffer);
1751
1752        // FIXME this is where we would repeat EVENT_MORE_DATA on the same, advanced buffer
1753        // if the callback does not accept the full chunk
1754        if (writtenSize < reqSize) {
1755            continue;
1756        }
1757
1758        // There could be enough non-contiguous frames available to satisfy the remaining request
1759        if (mRemainingFrames <= nonContig) {
1760            continue;
1761        }
1762
1763#if 0
1764        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1765        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1766        // that total to a sum == notificationFrames.
1767        if (0 < misalignment && misalignment <= mRemainingFrames) {
1768            mRemainingFrames = misalignment;
1769            return (mRemainingFrames * 1100000000LL) / sampleRate;
1770        }
1771#endif
1772
1773    }
1774    mRemainingFrames = notificationFrames;
1775    mRetryOnPartialBuffer = true;
1776
1777    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1778    return 0;
1779}
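
// Illustrative only, not part of this implementation: a minimal sketch of a client callback
// for TRANSFER_CALLBACK mode, assuming a hypothetical PcmSource that supplies PCM data.
// The callback reports how many bytes it wrote by updating Buffer::size; writing 0 makes
// processAudioBuffer() retry after WAIT_PERIOD_MS instead of looping.
//
//     static void audioCallback(int event, void *user, void *info) {
//         if (event == AudioTrack::EVENT_MORE_DATA) {
//             AudioTrack::Buffer *b = static_cast<AudioTrack::Buffer *>(info);
//             PcmSource *src = static_cast<PcmSource *>(user);
//             b->size = src->read(b->raw, b->size);   // bytes actually written
//         }
//     }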
1780
1781status_t AudioTrack::restoreTrack_l(const char *from)
1782{
1783    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1784          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
1785    ++mSequence;
1786    status_t result;
1787
1788    // refresh the audio configuration cache in this process to make sure we get new
1789    // output parameters in createTrack_l()
1790    AudioSystem::clearAudioConfigCache();
1791
1792    if (isOffloadedOrDirect_l()) {
1793        // FIXME re-creation of offloaded tracks is not yet implemented
1794        return DEAD_OBJECT;
1795    }
1796
1797    // If the new IAudioTrack is created, createTrack_l() will modify the
1798    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1799    // It will also drop the strong references to the previous IAudioTrack and IMemory.
1800
1801    // take the frames that will be lost by track recreation into account in saved position
1802    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1803    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1804    result = createTrack_l(position /*epoch*/);
1805
1806    if (result == NO_ERROR) {
1807        // continue playback from last known position, but
1808        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1809        if (mStaticProxy != NULL) {
1810            mLoopPeriod = 0;
1811            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1812        }
1813        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1814        //       track destruction have been played? This is critical for the SoundPool implementation.
1815        //       This must be broken, and needs to be tested/debugged.
1816#if 0
1817        // restore write index and set other indexes to reflect empty buffer status
1818        if (!strcmp(from, "start")) {
1819            // Make sure that a client relying on callback events indicating underrun or
1820            // the actual number of audio frames played (e.g. SoundPool) receives them.
1821            if (mSharedBuffer == 0) {
1822                // restart playback even if buffer is not completely filled.
1823                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1824            }
1825        }
1826#endif
1827        if (mState == STATE_ACTIVE) {
1828            result = mAudioTrack->start();
1829        }
1830    }
1831    if (result != NO_ERROR) {
1832        ALOGW("restoreTrack_l() failed status %d", result);
1833        mState = STATE_STOPPED;
1834    }
1835
1836    return result;
1837}
1838
1839status_t AudioTrack::setParameters(const String8& keyValuePairs)
1840{
1841    AutoMutex lock(mLock);
1842    return mAudioTrack->setParameters(keyValuePairs);
1843}
1844
1845status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1846{
1847    AutoMutex lock(mLock);
1848    // FIXME not implemented for fast tracks; should use proxy and SSQ
1849    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1850        return INVALID_OPERATION;
1851    }
1852    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1853        return INVALID_OPERATION;
1854    }
1855    status_t status = mAudioTrack->getTimestamp(timestamp);
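    // The server-side position is relative to the current IAudioTrack; adding the proxy epoch
    // (the position carried over by restoreTrack_l() via createTrack_l(position /*epoch*/))
    // converts it to the client's cumulative playback position.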
1856    if (status == NO_ERROR) {
1857        timestamp.mPosition += mProxy->getEpoch();
1858    }
1859    return status;
1860}
1861
1862String8 AudioTrack::getParameters(const String8& keys)
1863{
1864    audio_io_handle_t output = getOutput();
1865    if (output != AUDIO_IO_HANDLE_NONE) {
1866        return AudioSystem::getParameters(output, keys);
1867    } else {
1868        return String8::empty();
1869    }
1870}
1871
1872bool AudioTrack::isOffloaded() const
1873{
1874    AutoMutex lock(mLock);
1875    return isOffloaded_l();
1876}
1877
1878bool AudioTrack::isDirect() const
1879{
1880    AutoMutex lock(mLock);
1881    return isDirect_l();
1882}
1883
1884bool AudioTrack::isOffloadedOrDirect() const
1885{
1886    AutoMutex lock(mLock);
1887    return isOffloadedOrDirect_l();
1888}
1889
1890
1891status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
1892{
1893
1894    const size_t SIZE = 256;
1895    char buffer[SIZE];
1896    String8 result;
1897
1898    result.append(" AudioTrack::dump\n");
1899    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1900            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
1901    result.append(buffer);
1902    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
1903            mChannelCount, mFrameCount);
1904    result.append(buffer);
1905    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1906    result.append(buffer);
1907    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1908    result.append(buffer);
1909    ::write(fd, result.string(), result.size());
1910    return NO_ERROR;
1911}
1912
1913uint32_t AudioTrack::getUnderrunFrames() const
1914{
1915    AutoMutex lock(mLock);
1916    return mProxy->getUnderrunFrames();
1917}
1918
1919void AudioTrack::setAttributesFromStreamType(audio_stream_type_t streamType) {
1920    mAttributes.flags = 0x0;
1921
1922    switch(streamType) {
1923    case AUDIO_STREAM_DEFAULT:
1924    case AUDIO_STREAM_MUSIC:
1925        mAttributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
1926        mAttributes.usage = AUDIO_USAGE_MEDIA;
1927        break;
1928    case AUDIO_STREAM_VOICE_CALL:
1929        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1930        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1931        break;
1932    case AUDIO_STREAM_ENFORCED_AUDIBLE:
1933        mAttributes.flags  |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
1934        // intended fall through, attributes in common with STREAM_SYSTEM
1935    case AUDIO_STREAM_SYSTEM:
1936        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1937        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
1938        break;
1939    case AUDIO_STREAM_RING:
1940        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1941        mAttributes.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
1942        break;
1943    case AUDIO_STREAM_ALARM:
1944        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1945        mAttributes.usage = AUDIO_USAGE_ALARM;
1946        break;
1947    case AUDIO_STREAM_NOTIFICATION:
1948        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1949        mAttributes.usage = AUDIO_USAGE_NOTIFICATION;
1950        break;
1951    case AUDIO_STREAM_BLUETOOTH_SCO:
1952        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1953        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1954        mAttributes.flags |= AUDIO_FLAG_SCO;
1955        break;
1956    case AUDIO_STREAM_DTMF:
1957        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1958        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
1959        break;
1960    case AUDIO_STREAM_TTS:
1961        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1962        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
1963        break;
1964    default:
1965        ALOGE("invalid stream type %d when converting to attributes", streamType);
1966    }
1967}
1968
1969void AudioTrack::setStreamTypeFromAttributes(audio_attributes_t& aa) {
1970    // flags to stream type mapping
1971    if ((aa.flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
1972        mStreamType = AUDIO_STREAM_ENFORCED_AUDIBLE;
1973        return;
1974    }
1975    if ((aa.flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
1976        mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
1977        return;
1978    }
1979
1980    // usage to stream type mapping
1981    switch (aa.usage) {
1982    case AUDIO_USAGE_MEDIA:
1983    case AUDIO_USAGE_GAME:
1984    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
1985    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
1986        mStreamType = AUDIO_STREAM_MUSIC;
1987        return;
1988    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
1989        mStreamType = AUDIO_STREAM_SYSTEM;
1990        return;
1991    case AUDIO_USAGE_VOICE_COMMUNICATION:
1992        mStreamType = AUDIO_STREAM_VOICE_CALL;
1993        return;
1994
1995    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
1996        mStreamType = AUDIO_STREAM_DTMF;
1997        return;
1998
1999    case AUDIO_USAGE_ALARM:
2000        mStreamType = AUDIO_STREAM_ALARM;
2001        return;
2002    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
2003        mStreamType = AUDIO_STREAM_RING;
2004        return;
2005
2006    case AUDIO_USAGE_NOTIFICATION:
2007    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
2008    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
2009    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2010    case AUDIO_USAGE_NOTIFICATION_EVENT:
2011        mStreamType = AUDIO_STREAM_NOTIFICATION;
2012        return;
2013
2014    case AUDIO_USAGE_UNKNOWN:
2015    default:
2016        mStreamType = AUDIO_STREAM_MUSIC;
2017    }
2018}
2019
2020bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
2021    // has flags that map to a strategy?
2022    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) {
2023        return true;
2024    }
2025
2026    // has known usage?
2027    switch (paa->usage) {
2028    case AUDIO_USAGE_UNKNOWN:
2029    case AUDIO_USAGE_MEDIA:
2030    case AUDIO_USAGE_VOICE_COMMUNICATION:
2031    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
2032    case AUDIO_USAGE_ALARM:
2033    case AUDIO_USAGE_NOTIFICATION:
2034    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
2035    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
2036    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
2037    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2038    case AUDIO_USAGE_NOTIFICATION_EVENT:
2039    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
2040    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
2041    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
2042    case AUDIO_USAGE_GAME:
2043        break;
2044    default:
2045        return false;
2046    }
2047    return true;
2048}
2049// =========================================================================
2050
2051void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2052{
2053    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2054    if (audioTrack != 0) {
2055        AutoMutex lock(audioTrack->mLock);
2056        audioTrack->mProxy->binderDied();
2057    }
2058}
2059
2060// =========================================================================
2061
2062AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2063    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2064      mIgnoreNextPausedInt(false)
2065{
2066}
2067
2068AudioTrack::AudioTrackThread::~AudioTrackThread()
2069{
2070}
2071
2072bool AudioTrack::AudioTrackThread::threadLoop()
2073{
2074    {
2075        AutoMutex _l(mMyLock);
2076        if (mPaused) {
2077            mMyCond.wait(mMyLock);
2078            // caller will check for exitPending()
2079            return true;
2080        }
2081        if (mIgnoreNextPausedInt) {
2082            mIgnoreNextPausedInt = false;
2083            mPausedInt = false;
2084        }
2085        if (mPausedInt) {
2086            if (mPausedNs > 0) {
2087                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2088            } else {
2089                mMyCond.wait(mMyLock);
2090            }
2091            mPausedInt = false;
2092            return true;
2093        }
2094    }
2095    nsecs_t ns = mReceiver.processAudioBuffer();
2096    switch (ns) {
2097    case 0:
2098        return true;
2099    case NS_INACTIVE:
2100        pauseInternal();
2101        return true;
2102    case NS_NEVER:
2103        return false;
2104    case NS_WHENEVER:
2105        // FIXME increase poll interval, or make event-driven
2106        ns = 1000000000LL;
2107        // fall through
2108    default:
2109        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2110        pauseInternal(ns);
2111        return true;
2112    }
2113}
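
// Summary of the contract between processAudioBuffer() and threadLoop(), as implemented above:
//   0           -> run processAudioBuffer() again immediately
//   NS_INACTIVE -> pauseInternal() and wait until resume()
//   NS_WHENEVER -> poll again after about 1 second
//   NS_NEVER    -> exit the thread loop
//   > 0         -> sleep that many nanoseconds (via pauseInternal(ns)) before the next pass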
2114
2115void AudioTrack::AudioTrackThread::requestExit()
2116{
2117    // must be in this order to avoid a race condition
2118    Thread::requestExit();
2119    resume();
2120}
2121
2122void AudioTrack::AudioTrackThread::pause()
2123{
2124    AutoMutex _l(mMyLock);
2125    mPaused = true;
2126}
2127
2128void AudioTrack::AudioTrackThread::resume()
2129{
2130    AutoMutex _l(mMyLock);
2131    mIgnoreNextPausedInt = true;
2132    if (mPaused || mPausedInt) {
2133        mPaused = false;
2134        mPausedInt = false;
2135        mMyCond.signal();
2136    }
2137}
2138
2139void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2140{
2141    AutoMutex _l(mMyLock);
2142    mPausedInt = true;
2143    mPausedNs = ns;
2144}
2145
2146} // namespace android
2147