AudioTrack.cpp revision ab5cdbaf65ca509681d2726aacdf3ac8bfb6b3fa
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <math.h>
23#include <sys/resource.h>
24#include <audio_utils/primitives.h>
25#include <binder/IPCThreadState.h>
26#include <media/AudioTrack.h>
27#include <utils/Log.h>
28#include <private/media/AudioTrackShared.h>
29#include <media/IAudioFlinger.h>
30
31#define WAIT_PERIOD_MS                  10
32#define WAIT_STREAM_END_TIMEOUT_SEC     120
33
34
35namespace android {
36// ---------------------------------------------------------------------------
37
38// static
39status_t AudioTrack::getMinFrameCount(
40        size_t* frameCount,
41        audio_stream_type_t streamType,
42        uint32_t sampleRate)
43{
44    if (frameCount == NULL) {
45        return BAD_VALUE;
46    }
47
48    // FIXME merge with similar code in createTrack_l(), except we're missing
49    //       some information here that is available in createTrack_l():
50    //          audio_io_handle_t output
51    //          audio_format_t format
52    //          audio_channel_mask_t channelMask
53    //          audio_output_flags_t flags
54    uint32_t afSampleRate;
55    status_t status;
56    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
57    if (status != NO_ERROR) {
58        ALOGE("Unable to query output sample rate for stream type %d; status %d",
59                streamType, status);
60        return status;
61    }
62    size_t afFrameCount;
63    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
64    if (status != NO_ERROR) {
65        ALOGE("Unable to query output frame count for stream type %d; status %d",
66                streamType, status);
67        return status;
68    }
69    uint32_t afLatency;
70    status = AudioSystem::getOutputLatency(&afLatency, streamType);
71    if (status != NO_ERROR) {
72        ALOGE("Unable to query output latency for stream type %d; status %d",
73                streamType, status);
74        return status;
75    }
76
77    // Ensure that buffer depth covers at least audio hardware latency
78    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
79    if (minBufCount < 2) {
80        minBufCount = 2;
81    }
82
83    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
84            afFrameCount * minBufCount * sampleRate / afSampleRate;
85    // The formula above should always produce a non-zero value, but return an error
86    // in the unlikely event that it does not, as that's part of the API contract.
87    if (*frameCount == 0) {
88        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
89                streamType, sampleRate);
90        return BAD_VALUE;
91    }
92    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
93            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
94    return NO_ERROR;
95}
96
97// ---------------------------------------------------------------------------
98
99AudioTrack::AudioTrack()
100    : mStatus(NO_INIT),
101      mIsTimed(false),
102      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
103      mPreviousSchedulingGroup(SP_DEFAULT),
104      mPausedPosition(0)
105{
106    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
107    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
108    mAttributes.flags = 0x0;
109    strcpy(mAttributes.tags, "");
110}
111
112AudioTrack::AudioTrack(
113        audio_stream_type_t streamType,
114        uint32_t sampleRate,
115        audio_format_t format,
116        audio_channel_mask_t channelMask,
117        size_t frameCount,
118        audio_output_flags_t flags,
119        callback_t cbf,
120        void* user,
121        uint32_t notificationFrames,
122        int sessionId,
123        transfer_type transferType,
124        const audio_offload_info_t *offloadInfo,
125        int uid,
126        pid_t pid)
127    : mStatus(NO_INIT),
128      mIsTimed(false),
129      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
130      mPreviousSchedulingGroup(SP_DEFAULT),
131      mPausedPosition(0)
132{
133    mStatus = set(streamType, sampleRate, format, channelMask,
134            frameCount, flags, cbf, user, notificationFrames,
135            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
136            offloadInfo, uid, pid, NULL /*no audio attributes*/);
137}
138
139AudioTrack::AudioTrack(
140        audio_stream_type_t streamType,
141        uint32_t sampleRate,
142        audio_format_t format,
143        audio_channel_mask_t channelMask,
144        const sp<IMemory>& sharedBuffer,
145        audio_output_flags_t flags,
146        callback_t cbf,
147        void* user,
148        uint32_t notificationFrames,
149        int sessionId,
150        transfer_type transferType,
151        const audio_offload_info_t *offloadInfo,
152        int uid,
153        pid_t pid)
154    : mStatus(NO_INIT),
155      mIsTimed(false),
156      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
157      mPreviousSchedulingGroup(SP_DEFAULT),
158      mPausedPosition(0)
159{
160    mStatus = set(streamType, sampleRate, format, channelMask,
161            0 /*frameCount*/, flags, cbf, user, notificationFrames,
162            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
163            uid, pid, NULL /*no audio attributes*/);
164}
165
166AudioTrack::~AudioTrack()
167{
168    if (mStatus == NO_ERROR) {
169        // Make sure that callback function exits in the case where
170        // it is looping on buffer full condition in obtainBuffer().
171        // Otherwise the callback thread will never exit.
172        stop();
173        if (mAudioTrackThread != 0) {
174            mProxy->interrupt();
175            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
176            mAudioTrackThread->requestExitAndWait();
177            mAudioTrackThread.clear();
178        }
179        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
180        mAudioTrack.clear();
181        mCblkMemory.clear();
182        mSharedBuffer.clear();
183        IPCThreadState::self()->flushCommands();
184        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
185                IPCThreadState::self()->getCallingPid(), mClientPid);
186        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
187    }
188}
189
190status_t AudioTrack::set(
191        audio_stream_type_t streamType,
192        uint32_t sampleRate,
193        audio_format_t format,
194        audio_channel_mask_t channelMask,
195        size_t frameCount,
196        audio_output_flags_t flags,
197        callback_t cbf,
198        void* user,
199        uint32_t notificationFrames,
200        const sp<IMemory>& sharedBuffer,
201        bool threadCanCallJava,
202        int sessionId,
203        transfer_type transferType,
204        const audio_offload_info_t *offloadInfo,
205        int uid,
206        pid_t pid,
207        audio_attributes_t* pAttributes)
208{
209    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
210          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
211          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
212          sessionId, transferType);
213
214    switch (transferType) {
215    case TRANSFER_DEFAULT:
216        if (sharedBuffer != 0) {
217            transferType = TRANSFER_SHARED;
218        } else if (cbf == NULL || threadCanCallJava) {
219            transferType = TRANSFER_SYNC;
220        } else {
221            transferType = TRANSFER_CALLBACK;
222        }
223        break;
224    case TRANSFER_CALLBACK:
225        if (cbf == NULL || sharedBuffer != 0) {
226            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
227            return BAD_VALUE;
228        }
229        break;
230    case TRANSFER_OBTAIN:
231    case TRANSFER_SYNC:
232        if (sharedBuffer != 0) {
233            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
234            return BAD_VALUE;
235        }
236        break;
237    case TRANSFER_SHARED:
238        if (sharedBuffer == 0) {
239            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
240            return BAD_VALUE;
241        }
242        break;
243    default:
244        ALOGE("Invalid transfer type %d", transferType);
245        return BAD_VALUE;
246    }
247    mSharedBuffer = sharedBuffer;
248    mTransfer = transferType;
249
250    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
251            sharedBuffer->size());
252
253    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
254
255    AutoMutex lock(mLock);
256
257    // invariant that mAudioTrack != 0 is true only after set() returns successfully
258    if (mAudioTrack != 0) {
259        ALOGE("Track already in use");
260        return INVALID_OPERATION;
261    }
262
263    // handle default values first.
264    if (streamType == AUDIO_STREAM_DEFAULT) {
265        streamType = AUDIO_STREAM_MUSIC;
266    }
267
268    if (pAttributes == NULL) {
269        if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
270            ALOGE("Invalid stream type %d", streamType);
271            return BAD_VALUE;
272        }
273        setAttributesFromStreamType(streamType);
274        mStreamType = streamType;
275    } else {
276        if (!isValidAttributes(pAttributes)) {
277            ALOGE("Invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
278                pAttributes->usage, pAttributes->content_type, pAttributes->flags,
279                pAttributes->tags);
280        }
281        // stream type shouldn't be looked at, this track has audio attributes
282        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
283        setStreamTypeFromAttributes(mAttributes);
284        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
285                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
286    }
287
288    status_t status;
289    if (sampleRate == 0) {
290        status = AudioSystem::getOutputSamplingRateForAttr(&sampleRate, &mAttributes);
291        if (status != NO_ERROR) {
292            ALOGE("Could not get output sample rate for stream type %d; status %d",
293                    mStreamType, status);
294            return status;
295        }
296    }
297    mSampleRate = sampleRate;
298
299    // these below should probably come from the audioFlinger too...
300    if (format == AUDIO_FORMAT_DEFAULT) {
301        format = AUDIO_FORMAT_PCM_16_BIT;
302    }
303
304    // validate parameters
305    if (!audio_is_valid_format(format)) {
306        ALOGE("Invalid format %#x", format);
307        return BAD_VALUE;
308    }
309    mFormat = format;
310
311    if (!audio_is_output_channel(channelMask)) {
312        ALOGE("Invalid channel mask %#x", channelMask);
313        return BAD_VALUE;
314    }
315    mChannelMask = channelMask;
316    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
317    mChannelCount = channelCount;
318
319    // AudioFlinger does not currently support 8-bit data in shared memory
320    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
321        ALOGE("8-bit data in shared memory is not supported");
322        return BAD_VALUE;
323    }
324
325    // force direct flag if format is not linear PCM
326    // or offload was requested
327    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
328            || !audio_is_linear_pcm(format)) {
329        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
330                    ? "Offload request, forcing to Direct Output"
331                    : "Not linear PCM, forcing to Direct Output");
332        flags = (audio_output_flags_t)
333                // FIXME why can't we allow direct AND fast?
334                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
335    }
336    // only allow deep buffering for music stream type
337    if (mStreamType != AUDIO_STREAM_MUSIC) {
338        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
339    }
340
341    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
342        if (audio_is_linear_pcm(format)) {
343            mFrameSize = channelCount * audio_bytes_per_sample(format);
344        } else {
345            mFrameSize = sizeof(uint8_t);
346        }
347        mFrameSizeAF = mFrameSize;
348    } else {
349        ALOG_ASSERT(audio_is_linear_pcm(format));
350        mFrameSize = channelCount * audio_bytes_per_sample(format);
351        mFrameSizeAF = channelCount * audio_bytes_per_sample(
352                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
353        // createTrack will return an error if PCM format is not supported by server,
354        // so no need to check for specific PCM formats here
355    }
356
357    // Make copy of input parameter offloadInfo so that in the future:
358    //  (a) createTrack_l doesn't need it as an input parameter
359    //  (b) we can support re-creation of offloaded tracks
360    if (offloadInfo != NULL) {
361        mOffloadInfoCopy = *offloadInfo;
362        mOffloadInfo = &mOffloadInfoCopy;
363    } else {
364        mOffloadInfo = NULL;
365    }
366
367    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
368    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
369    mSendLevel = 0.0f;
370    // mFrameCount is initialized in createTrack_l
371    mReqFrameCount = frameCount;
372    mNotificationFramesReq = notificationFrames;
373    mNotificationFramesAct = 0;
374    mSessionId = sessionId;
375    int callingpid = IPCThreadState::self()->getCallingPid();
376    int mypid = getpid();
377    if (uid == -1 || (callingpid != mypid)) {
378        mClientUid = IPCThreadState::self()->getCallingUid();
379    } else {
380        mClientUid = uid;
381    }
382    if (pid == -1 || (callingpid != mypid)) {
383        mClientPid = callingpid;
384    } else {
385        mClientPid = pid;
386    }
387    mAuxEffectId = 0;
388    mFlags = flags;
389    mCbf = cbf;
390
391    if (cbf != NULL) {
392        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
393        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
394    }
395
396    // create the IAudioTrack
397    status = createTrack_l(0 /*epoch*/);
398
399    if (status != NO_ERROR) {
400        if (mAudioTrackThread != 0) {
401            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
402            mAudioTrackThread->requestExitAndWait();
403            mAudioTrackThread.clear();
404        }
405        return status;
406    }
407
408    mStatus = NO_ERROR;
409    mState = STATE_STOPPED;
410    mUserData = user;
411    mLoopPeriod = 0;
412    mMarkerPosition = 0;
413    mMarkerReached = false;
414    mNewPosition = 0;
415    mUpdatePeriod = 0;
416    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
417    mSequence = 1;
418    mObservedSequence = mSequence;
419    mInUnderrun = false;
420
421    return NO_ERROR;
422}
423
424// -------------------------------------------------------------------------
425
426status_t AudioTrack::start()
427{
428    AutoMutex lock(mLock);
429
430    if (mState == STATE_ACTIVE) {
431        return INVALID_OPERATION;
432    }
433
434    mInUnderrun = true;
435
436    State previousState = mState;
437    if (previousState == STATE_PAUSED_STOPPING) {
438        mState = STATE_STOPPING;
439    } else {
440        mState = STATE_ACTIVE;
441    }
442    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
443        // reset current position as seen by client to 0
444        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
445        // force refresh of remaining frames by processAudioBuffer() as last
446        // write before stop could be partial.
447        mRefreshRemaining = true;
448    }
449    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
450    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
451
452    sp<AudioTrackThread> t = mAudioTrackThread;
453    if (t != 0) {
454        if (previousState == STATE_STOPPING) {
455            mProxy->interrupt();
456        } else {
457            t->resume();
458        }
459    } else {
460        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
461        get_sched_policy(0, &mPreviousSchedulingGroup);
462        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
463    }
464
465    status_t status = NO_ERROR;
466    if (!(flags & CBLK_INVALID)) {
467        status = mAudioTrack->start();
468        if (status == DEAD_OBJECT) {
469            flags |= CBLK_INVALID;
470        }
471    }
472    if (flags & CBLK_INVALID) {
473        status = restoreTrack_l("start");
474    }
475
476    if (status != NO_ERROR) {
477        ALOGE("start() status %d", status);
478        mState = previousState;
479        if (t != 0) {
480            if (previousState != STATE_STOPPING) {
481                t->pause();
482            }
483        } else {
484            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
485            set_sched_policy(0, mPreviousSchedulingGroup);
486        }
487    }
488
489    return status;
490}
491
492void AudioTrack::stop()
493{
494    AutoMutex lock(mLock);
495    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
496        return;
497    }
498
499    if (isOffloaded_l()) {
500        mState = STATE_STOPPING;
501    } else {
502        mState = STATE_STOPPED;
503    }
504
505    mProxy->interrupt();
506    mAudioTrack->stop();
507    // the playback head position will reset to 0, so if a marker is set, we need
508    // to activate it again
509    mMarkerReached = false;
510#if 0
511    // Force flush if a shared buffer is used otherwise audioflinger
512    // will not stop before end of buffer is reached.
513    // It may be needed to make sure that we stop playback, likely in case looping is on.
514    if (mSharedBuffer != 0) {
515        flush_l();
516    }
517#endif
518
519    sp<AudioTrackThread> t = mAudioTrackThread;
520    if (t != 0) {
521        if (!isOffloaded_l()) {
522            t->pause();
523        }
524    } else {
525        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
526        set_sched_policy(0, mPreviousSchedulingGroup);
527    }
528}
529
530bool AudioTrack::stopped() const
531{
532    AutoMutex lock(mLock);
533    return mState != STATE_ACTIVE;
534}
535
536void AudioTrack::flush()
537{
538    if (mSharedBuffer != 0) {
539        return;
540    }
541    AutoMutex lock(mLock);
542    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
543        return;
544    }
545    flush_l();
546}
547
548void AudioTrack::flush_l()
549{
550    ALOG_ASSERT(mState != STATE_ACTIVE);
551
552    // clear playback marker and periodic update counter
553    mMarkerPosition = 0;
554    mMarkerReached = false;
555    mUpdatePeriod = 0;
556    mRefreshRemaining = true;
557
558    mState = STATE_FLUSHED;
559    if (isOffloaded_l()) {
560        mProxy->interrupt();
561    }
562    mProxy->flush();
563    mAudioTrack->flush();
564}
565
566void AudioTrack::pause()
567{
568    AutoMutex lock(mLock);
569    if (mState == STATE_ACTIVE) {
570        mState = STATE_PAUSED;
571    } else if (mState == STATE_STOPPING) {
572        mState = STATE_PAUSED_STOPPING;
573    } else {
574        return;
575    }
576    mProxy->interrupt();
577    mAudioTrack->pause();
578
579    if (isOffloaded_l()) {
580        if (mOutput != AUDIO_IO_HANDLE_NONE) {
581            uint32_t halFrames;
582            // OffloadThread sends HAL pause in its threadLoop.. time saved
583            // here can be slightly off
584            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
585            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
586        }
587    }
588}
589
590status_t AudioTrack::setVolume(float left, float right)
591{
592    // This duplicates a test by AudioTrack JNI, but that is not the only caller
593    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
594            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
595        return BAD_VALUE;
596    }
597
598    AutoMutex lock(mLock);
599    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
600    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
601
602    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
603
604    if (isOffloaded_l()) {
605        mAudioTrack->signal();
606    }
607    return NO_ERROR;
608}
609
610status_t AudioTrack::setVolume(float volume)
611{
612    return setVolume(volume, volume);
613}
614
615status_t AudioTrack::setAuxEffectSendLevel(float level)
616{
617    // This duplicates a test by AudioTrack JNI, but that is not the only caller
618    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
619        return BAD_VALUE;
620    }
621
622    AutoMutex lock(mLock);
623    mSendLevel = level;
624    mProxy->setSendLevel(level);
625
626    return NO_ERROR;
627}
628
629void AudioTrack::getAuxEffectSendLevel(float* level) const
630{
631    if (level != NULL) {
632        *level = mSendLevel;
633    }
634}
635
636status_t AudioTrack::setSampleRate(uint32_t rate)
637{
638    if (mIsTimed || isOffloadedOrDirect()) {
639        return INVALID_OPERATION;
640    }
641
642    uint32_t afSamplingRate;
643    if (AudioSystem::getOutputSamplingRateForAttr(&afSamplingRate, &mAttributes) != NO_ERROR) {
644        return NO_INIT;
645    }
646    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
647    if (rate == 0 || rate > afSamplingRate*2 ) {
648        return BAD_VALUE;
649    }
650
651    AutoMutex lock(mLock);
652    mSampleRate = rate;
653    mProxy->setSampleRate(rate);
654
655    return NO_ERROR;
656}
657
658uint32_t AudioTrack::getSampleRate() const
659{
660    if (mIsTimed) {
661        return 0;
662    }
663
664    AutoMutex lock(mLock);
665
666    // sample rate can be updated during playback by the offloaded decoder so we need to
667    // query the HAL and update if needed.
668// FIXME use Proxy return channel to update the rate from server and avoid polling here
669    if (isOffloadedOrDirect_l()) {
670        if (mOutput != AUDIO_IO_HANDLE_NONE) {
671            uint32_t sampleRate = 0;
672            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
673            if (status == NO_ERROR) {
674                mSampleRate = sampleRate;
675            }
676        }
677    }
678    return mSampleRate;
679}
680
681status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
682{
683    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
684        return INVALID_OPERATION;
685    }
686
687    if (loopCount == 0) {
688        ;
689    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
690            loopEnd - loopStart >= MIN_LOOP) {
691        ;
692    } else {
693        return BAD_VALUE;
694    }
695
696    AutoMutex lock(mLock);
697    // See setPosition() regarding setting parameters such as loop points or position while active
698    if (mState == STATE_ACTIVE) {
699        return INVALID_OPERATION;
700    }
701    setLoop_l(loopStart, loopEnd, loopCount);
702    return NO_ERROR;
703}
704
705void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
706{
707    // FIXME If setting a loop also sets position to start of loop, then
708    //       this is correct.  Otherwise it should be removed.
709    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
710    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
711    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
712}
713
714status_t AudioTrack::setMarkerPosition(uint32_t marker)
715{
716    // The only purpose of setting marker position is to get a callback
717    if (mCbf == NULL || isOffloadedOrDirect()) {
718        return INVALID_OPERATION;
719    }
720
721    AutoMutex lock(mLock);
722    mMarkerPosition = marker;
723    mMarkerReached = false;
724
725    return NO_ERROR;
726}
727
728status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
729{
730    if (isOffloadedOrDirect()) {
731        return INVALID_OPERATION;
732    }
733    if (marker == NULL) {
734        return BAD_VALUE;
735    }
736
737    AutoMutex lock(mLock);
738    *marker = mMarkerPosition;
739
740    return NO_ERROR;
741}
742
743status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
744{
745    // The only purpose of setting position update period is to get a callback
746    if (mCbf == NULL || isOffloadedOrDirect()) {
747        return INVALID_OPERATION;
748    }
749
750    AutoMutex lock(mLock);
751    mNewPosition = mProxy->getPosition() + updatePeriod;
752    mUpdatePeriod = updatePeriod;
753
754    return NO_ERROR;
755}
756
757status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
758{
759    if (isOffloadedOrDirect()) {
760        return INVALID_OPERATION;
761    }
762    if (updatePeriod == NULL) {
763        return BAD_VALUE;
764    }
765
766    AutoMutex lock(mLock);
767    *updatePeriod = mUpdatePeriod;
768
769    return NO_ERROR;
770}
771
772status_t AudioTrack::setPosition(uint32_t position)
773{
774    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
775        return INVALID_OPERATION;
776    }
777    if (position > mFrameCount) {
778        return BAD_VALUE;
779    }
780
781    AutoMutex lock(mLock);
782    // Currently we require that the player is inactive before setting parameters such as position
783    // or loop points.  Otherwise, there could be a race condition: the application could read the
784    // current position, compute a new position or loop parameters, and then set that position or
785    // loop parameters but it would do the "wrong" thing since the position has continued to advance
786    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
787    // to specify how it wants to handle such scenarios.
788    if (mState == STATE_ACTIVE) {
789        return INVALID_OPERATION;
790    }
791    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
792    mLoopPeriod = 0;
793    // FIXME Check whether loops and setting position are incompatible in old code.
794    // If we use setLoop for both purposes we lose the capability to set the position while looping.
795    mStaticProxy->setLoop(position, mFrameCount, 0);
796
797    return NO_ERROR;
798}
799
800status_t AudioTrack::getPosition(uint32_t *position) const
801{
802    if (position == NULL) {
803        return BAD_VALUE;
804    }
805
806    AutoMutex lock(mLock);
807    if (isOffloadedOrDirect_l()) {
808        uint32_t dspFrames = 0;
809
810        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
811            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
812            *position = mPausedPosition;
813            return NO_ERROR;
814        }
815
816        if (mOutput != AUDIO_IO_HANDLE_NONE) {
817            uint32_t halFrames;
818            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
819        }
820        *position = dspFrames;
821    } else {
822        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
823        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
824                mProxy->getPosition();
825    }
826    return NO_ERROR;
827}
828
829status_t AudioTrack::getBufferPosition(uint32_t *position)
830{
831    if (mSharedBuffer == 0 || mIsTimed) {
832        return INVALID_OPERATION;
833    }
834    if (position == NULL) {
835        return BAD_VALUE;
836    }
837
838    AutoMutex lock(mLock);
839    *position = mStaticProxy->getBufferPosition();
840    return NO_ERROR;
841}
842
843status_t AudioTrack::reload()
844{
845    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
846        return INVALID_OPERATION;
847    }
848
849    AutoMutex lock(mLock);
850    // See setPosition() regarding setting parameters such as loop points or position while active
851    if (mState == STATE_ACTIVE) {
852        return INVALID_OPERATION;
853    }
854    mNewPosition = mUpdatePeriod;
855    mLoopPeriod = 0;
856    // FIXME The new code cannot reload while keeping a loop specified.
857    // Need to check how the old code handled this, and whether it's a significant change.
858    mStaticProxy->setLoop(0, mFrameCount, 0);
859    return NO_ERROR;
860}
861
862audio_io_handle_t AudioTrack::getOutput() const
863{
864    AutoMutex lock(mLock);
865    return mOutput;
866}
867
868status_t AudioTrack::attachAuxEffect(int effectId)
869{
870    AutoMutex lock(mLock);
871    status_t status = mAudioTrack->attachAuxEffect(effectId);
872    if (status == NO_ERROR) {
873        mAuxEffectId = effectId;
874    }
875    return status;
876}
877
878// -------------------------------------------------------------------------
879
880// must be called with mLock held
881status_t AudioTrack::createTrack_l(size_t epoch)
882{
883    status_t status;
884    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
885    if (audioFlinger == 0) {
886        ALOGE("Could not get audioflinger");
887        return NO_INIT;
888    }
889
890    audio_io_handle_t output = AudioSystem::getOutputForAttr(&mAttributes, mSampleRate, mFormat,
891            mChannelMask, mFlags, mOffloadInfo);
892    if (output == AUDIO_IO_HANDLE_NONE) {
893        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
894              " channel mask %#x, flags %#x",
895              mStreamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
896        return BAD_VALUE;
897    }
898    {
899    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
900    // we must release it ourselves if anything goes wrong.
901
902    // Not all of these values are needed under all conditions, but it is easier to get them all
903
904    uint32_t afLatency;
905    status = AudioSystem::getLatency(output, &afLatency);
906    if (status != NO_ERROR) {
907        ALOGE("getLatency(%d) failed status %d", output, status);
908        goto release;
909    }
910
911    size_t afFrameCount;
912    status = AudioSystem::getFrameCount(output, &afFrameCount);
913    if (status != NO_ERROR) {
914        ALOGE("getFrameCount(output=%d) status %d", output, status);
915        goto release;
916    }
917
918    uint32_t afSampleRate;
919    status = AudioSystem::getSamplingRate(output, &afSampleRate);
920    if (status != NO_ERROR) {
921        ALOGE("getSamplingRate(output=%d) status %d", output, status);
922        goto release;
923    }
924
925    // Client decides whether the track is TIMED (see below), but can only express a preference
926    // for FAST.  Server will perform additional tests.
927    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
928            // either of these use cases:
929            // use case 1: shared buffer
930            (mSharedBuffer != 0) ||
931            // use case 2: callback transfer mode
932            (mTransfer == TRANSFER_CALLBACK)) &&
933            // matching sample rate
934            (mSampleRate == afSampleRate))) {
935        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
936        // once denied, do not request again if IAudioTrack is re-created
937        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
938    }
939    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
940
941    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
942    //  n = 1   fast track with single buffering; nBuffering is ignored
943    //  n = 2   fast track with double buffering
944    //  n = 2   normal track, no sample rate conversion
945    //  n = 3   normal track, with sample rate conversion
946    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
947    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
948    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
949
950    mNotificationFramesAct = mNotificationFramesReq;
951
952    size_t frameCount = mReqFrameCount;
953    if (!audio_is_linear_pcm(mFormat)) {
954
955        if (mSharedBuffer != 0) {
956            // Same comment as below about ignoring frameCount parameter for set()
957            frameCount = mSharedBuffer->size();
958        } else if (frameCount == 0) {
959            frameCount = afFrameCount;
960        }
961        if (mNotificationFramesAct != frameCount) {
962            mNotificationFramesAct = frameCount;
963        }
964    } else if (mSharedBuffer != 0) {
965
966        // Ensure that buffer alignment matches channel count
967        // 8-bit data in shared memory is not currently supported by AudioFlinger
968        size_t alignment = audio_bytes_per_sample(
969                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
970        if (alignment & 1) {
971            alignment = 1;
972        }
973        if (mChannelCount > 1) {
974            // More than 2 channels does not require stronger alignment than stereo
975            alignment <<= 1;
976        }
977        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
978            ALOGE("Invalid buffer alignment: address %p, channel count %u",
979                    mSharedBuffer->pointer(), mChannelCount);
980            status = BAD_VALUE;
981            goto release;
982        }
983
984        // When initializing a shared buffer AudioTrack via constructors,
985        // there's no frameCount parameter.
986        // But when initializing a shared buffer AudioTrack via set(),
987        // there _is_ a frameCount parameter.  We silently ignore it.
988        frameCount = mSharedBuffer->size() / mFrameSizeAF;
989
990    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
991
992        // FIXME move these calculations and associated checks to server
993
994        // Ensure that buffer depth covers at least audio hardware latency
995        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
996        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
997                afFrameCount, minBufCount, afSampleRate, afLatency);
998        if (minBufCount <= nBuffering) {
999            minBufCount = nBuffering;
1000        }
1001
1002        size_t minFrameCount = (afFrameCount*mSampleRate*minBufCount)/afSampleRate;
1003        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
1004                ", afLatency=%d",
1005                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
1006
1007        if (frameCount == 0) {
1008            frameCount = minFrameCount;
1009        } else if (frameCount < minFrameCount) {
1010            // not ALOGW because it happens all the time when playing key clicks over A2DP
1011            ALOGV("Minimum buffer size corrected from %d to %d",
1012                     frameCount, minFrameCount);
1013            frameCount = minFrameCount;
1014        }
1015        // Make sure that application is notified with sufficient margin before underrun
1016        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1017            mNotificationFramesAct = frameCount/nBuffering;
1018        }
1019
1020    } else {
1021        // For fast tracks, the frame count calculations and checks are done by server
1022    }
1023
1024    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1025    if (mIsTimed) {
1026        trackFlags |= IAudioFlinger::TRACK_TIMED;
1027    }
1028
1029    pid_t tid = -1;
1030    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1031        trackFlags |= IAudioFlinger::TRACK_FAST;
1032        if (mAudioTrackThread != 0) {
1033            tid = mAudioTrackThread->getTid();
1034        }
1035    }
1036
1037    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1038        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1039    }
1040
1041    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1042        trackFlags |= IAudioFlinger::TRACK_DIRECT;
1043    }
1044
1045    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1046                                // but we will still need the original value also
1047    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
1048                                                      mSampleRate,
1049                                                      // AudioFlinger only sees 16-bit PCM
1050                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
1051                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
1052                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
1053                                                      mChannelMask,
1054                                                      &temp,
1055                                                      &trackFlags,
1056                                                      mSharedBuffer,
1057                                                      output,
1058                                                      tid,
1059                                                      &mSessionId,
1060                                                      mClientUid,
1061                                                      &status);
1062
1063    if (status != NO_ERROR) {
1064        ALOGE("AudioFlinger could not create track, status: %d", status);
1065        goto release;
1066    }
1067    ALOG_ASSERT(track != 0);
1068
1069    // AudioFlinger now owns the reference to the I/O handle,
1070    // so we are no longer responsible for releasing it.
1071
1072    sp<IMemory> iMem = track->getCblk();
1073    if (iMem == 0) {
1074        ALOGE("Could not get control block");
1075        return NO_INIT;
1076    }
1077    void *iMemPointer = iMem->pointer();
1078    if (iMemPointer == NULL) {
1079        ALOGE("Could not get control block pointer");
1080        return NO_INIT;
1081    }
1082    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1083    if (mAudioTrack != 0) {
1084        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1085        mDeathNotifier.clear();
1086    }
1087    mAudioTrack = track;
1088    mCblkMemory = iMem;
1089    IPCThreadState::self()->flushCommands();
1090
1091    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1092    mCblk = cblk;
1093    // note that temp is the (possibly revised) value of frameCount
1094    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1095        // In current design, AudioTrack client checks and ensures frame count validity before
1096        // passing it to AudioFlinger so AudioFlinger should not return a different value except
1097        // for fast track as it uses a special method of assigning frame count.
1098        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
1099    }
1100    frameCount = temp;
1101
1102    mAwaitBoost = false;
1103    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1104        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1105            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
1106            mAwaitBoost = true;
1107            if (mSharedBuffer == 0) {
1108                // Theoretically double-buffering is not required for fast tracks,
1109                // due to tighter scheduling.  But in practice, to accommodate kernels with
1110                // scheduling jitter, and apps with computation jitter, we use double-buffering.
1111                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1112                    mNotificationFramesAct = frameCount/nBuffering;
1113                }
1114            }
1115        } else {
1116            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
1117            // once denied, do not request again if IAudioTrack is re-created
1118            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1119            if (mSharedBuffer == 0) {
1120                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1121                    mNotificationFramesAct = frameCount/nBuffering;
1122                }
1123            }
1124        }
1125    }
1126    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1127        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1128            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1129        } else {
1130            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1131            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1132            // FIXME This is a warning, not an error, so don't return error status
1133            //return NO_INIT;
1134        }
1135    }
1136    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1137        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
1138            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
1139        } else {
1140            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
1141            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
1142            // FIXME This is a warning, not an error, so don't return error status
1143            //return NO_INIT;
1144        }
1145    }
1146
1147    // We retain a copy of the I/O handle, but don't own the reference
1148    mOutput = output;
1149    mRefreshRemaining = true;
1150
1151    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1152    // is the value of pointer() for the shared buffer, otherwise buffers points
1153    // immediately after the control block.  This address is for the mapping within client
1154    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1155    void* buffers;
1156    if (mSharedBuffer == 0) {
1157        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1158    } else {
1159        buffers = mSharedBuffer->pointer();
1160    }
1161
1162    mAudioTrack->attachAuxEffect(mAuxEffectId);
1163    // FIXME don't believe this lie
1164    mLatency = afLatency + (1000*frameCount) / mSampleRate;
1165
1166    mFrameCount = frameCount;
1167    // If IAudioTrack is re-created, don't let the requested frameCount
1168    // decrease.  This can confuse clients that cache frameCount().
1169    if (frameCount > mReqFrameCount) {
1170        mReqFrameCount = frameCount;
1171    }
1172
1173    // update proxy
1174    if (mSharedBuffer == 0) {
1175        mStaticProxy.clear();
1176        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1177    } else {
1178        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1179        mProxy = mStaticProxy;
1180    }
1181    mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
1182    mProxy->setSendLevel(mSendLevel);
1183    mProxy->setSampleRate(mSampleRate);
1184    mProxy->setEpoch(epoch);
1185    mProxy->setMinimum(mNotificationFramesAct);
1186
1187    mDeathNotifier = new DeathNotifier(this);
1188    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1189
1190    return NO_ERROR;
1191    }
1192
1193release:
1194    AudioSystem::releaseOutput(output);
1195    if (status == NO_ERROR) {
1196        status = NO_INIT;
1197    }
1198    return status;
1199}
1200
1201status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1202{
1203    if (audioBuffer == NULL) {
1204        return BAD_VALUE;
1205    }
1206    if (mTransfer != TRANSFER_OBTAIN) {
1207        audioBuffer->frameCount = 0;
1208        audioBuffer->size = 0;
1209        audioBuffer->raw = NULL;
1210        return INVALID_OPERATION;
1211    }
1212
1213    const struct timespec *requested;
1214    struct timespec timeout;
1215    if (waitCount == -1) {
1216        requested = &ClientProxy::kForever;
1217    } else if (waitCount == 0) {
1218        requested = &ClientProxy::kNonBlocking;
1219    } else if (waitCount > 0) {
1220        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1221        timeout.tv_sec = ms / 1000;
1222        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1223        requested = &timeout;
1224    } else {
1225        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1226        requested = NULL;
1227    }
1228    return obtainBuffer(audioBuffer, requested);
1229}
1230
1231status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1232        struct timespec *elapsed, size_t *nonContig)
1233{
1234    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1235    uint32_t oldSequence = 0;
1236    uint32_t newSequence;
1237
1238    Proxy::Buffer buffer;
1239    status_t status = NO_ERROR;
1240
1241    static const int32_t kMaxTries = 5;
1242    int32_t tryCounter = kMaxTries;
1243
1244    do {
1245        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1246        // keep them from going away if another thread re-creates the track during obtainBuffer()
1247        sp<AudioTrackClientProxy> proxy;
1248        sp<IMemory> iMem;
1249
1250        {   // start of lock scope
1251            AutoMutex lock(mLock);
1252
1253            newSequence = mSequence;
1254            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1255            if (status == DEAD_OBJECT) {
1256                // re-create track, unless someone else has already done so
1257                if (newSequence == oldSequence) {
1258                    status = restoreTrack_l("obtainBuffer");
1259                    if (status != NO_ERROR) {
1260                        buffer.mFrameCount = 0;
1261                        buffer.mRaw = NULL;
1262                        buffer.mNonContig = 0;
1263                        break;
1264                    }
1265                }
1266            }
1267            oldSequence = newSequence;
1268
1269            // Keep the extra references
1270            proxy = mProxy;
1271            iMem = mCblkMemory;
1272
1273            if (mState == STATE_STOPPING) {
1274                status = -EINTR;
1275                buffer.mFrameCount = 0;
1276                buffer.mRaw = NULL;
1277                buffer.mNonContig = 0;
1278                break;
1279            }
1280
1281            // Non-blocking if track is stopped or paused
1282            if (mState != STATE_ACTIVE) {
1283                requested = &ClientProxy::kNonBlocking;
1284            }
1285
1286        }   // end of lock scope
1287
1288        buffer.mFrameCount = audioBuffer->frameCount;
1289        // FIXME starts the requested timeout and elapsed over from scratch
1290        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1291
1292    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1293
1294    audioBuffer->frameCount = buffer.mFrameCount;
1295    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1296    audioBuffer->raw = buffer.mRaw;
1297    if (nonContig != NULL) {
1298        *nonContig = buffer.mNonContig;
1299    }
1300    return status;
1301}
1302
1303void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1304{
1305    if (mTransfer == TRANSFER_SHARED) {
1306        return;
1307    }
1308
1309    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1310    if (stepCount == 0) {
1311        return;
1312    }
1313
1314    Proxy::Buffer buffer;
1315    buffer.mFrameCount = stepCount;
1316    buffer.mRaw = audioBuffer->raw;
1317
1318    AutoMutex lock(mLock);
1319    mInUnderrun = false;
1320    mProxy->releaseBuffer(&buffer);
1321
1322    // restart track if it was disabled by audioflinger due to previous underrun
1323    if (mState == STATE_ACTIVE) {
1324        audio_track_cblk_t* cblk = mCblk;
1325        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1326            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1327            // FIXME ignoring status
1328            mAudioTrack->start();
1329        }
1330    }
1331}
1332
1333// -------------------------------------------------------------------------
1334
1335ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1336{
1337    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1338        return INVALID_OPERATION;
1339    }
1340
1341    if (isDirect()) {
1342        AutoMutex lock(mLock);
1343        int32_t flags = android_atomic_and(
1344                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1345                            &mCblk->mFlags);
1346        if (flags & CBLK_INVALID) {
1347            return DEAD_OBJECT;
1348        }
1349    }
1350
1351    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1352        // Sanity-check: user is most-likely passing an error code, and it would
1353        // make the return value ambiguous (actualSize vs error).
1354        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
1355        return BAD_VALUE;
1356    }
1357
1358    size_t written = 0;
1359    Buffer audioBuffer;
1360
1361    while (userSize >= mFrameSize) {
1362        audioBuffer.frameCount = userSize / mFrameSize;
1363
1364        status_t err = obtainBuffer(&audioBuffer,
1365                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1366        if (err < 0) {
1367            if (written > 0) {
1368                break;
1369            }
1370            return ssize_t(err);
1371        }
1372
1373        size_t toWrite;
1374        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1375            // Divide capacity by 2 to take expansion into account
1376            toWrite = audioBuffer.size >> 1;
1377            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1378        } else {
1379            toWrite = audioBuffer.size;
1380            memcpy(audioBuffer.i8, buffer, toWrite);
1381        }
1382        buffer = ((const char *) buffer) + toWrite;
1383        userSize -= toWrite;
1384        written += toWrite;
1385
1386        releaseBuffer(&audioBuffer);
1387    }
1388
1389    return written;
1390}
1391
1392// -------------------------------------------------------------------------
1393
1394TimedAudioTrack::TimedAudioTrack() {
1395    mIsTimed = true;
1396}
1397
1398status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1399{
1400    AutoMutex lock(mLock);
1401    status_t result = UNKNOWN_ERROR;
1402
1403#if 1
1404    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1405    // while we are accessing the cblk
1406    sp<IAudioTrack> audioTrack = mAudioTrack;
1407    sp<IMemory> iMem = mCblkMemory;
1408#endif
1409
1410    // If the track is not invalid already, try to allocate a buffer.  alloc
1411    // fails indicating that the server is dead, flag the track as invalid so
1412    // we can attempt to restore in just a bit.
1413    audio_track_cblk_t* cblk = mCblk;
1414    if (!(cblk->mFlags & CBLK_INVALID)) {
1415        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1416        if (result == DEAD_OBJECT) {
1417            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1418        }
1419    }
1420
1421    // If the track is invalid at this point, attempt to restore it. and try the
1422    // allocation one more time.
1423    if (cblk->mFlags & CBLK_INVALID) {
1424        result = restoreTrack_l("allocateTimedBuffer");
1425
1426        if (result == NO_ERROR) {
1427            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1428        }
1429    }
1430
1431    return result;
1432}
1433
1434status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1435                                           int64_t pts)
1436{
1437    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1438    {
1439        AutoMutex lock(mLock);
1440        audio_track_cblk_t* cblk = mCblk;
1441        // restart track if it was disabled by audioflinger due to previous underrun
1442        if (buffer->size() != 0 && status == NO_ERROR &&
1443                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1444            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1445            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1446            // FIXME ignoring status
1447            mAudioTrack->start();
1448        }
1449    }
1450    return status;
1451}
1452
1453status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1454                                                TargetTimeline target)
1455{
1456    return mAudioTrack->setMediaTimeTransform(xform, target);
1457}
1458
1459// -------------------------------------------------------------------------
1460
1461nsecs_t AudioTrack::processAudioBuffer()
1462{
1463    // Currently the AudioTrack thread is not created if there are no callbacks.
1464    // Would it ever make sense to run the thread, even without callbacks?
1465    // If so, then replace this by checks at each use for mCbf != NULL.
1466    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1467
1468    mLock.lock();
1469    if (mAwaitBoost) {
1470        mAwaitBoost = false;
1471        mLock.unlock();
1472        static const int32_t kMaxTries = 5;
1473        int32_t tryCounter = kMaxTries;
1474        uint32_t pollUs = 10000;
1475        do {
1476            int policy = sched_getscheduler(0);
1477            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1478                break;
1479            }
1480            usleep(pollUs);
1481            pollUs <<= 1;
1482        } while (tryCounter-- > 0);
1483        if (tryCounter < 0) {
1484            ALOGE("did not receive expected priority boost on time");
1485        }
1486        // Run again immediately
1487        return 0;
1488    }
1489
1490    // Can only reference mCblk while locked
1491    int32_t flags = android_atomic_and(
1492        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1493
1494    // Check for track invalidation
1495    if (flags & CBLK_INVALID) {
1496        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1497        // AudioSystem cache. We should not exit here but after calling the callback so
1498        // that the upper layers can recreate the track
1499        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1500            status_t status = restoreTrack_l("processAudioBuffer");
1501            mLock.unlock();
1502            // Run again immediately, but with a new IAudioTrack
1503            return 0;
1504        }
1505    }
1506
1507    bool waitStreamEnd = mState == STATE_STOPPING;
1508    bool active = mState == STATE_ACTIVE;
1509
1510    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1511    bool newUnderrun = false;
1512    if (flags & CBLK_UNDERRUN) {
1513#if 0
1514        // Currently in shared buffer mode, when the server reaches the end of the buffer,
1515        // the track stays active in a continuous underrun state.  It's up to the application
1516        // to pause or stop the track, or to set the position to a new offset within the buffer.
1517        // This was some experimental code to auto-pause on underrun.  Keeping it here
1518        // in "#if 0" so we can revisit it if we add a real sequencer for shared memory content.
1519        if (mTransfer == TRANSFER_SHARED) {
1520            mState = STATE_PAUSED;
1521            active = false;
1522        }
1523#endif
1524        if (!mInUnderrun) {
1525            mInUnderrun = true;
1526            newUnderrun = true;
1527        }
1528    }
1529
1530    // Get current position of server
1531    size_t position = mProxy->getPosition();
1532
1533    // Manage marker callback
1534    bool markerReached = false;
1535    size_t markerPosition = mMarkerPosition;
1536    // FIXME fails for wraparound, need 64 bits
1537    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1538        mMarkerReached = markerReached = true;
1539    }
1540
1541    // Determine number of new position callback(s) that will be needed, while locked
1542    size_t newPosCount = 0;
1543    size_t newPosition = mNewPosition;
1544    size_t updatePeriod = mUpdatePeriod;
1545    // FIXME fails for wraparound, need 64 bits
1546    if (updatePeriod > 0 && position >= newPosition) {
1547        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1548        mNewPosition += updatePeriod * newPosCount;
1549    }
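    // Illustrative example (not in the original source): with updatePeriod == 1000 frames and
    // position 2500 frames past newPosition, newPosCount == (2500 / 1000) + 1 == 3, so three
    // EVENT_NEW_POS callbacks are delivered below and mNewPosition advances by 3000 frames.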
1550
1551    // Cache other fields that will be needed soon
1552    uint32_t loopPeriod = mLoopPeriod;
1553    uint32_t sampleRate = mSampleRate;
1554    uint32_t notificationFrames = mNotificationFramesAct;
1555    if (mRefreshRemaining) {
1556        mRefreshRemaining = false;
1557        mRemainingFrames = notificationFrames;
1558        mRetryOnPartialBuffer = false;
1559    }
1560    size_t misalignment = mProxy->getMisalignment();
1561    uint32_t sequence = mSequence;
1562    sp<AudioTrackClientProxy> proxy = mProxy;
1563
1564    // These fields don't need to be cached, because they are assigned only by set():
1565    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1566    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1567
1568    mLock.unlock();
1569
1570    if (waitStreamEnd) {
1571        struct timespec timeout;
1572        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1573        timeout.tv_nsec = 0;
1574
1575        status_t status = proxy->waitStreamEndDone(&timeout);
1576        switch (status) {
1577        case NO_ERROR:
1578        case DEAD_OBJECT:
1579        case TIMED_OUT:
1580            mCbf(EVENT_STREAM_END, mUserData, NULL);
1581            {
1582                AutoMutex lock(mLock);
1583                // The previously assigned value of waitStreamEnd is no longer valid,
1584                // since the mutex has been unlocked and either the callback handler
1585        // or another thread could have restarted the AudioTrack during that time.
1586                waitStreamEnd = mState == STATE_STOPPING;
1587                if (waitStreamEnd) {
1588                    mState = STATE_STOPPED;
1589                }
1590            }
1591            if (waitStreamEnd && status != DEAD_OBJECT) {
1592                return NS_INACTIVE;
1593            }
1594            break;
1595        }
1596        return 0;
1597    }
1598
1599    // perform callbacks while unlocked
1600    if (newUnderrun) {
1601        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1602    }
1603    // FIXME we will miss loops if the loop cycle was signaled several times since the last
1604    //       call to processAudioBuffer()
1605    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1606        mCbf(EVENT_LOOP_END, mUserData, NULL);
1607    }
1608    if (flags & CBLK_BUFFER_END) {
1609        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1610    }
1611    if (markerReached) {
1612        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1613    }
1614    while (newPosCount > 0) {
1615        size_t temp = newPosition;
1616        mCbf(EVENT_NEW_POS, mUserData, &temp);
1617        newPosition += updatePeriod;
1618        newPosCount--;
1619    }
1620
1621    if (mObservedSequence != sequence) {
1622        mObservedSequence = sequence;
1623        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1624        // for offloaded tracks, just wait for the upper layers to recreate the track
1625        if (isOffloadedOrDirect()) {
1626            return NS_INACTIVE;
1627        }
1628    }
1629
1630    // if inactive, then don't run this thread again until it is restarted
1631    if (!active) {
1632        return NS_INACTIVE;
1633    }
1634
1635    // Compute the estimated time until the next timed event (position, markers, loops)
1636    // FIXME only for non-compressed audio
1637    uint32_t minFrames = ~0;
1638    if (!markerReached && position < markerPosition) {
1639        minFrames = markerPosition - position;
1640    }
1641    if (loopPeriod > 0 && loopPeriod < minFrames) {
1642        minFrames = loopPeriod;
1643    }
1644    if (updatePeriod > 0 && updatePeriod < minFrames) {
1645        minFrames = updatePeriod;
1646    }
1647
1648    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1649    static const uint32_t kPoll = 0;
1650    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1651        minFrames = kPoll * notificationFrames;
1652    }
1653
1654    // Convert frame units to time units
1655    nsecs_t ns = NS_WHENEVER;
1656    if (minFrames != (uint32_t) ~0) {
1657        // This "fudge factor" avoids soaking the CPU, and compensates for late progress by the server
1658        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1659        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1660    }
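    // Worked example (illustrative): minFrames == 480 at sampleRate == 48000 gives
    // (480 * 1e9) / 48000 == 10 ms, plus the 10 ms fudge, so ns == 20 ms.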
1661
1662    // If not supplying data by EVENT_MORE_DATA, then we're done
1663    if (mTransfer != TRANSFER_CALLBACK) {
1664        return ns;
1665    }
1666
1667    struct timespec timeout;
1668    const struct timespec *requested = &ClientProxy::kForever;
1669    if (ns != NS_WHENEVER) {
1670        timeout.tv_sec = ns / 1000000000LL;
1671        timeout.tv_nsec = ns % 1000000000LL;
1672        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1673        requested = &timeout;
1674    }
1675
1676    while (mRemainingFrames > 0) {
1677
1678        Buffer audioBuffer;
1679        audioBuffer.frameCount = mRemainingFrames;
1680        size_t nonContig;
1681        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1682        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1683                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1684        requested = &ClientProxy::kNonBlocking;
1685        size_t avail = audioBuffer.frameCount + nonContig;
1686        ALOGV("obtainBuffer(%u) returned %u = %u + %u err %d",
1687                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1688        if (err != NO_ERROR) {
1689            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1690                    (isOffloaded() && (err == DEAD_OBJECT))) {
1691                return 0;
1692            }
1693            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1694            return NS_NEVER;
1695        }
1696
1697        if (mRetryOnPartialBuffer && !isOffloaded()) {
1698            mRetryOnPartialBuffer = false;
1699            if (avail < mRemainingFrames) {
1700                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1701                if (ns < 0 || myns < ns) {
1702                    ns = myns;
1703                }
1704                return ns;
1705            }
1706        }
1707
1708        // Divide the buffer size by 2 to account for the expansion
1709        // due to 8-to-16-bit conversion: the callback must fill only half
1710        // of the destination buffer
1711        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1712            audioBuffer.size >>= 1;
1713        }
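        // (After the callback returns, the 8-bit samples are expanded in place to 16-bit and
        // audioBuffer.size is doubled again, further below, before the buffer is released.)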
1714
1715        size_t reqSize = audioBuffer.size;
1716        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1717        size_t writtenSize = audioBuffer.size;
1718
1719        // Sanity check on returned size
1720        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1721            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1722                    reqSize, (int) writtenSize);
1723            return NS_NEVER;
1724        }
1725
1726        if (writtenSize == 0) {
1727            // The callback is done filling buffers.
1728            // Keep this thread going to handle timed events and
1729            // still try to get more data at intervals of WAIT_PERIOD_MS,
1730            // but don't just spin and burn CPU: sleep instead.
1731            return WAIT_PERIOD_MS * 1000000LL;
1732        }
1733
1734        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1735            // 8 to 16 bit conversion, note that source and destination are the same address
1736            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1737            audioBuffer.size <<= 1;
1738        }
1739
1740        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1741        audioBuffer.frameCount = releasedFrames;
1742        mRemainingFrames -= releasedFrames;
1743        if (misalignment >= releasedFrames) {
1744            misalignment -= releasedFrames;
1745        } else {
1746            misalignment = 0;
1747        }
1748
1749        releaseBuffer(&audioBuffer);
1750
1751        // FIXME here is where we would repeat EVENT_MORE_DATA on the same, advanced buffer
1752        // if the callback does not accept the full chunk
1753        if (writtenSize < reqSize) {
1754            continue;
1755        }
1756
1757        // There could be enough non-contiguous frames available to satisfy the remaining request
1758        if (mRemainingFrames <= nonContig) {
1759            continue;
1760        }
1761
1762#if 0
1763        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1764        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1765        // that total to a sum == notificationFrames.
1766        if (0 < misalignment && misalignment <= mRemainingFrames) {
1767            mRemainingFrames = misalignment;
1768            return (mRemainingFrames * 1100000000LL) / sampleRate;
1769        }
1770#endif
1771
1772    }
1773    mRemainingFrames = notificationFrames;
1774    mRetryOnPartialBuffer = true;
1775
1776    // A lot has transpired since ns was calculated, so run again immediately and recalculate
1777    return 0;
1778}
1779
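// Illustrative only, not part of the original file: a minimal sketch of a client-side
// EVENT_MORE_DATA callback, matching the contract checked above -- the Buffer arrives with
// "size" set to the number of bytes requested, the callback fills "raw" and rewrites "size"
// with the number of bytes actually supplied (0 means "no more data for now").  The function
// and variable names are hypothetical; kept in "#if 0" like the other sketches in this file.
#if 0
static void exampleMoreDataCallback(int event, void* user, void* info)
{
    if (event != AudioTrack::EVENT_MORE_DATA) {
        return;                                     // other events carry no buffer to fill
    }
    AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
    size_t bytesReady = 0;                          // hypothetical: bytes the client has queued
    size_t toCopy = bytesReady < buffer->size ? bytesReady : buffer->size;
    // memcpy(buffer->raw, <client source>, toCopy);
    buffer->size = toCopy;                          // must not exceed the requested size
}
#endif
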
1780status_t AudioTrack::restoreTrack_l(const char *from)
1781{
1782    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1783          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
1784    ++mSequence;
1785    status_t result;
1786
1787    // refresh the audio configuration cache in this process to make sure we get new
1788    // output parameters in createTrack_l()
1789    AudioSystem::clearAudioConfigCache();
1790
1791    if (isOffloadedOrDirect_l()) {
1792        // FIXME re-creation of offloaded tracks is not yet implemented
1793        return DEAD_OBJECT;
1794    }
1795
1796    // If the new IAudioTrack is created, createTrack_l() will modify the
1797    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1798    // It will also release the strong references to the previous IAudioTrack and IMemory.
1799
1800    // take the frames that will be lost by track recreation into account in the saved position
1801    size_t position = mProxy->getPosition() + mProxy->getFramesFilled();
1802    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1803    result = createTrack_l(position /*epoch*/);
1804
1805    if (result == NO_ERROR) {
1806        // continue playback from last known position, but
1807        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1808        if (mStaticProxy != NULL) {
1809            mLoopPeriod = 0;
1810            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1811        }
1812        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1813        //       track destruction have been played? This is critical for the SoundPool implementation.
1814        //       This must be broken, and needs to be tested/debugged.
1815#if 0
1816        // restore write index and set other indexes to reflect empty buffer status
1817        if (!strcmp(from, "start")) {
1818            // Make sure that a client relying on callback events indicating underrun or
1819            // the actual amount of audio frames played (e.g. SoundPool) receives them.
1820            if (mSharedBuffer == 0) {
1821                // restart playback even if buffer is not completely filled.
1822                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1823            }
1824        }
1825#endif
1826        if (mState == STATE_ACTIVE) {
1827            result = mAudioTrack->start();
1828        }
1829    }
1830    if (result != NO_ERROR) {
1831        ALOGW("restoreTrack_l() failed status %d", result);
1832        mState = STATE_STOPPED;
1833    }
1834
1835    return result;
1836}
1837
1838status_t AudioTrack::setParameters(const String8& keyValuePairs)
1839{
1840    AutoMutex lock(mLock);
1841    return mAudioTrack->setParameters(keyValuePairs);
1842}
1843
1844status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1845{
1846    AutoMutex lock(mLock);
1847    // FIXME not implemented for fast tracks; should use proxy and SSQ
1848    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1849        return INVALID_OPERATION;
1850    }
1851    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
1852        return INVALID_OPERATION;
1853    }
1854    status_t status = mAudioTrack->getTimestamp(timestamp);
1855    if (status == NO_ERROR) {
1856        timestamp.mPosition += mProxy->getEpoch();
1857    }
1858    return status;
1859}
1860
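// Illustrative only, not part of the original file: how a client might extrapolate the current
// playback head from the (position, time) pair returned by getTimestamp().  AudioTimestamp is
// assumed here to expose mPosition (in frames) and mTime (a CLOCK_MONOTONIC timespec); the
// function name and sampleRate parameter are hypothetical.  Kept in "#if 0" as a sketch.
#if 0
static int64_t examplePlaybackHeadFrames(const sp<AudioTrack>& track, uint32_t sampleRate)
{
    AudioTimestamp ts;
    if (track->getTimestamp(ts) != NO_ERROR) {
        return -1;                                  // e.g. fast track, or track not yet started
    }
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    int64_t elapsedNs = (now.tv_sec - ts.mTime.tv_sec) * 1000000000LL
            + (now.tv_nsec - ts.mTime.tv_nsec);
    return (int64_t) ts.mPosition + (elapsedNs * (int64_t) sampleRate) / 1000000000LL;
}
#endif
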
1861String8 AudioTrack::getParameters(const String8& keys)
1862{
1863    audio_io_handle_t output = getOutput();
1864    if (output != AUDIO_IO_HANDLE_NONE) {
1865        return AudioSystem::getParameters(output, keys);
1866    } else {
1867        return String8::empty();
1868    }
1869}
1870
1871bool AudioTrack::isOffloaded() const
1872{
1873    AutoMutex lock(mLock);
1874    return isOffloaded_l();
1875}
1876
1877bool AudioTrack::isDirect() const
1878{
1879    AutoMutex lock(mLock);
1880    return isDirect_l();
1881}
1882
1883bool AudioTrack::isOffloadedOrDirect() const
1884{
1885    AutoMutex lock(mLock);
1886    return isOffloadedOrDirect_l();
1887}
1888
1889
1890status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
1891{
1892
1893    const size_t SIZE = 256;
1894    char buffer[SIZE];
1895    String8 result;
1896
1897    result.append(" AudioTrack::dump\n");
1898    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1899            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
1900    result.append(buffer);
1901    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
1902            mChannelCount, mFrameCount);
1903    result.append(buffer);
1904    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1905    result.append(buffer);
1906    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1907    result.append(buffer);
1908    ::write(fd, result.string(), result.size());
1909    return NO_ERROR;
1910}
1911
1912uint32_t AudioTrack::getUnderrunFrames() const
1913{
1914    AutoMutex lock(mLock);
1915    return mProxy->getUnderrunFrames();
1916}
1917
1918void AudioTrack::setAttributesFromStreamType(audio_stream_type_t streamType) {
1919    mAttributes.flags = 0x0;
1920
1921    switch (streamType) {
1922    case AUDIO_STREAM_DEFAULT:
1923    case AUDIO_STREAM_MUSIC:
1924        mAttributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
1925        mAttributes.usage = AUDIO_USAGE_MEDIA;
1926        break;
1927    case AUDIO_STREAM_VOICE_CALL:
1928        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1929        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1930        break;
1931    case AUDIO_STREAM_ENFORCED_AUDIBLE:
1932        mAttributes.flags  |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
1933        // intentional fall-through: shares attributes with AUDIO_STREAM_SYSTEM
1934    case AUDIO_STREAM_SYSTEM:
1935        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1936        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
1937        break;
1938    case AUDIO_STREAM_RING:
1939        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1940        mAttributes.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
1941        break;
1942    case AUDIO_STREAM_ALARM:
1943        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1944        mAttributes.usage = AUDIO_USAGE_ALARM;
1945        break;
1946    case AUDIO_STREAM_NOTIFICATION:
1947        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1948        mAttributes.usage = AUDIO_USAGE_NOTIFICATION;
1949        break;
1950    case AUDIO_STREAM_BLUETOOTH_SCO:
1951        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1952        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
1953        mAttributes.flags |= AUDIO_FLAG_SCO;
1954        break;
1955    case AUDIO_STREAM_DTMF:
1956        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
1957        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
1958        break;
1959    case AUDIO_STREAM_TTS:
1960        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
1961        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
1962        break;
1963    default:
1964        ALOGE("invalid stream type %d when converting to attributes", streamType);
1965    }
1966}
1967
1968void AudioTrack::setStreamTypeFromAttributes(audio_attributes_t& aa) {
1969    // flags to stream type mapping
1970    if ((aa.flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
1971        mStreamType = AUDIO_STREAM_ENFORCED_AUDIBLE;
1972        return;
1973    }
1974    if ((aa.flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
1975        mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
1976        return;
1977    }
1978
1979    // usage to stream type mapping
1980    switch (aa.usage) {
1981    case AUDIO_USAGE_MEDIA:
1982    case AUDIO_USAGE_GAME:
1983    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
1984    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
1985        mStreamType = AUDIO_STREAM_MUSIC;
1986        return;
1987    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
1988        mStreamType = AUDIO_STREAM_SYSTEM;
1989        return;
1990    case AUDIO_USAGE_VOICE_COMMUNICATION:
1991        mStreamType = AUDIO_STREAM_VOICE_CALL;
1992        return;
1993
1994    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
1995        mStreamType = AUDIO_STREAM_DTMF;
1996        return;
1997
1998    case AUDIO_USAGE_ALARM:
1999        mStreamType = AUDIO_STREAM_ALARM;
2000        return;
2001    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
2002        mStreamType = AUDIO_STREAM_RING;
2003        return;
2004
2005    case AUDIO_USAGE_NOTIFICATION:
2006    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
2007    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
2008    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2009    case AUDIO_USAGE_NOTIFICATION_EVENT:
2010        mStreamType = AUDIO_STREAM_NOTIFICATION;
2011        return;
2012
2013    case AUDIO_USAGE_UNKNOWN:
2014    default:
2015        mStreamType = AUDIO_STREAM_MUSIC;
2016    }
2017}
2018
2019bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
2020    // has flags that map to a strategy?
2021    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO)) != 0) {
2022        return true;
2023    }
2024
2025    // has known usage?
2026    switch (paa->usage) {
2027    case AUDIO_USAGE_UNKNOWN:
2028    case AUDIO_USAGE_MEDIA:
2029    case AUDIO_USAGE_VOICE_COMMUNICATION:
2030    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
2031    case AUDIO_USAGE_ALARM:
2032    case AUDIO_USAGE_NOTIFICATION:
2033    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
2034    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
2035    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
2036    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2037    case AUDIO_USAGE_NOTIFICATION_EVENT:
2038    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
2039    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
2040    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
2041    case AUDIO_USAGE_GAME:
2042        break;
2043    default:
2044        return false;
2045    }
2046    return true;
2047}
2048// =========================================================================
2049
2050void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2051{
2052    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2053    if (audioTrack != 0) {
2054        AutoMutex lock(audioTrack->mLock);
2055        audioTrack->mProxy->binderDied();
2056    }
2057}
2058
2059// =========================================================================
2060
2061AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2062    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2063      mIgnoreNextPausedInt(false)
2064{
2065}
2066
2067AudioTrack::AudioTrackThread::~AudioTrackThread()
2068{
2069}
2070
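// processAudioBuffer() return values, as interpreted by the switch in threadLoop() below:
//   0            run the loop again immediately
//   NS_INACTIVE  pause the thread until resume() is called
//   NS_NEVER     exit the thread
//   NS_WHENEVER  treated as a 1 second poll (see the FIXME below)
//   > 0          sleep for approximately that many nanoseconds before the next iteration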
2071bool AudioTrack::AudioTrackThread::threadLoop()
2072{
2073    {
2074        AutoMutex _l(mMyLock);
2075        if (mPaused) {
2076            mMyCond.wait(mMyLock);
2077            // caller will check for exitPending()
2078            return true;
2079        }
2080        if (mIgnoreNextPausedInt) {
2081            mIgnoreNextPausedInt = false;
2082            mPausedInt = false;
2083        }
2084        if (mPausedInt) {
2085            if (mPausedNs > 0) {
2086                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2087            } else {
2088                mMyCond.wait(mMyLock);
2089            }
2090            mPausedInt = false;
2091            return true;
2092        }
2093    }
2094    nsecs_t ns = mReceiver.processAudioBuffer();
2095    switch (ns) {
2096    case 0:
2097        return true;
2098    case NS_INACTIVE:
2099        pauseInternal();
2100        return true;
2101    case NS_NEVER:
2102        return false;
2103    case NS_WHENEVER:
2104        // FIXME increase poll interval, or make event-driven
2105        ns = 1000000000LL;
2106        // fall through
2107    default:
2108        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
2109        pauseInternal(ns);
2110        return true;
2111    }
2112}
2113
2114void AudioTrack::AudioTrackThread::requestExit()
2115{
2116    // must be in this order to avoid a race condition
2117    Thread::requestExit();
2118    resume();
2119}
2120
2121void AudioTrack::AudioTrackThread::pause()
2122{
2123    AutoMutex _l(mMyLock);
2124    mPaused = true;
2125}
2126
2127void AudioTrack::AudioTrackThread::resume()
2128{
2129    AutoMutex _l(mMyLock);
2130    mIgnoreNextPausedInt = true;
2131    if (mPaused || mPausedInt) {
2132        mPaused = false;
2133        mPausedInt = false;
2134        mMyCond.signal();
2135    }
2136}
2137
2138void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2139{
2140    AutoMutex _l(mMyLock);
2141    mPausedInt = true;
2142    mPausedNs = ns;
2143}
2144
2145} // namespace android
2146