// AudioTrack.cpp — revision 0d6db582f2ccc9f8943c5f3965e2994b7d137158
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
17
//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioResamplerPublic.h>
#include <media/AudioTrack.h>
#include <media/IAudioFlinger.h>
#include <private/media/AudioTrackShared.h>
#include <utils/Log.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120
35
36
37namespace android {
38// ---------------------------------------------------------------------------
39
// Convert a timespec to a count of microseconds (nanoseconds truncated).
static int64_t convertTimespecToUs(const struct timespec &tv)
{
    const int64_t seconds = tv.tv_sec;
    const int64_t micros = tv.tv_nsec / 1000;
    return seconds * 1000000ll + micros;
}
44
// current monotonic time in microseconds.
static int64_t getNowUs()
{
    struct timespec now;
    (void) clock_gettime(CLOCK_MONOTONIC, &now);
    // Convert inline: whole seconds plus truncated nanoseconds.
    return int64_t(now.tv_sec) * 1000000ll + now.tv_nsec / 1000;
}
52
// static
// Compute the minimum client frame count needed to create an AudioTrack for
// the given stream type and sample rate, based on the output mixer's sample
// rate, frame count and latency as reported by AudioSystem.
// Returns BAD_VALUE for a null out-parameter or a zero result, or propagates
// the AudioSystem query error.
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME merge with similar code in createTrack_l(), except we're missing
    //       some information here that is available in createTrack_l():
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // Ensure that buffer depth covers at least audio hardware latency.
    // (1000 * afFrameCount) / afSampleRate is the duration of one hardware
    // buffer in the same unit as afLatency, so this yields a buffer count.
    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }

    // sampleRate == 0 means "use the output's native rate": no rate scaling.
    // Otherwise scale by the client/output rate ratio, in 64-bit to avoid
    // overflow of the intermediate product.
    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
            afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
    // The formula above should always produce a non-zero value, but return an error
    // in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
    return NO_ERROR;
}
111
112// ---------------------------------------------------------------------------
113
// Default constructor: creates an uninitialized AudioTrack.  set() must be
// called before the track is usable; mStatus stays NO_INIT until then.
AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    // Start from neutral audio attributes; set() may replace them later.
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
126
// Convenience constructor for streaming use: no shared buffer is passed
// (data arrives via write() or the callback).  All real work is delegated
// to set(); check initCheck()/mStatus for the result.
AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes);
}
154
// Convenience constructor for static-buffer use: the entire sound is provided
// up front in sharedBuffer, so no frame count is passed (0 lets the server
// derive it).  All real work is delegated to set().
AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes);
}
182
// Destructor: tears down the client side of the track.  Cleanup is only
// needed if set() succeeded (mStatus == NO_ERROR); otherwise nothing was
// acquired.  Order matters: stop playback, kill the callback thread, then
// release binder/shared-memory references.
AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // Stop watching for server death before dropping the binder reference.
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        // Push any queued binder commands out now so the server-side release
        // is not deferred.
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
                IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}
206
207status_t AudioTrack::set(
208        audio_stream_type_t streamType,
209        uint32_t sampleRate,
210        audio_format_t format,
211        audio_channel_mask_t channelMask,
212        size_t frameCount,
213        audio_output_flags_t flags,
214        callback_t cbf,
215        void* user,
216        uint32_t notificationFrames,
217        const sp<IMemory>& sharedBuffer,
218        bool threadCanCallJava,
219        int sessionId,
220        transfer_type transferType,
221        const audio_offload_info_t *offloadInfo,
222        int uid,
223        pid_t pid,
224        const audio_attributes_t* pAttributes)
225{
226    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
227          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
228          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
229          sessionId, transferType);
230
231    switch (transferType) {
232    case TRANSFER_DEFAULT:
233        if (sharedBuffer != 0) {
234            transferType = TRANSFER_SHARED;
235        } else if (cbf == NULL || threadCanCallJava) {
236            transferType = TRANSFER_SYNC;
237        } else {
238            transferType = TRANSFER_CALLBACK;
239        }
240        break;
241    case TRANSFER_CALLBACK:
242        if (cbf == NULL || sharedBuffer != 0) {
243            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
244            return BAD_VALUE;
245        }
246        break;
247    case TRANSFER_OBTAIN:
248    case TRANSFER_SYNC:
249        if (sharedBuffer != 0) {
250            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
251            return BAD_VALUE;
252        }
253        break;
254    case TRANSFER_SHARED:
255        if (sharedBuffer == 0) {
256            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
257            return BAD_VALUE;
258        }
259        break;
260    default:
261        ALOGE("Invalid transfer type %d", transferType);
262        return BAD_VALUE;
263    }
264    mSharedBuffer = sharedBuffer;
265    mTransfer = transferType;
266
267    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
268            sharedBuffer->size());
269
270    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
271
272    AutoMutex lock(mLock);
273
274    // invariant that mAudioTrack != 0 is true only after set() returns successfully
275    if (mAudioTrack != 0) {
276        ALOGE("Track already in use");
277        return INVALID_OPERATION;
278    }
279
280    // handle default values first.
281    // TODO once AudioPolicyManager fully supports audio_attributes_t,
282    //   remove stream "text-to-speech" redirect
283    if ((streamType == AUDIO_STREAM_DEFAULT) || (streamType == AUDIO_STREAM_TTS)) {
284        streamType = AUDIO_STREAM_MUSIC;
285    }
286
287    if (pAttributes == NULL) {
288        if (uint32_t(streamType) >= AUDIO_STREAM_CNT) {
289            ALOGE("Invalid stream type %d", streamType);
290            return BAD_VALUE;
291        }
292        setAttributesFromStreamType(streamType);
293        mStreamType = streamType;
294    } else {
295        if (!isValidAttributes(pAttributes)) {
296            ALOGE("Invalid attributes: usage=%d content=%d flags=0x%x tags=[%s]",
297                pAttributes->usage, pAttributes->content_type, pAttributes->flags,
298                pAttributes->tags);
299        }
300        // stream type shouldn't be looked at, this track has audio attributes
301        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
302        setStreamTypeFromAttributes(mAttributes);
303        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
304                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
305    }
306
307    // these below should probably come from the audioFlinger too...
308    if (format == AUDIO_FORMAT_DEFAULT) {
309        format = AUDIO_FORMAT_PCM_16_BIT;
310    }
311
312    // validate parameters
313    if (!audio_is_valid_format(format)) {
314        ALOGE("Invalid format %#x", format);
315        return BAD_VALUE;
316    }
317    mFormat = format;
318
319    if (!audio_is_output_channel(channelMask)) {
320        ALOGE("Invalid channel mask %#x", channelMask);
321        return BAD_VALUE;
322    }
323    mChannelMask = channelMask;
324    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
325    mChannelCount = channelCount;
326
327    // AudioFlinger does not currently support 8-bit data in shared memory
328    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
329        ALOGE("8-bit data in shared memory is not supported");
330        return BAD_VALUE;
331    }
332
333    // force direct flag if format is not linear PCM
334    // or offload was requested
335    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
336            || !audio_is_linear_pcm(format)) {
337        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
338                    ? "Offload request, forcing to Direct Output"
339                    : "Not linear PCM, forcing to Direct Output");
340        flags = (audio_output_flags_t)
341                // FIXME why can't we allow direct AND fast?
342                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
343    }
344    // only allow deep buffering for music stream type
345    if (mStreamType != AUDIO_STREAM_MUSIC) {
346        flags = (audio_output_flags_t)(flags &~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
347    }
348
349    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
350        if (audio_is_linear_pcm(format)) {
351            mFrameSize = channelCount * audio_bytes_per_sample(format);
352        } else {
353            mFrameSize = sizeof(uint8_t);
354        }
355        mFrameSizeAF = mFrameSize;
356    } else {
357        ALOG_ASSERT(audio_is_linear_pcm(format));
358        mFrameSize = channelCount * audio_bytes_per_sample(format);
359        mFrameSizeAF = channelCount * audio_bytes_per_sample(
360                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
361        // createTrack will return an error if PCM format is not supported by server,
362        // so no need to check for specific PCM formats here
363    }
364
365    // sampling rate must be specified for direct outputs
366    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
367        return BAD_VALUE;
368    }
369    mSampleRate = sampleRate;
370
371    // Make copy of input parameter offloadInfo so that in the future:
372    //  (a) createTrack_l doesn't need it as an input parameter
373    //  (b) we can support re-creation of offloaded tracks
374    if (offloadInfo != NULL) {
375        mOffloadInfoCopy = *offloadInfo;
376        mOffloadInfo = &mOffloadInfoCopy;
377    } else {
378        mOffloadInfo = NULL;
379    }
380
381    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
382    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
383    mSendLevel = 0.0f;
384    // mFrameCount is initialized in createTrack_l
385    mReqFrameCount = frameCount;
386    mNotificationFramesReq = notificationFrames;
387    mNotificationFramesAct = 0;
388    mSessionId = sessionId;
389    int callingpid = IPCThreadState::self()->getCallingPid();
390    int mypid = getpid();
391    if (uid == -1 || (callingpid != mypid)) {
392        mClientUid = IPCThreadState::self()->getCallingUid();
393    } else {
394        mClientUid = uid;
395    }
396    if (pid == -1 || (callingpid != mypid)) {
397        mClientPid = callingpid;
398    } else {
399        mClientPid = pid;
400    }
401    mAuxEffectId = 0;
402    mFlags = flags;
403    mCbf = cbf;
404
405    if (cbf != NULL) {
406        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
407        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
408    }
409
410    // create the IAudioTrack
411    status_t status = createTrack_l();
412
413    if (status != NO_ERROR) {
414        if (mAudioTrackThread != 0) {
415            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
416            mAudioTrackThread->requestExitAndWait();
417            mAudioTrackThread.clear();
418        }
419        return status;
420    }
421
422    mStatus = NO_ERROR;
423    mState = STATE_STOPPED;
424    mUserData = user;
425    mLoopPeriod = 0;
426    mMarkerPosition = 0;
427    mMarkerReached = false;
428    mNewPosition = 0;
429    mUpdatePeriod = 0;
430    mServer = 0;
431    mPosition = 0;
432    mReleased = 0;
433    mStartUs = 0;
434    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
435    mSequence = 1;
436    mObservedSequence = mSequence;
437    mInUnderrun = false;
438
439    return NO_ERROR;
440}
441
442// -------------------------------------------------------------------------
443
// Start or resume playback.  A track paused while draining
// (STATE_PAUSED_STOPPING) resumes the drain; any other non-active state goes
// to STATE_ACTIVE.  On failure the state transition and thread/priority
// changes are rolled back.
status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    // Mark as in-underrun until data flows; NOTE(review): cleared elsewhere
    // (not visible in this chunk).
    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartUs = getNowUs();

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    // Clear the disabled flag; 'flags' snapshots the flag word so the
    // invalid-track check below sees the pre-clear state.
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (previousState == STATE_STOPPING) {
            // Drain in progress: just wake anything blocked on the proxy.
            mProxy->interrupt();
        } else {
            t->resume();
        }
    } else {
        // No callback thread: boost the calling thread for audio, remembering
        // the previous priority/policy so stop() or a failure can restore it.
        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
        get_sched_policy(0, &mPreviousSchedulingGroup);
        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    }

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        // Server track died or was invalidated: attempt to re-create it.
        status = restoreTrack_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
        // Roll back state and thread/priority changes made above.
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}
516
// Stop playback.  Offloaded tracks transition to STATE_STOPPING so the
// compressed stream can drain; others stop immediately.  No-op unless the
// track is active or paused.
void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        mReleased = 0;
    }

    // Wake any thread blocked on the proxy, then stop the server-side track.
    mProxy->interrupt();
    mAudioTrack->stop();
    // the playback head position will reset to 0, so if a marker is set, we need
    // to activate it again
    mMarkerReached = false;
#if 0
    // Force flush if a shared buffer is used otherwise audioflinger
    // will not stop before end of buffer is reached.
    // It may be needed to make sure that we stop playback, likely in case looping is on.
    if (mSharedBuffer != 0) {
        flush_l();
    }
#endif

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        // Keep the callback thread alive for an offloaded drain; otherwise
        // pause it.
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        // Restore the priority boost applied in start().
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}
555
556bool AudioTrack::stopped() const
557{
558    AutoMutex lock(mLock);
559    return mState != STATE_ACTIVE;
560}
561
562void AudioTrack::flush()
563{
564    if (mSharedBuffer != 0) {
565        return;
566    }
567    AutoMutex lock(mLock);
568    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
569        return;
570    }
571    flush_l();
572}
573
// Discard pending data and reset client-side counters.  _l suffix: caller
// must hold mLock; the track must not be active (asserted).
void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        // Wake any waiter blocked on the proxy before flushing it.
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}
592
// Pause playback.  An active track becomes PAUSED; a track that was draining
// (STATE_STOPPING) becomes PAUSED_STOPPING so start() can resume the drain.
// No-op in any other state.
void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    // Wake any thread blocked on the proxy, then pause the server-side track.
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}
625
626status_t AudioTrack::setVolume(float left, float right)
627{
628    // This duplicates a test by AudioTrack JNI, but that is not the only caller
629    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
630            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
631        return BAD_VALUE;
632    }
633
634    AutoMutex lock(mLock);
635    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
636    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
637
638    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
639
640    if (isOffloaded_l()) {
641        mAudioTrack->signal();
642    }
643    return NO_ERROR;
644}
645
646status_t AudioTrack::setVolume(float volume)
647{
648    return setVolume(volume, volume);
649}
650
651status_t AudioTrack::setAuxEffectSendLevel(float level)
652{
653    // This duplicates a test by AudioTrack JNI, but that is not the only caller
654    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
655        return BAD_VALUE;
656    }
657
658    AutoMutex lock(mLock);
659    mSendLevel = level;
660    mProxy->setSendLevel(level);
661
662    return NO_ERROR;
663}
664
665void AudioTrack::getAuxEffectSendLevel(float* level) const
666{
667    if (level != NULL) {
668        *level = mSendLevel;
669    }
670}
671
672status_t AudioTrack::setSampleRate(uint32_t rate)
673{
674    if (mIsTimed || isOffloadedOrDirect()) {
675        return INVALID_OPERATION;
676    }
677
678    AutoMutex lock(mLock);
679    if (mOutput == AUDIO_IO_HANDLE_NONE) {
680        return NO_INIT;
681    }
682    uint32_t afSamplingRate;
683    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
684        return NO_INIT;
685    }
686    if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
687        return BAD_VALUE;
688    }
689
690    mSampleRate = rate;
691    mProxy->setSampleRate(rate);
692
693    return NO_ERROR;
694}
695
// Return the current sample rate (0 for timed tracks).  For offloaded/direct
// outputs the rate can change mid-stream, so the HAL is polled and the cached
// value refreshed.
uint32_t AudioTrack::getSampleRate() const
{
    if (mIsTimed) {
        return 0;
    }

    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                // NOTE(review): member written from a const method; mSampleRate
                // is presumably declared mutable (declaration not visible here).
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}
718
719status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
720{
721    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
722        return INVALID_OPERATION;
723    }
724
725    if (loopCount == 0) {
726        ;
727    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
728            loopEnd - loopStart >= MIN_LOOP) {
729        ;
730    } else {
731        return BAD_VALUE;
732    }
733
734    AutoMutex lock(mLock);
735    // See setPosition() regarding setting parameters such as loop points or position while active
736    if (mState == STATE_ACTIVE) {
737        return INVALID_OPERATION;
738    }
739    setLoop_l(loopStart, loopEnd, loopCount);
740    return NO_ERROR;
741}
742
// Apply validated loop parameters.  _l suffix: caller must hold mLock (see
// the call site in setLoop()).
void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // FIXME If setting a loop also sets position to start of loop, then
    //       this is correct.  Otherwise it should be removed.
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    // A zero loop count disables looping, so the period becomes 0.
    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
}
751
752status_t AudioTrack::setMarkerPosition(uint32_t marker)
753{
754    // The only purpose of setting marker position is to get a callback
755    if (mCbf == NULL || isOffloadedOrDirect()) {
756        return INVALID_OPERATION;
757    }
758
759    AutoMutex lock(mLock);
760    mMarkerPosition = marker;
761    mMarkerReached = false;
762
763    return NO_ERROR;
764}
765
766status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
767{
768    if (isOffloadedOrDirect()) {
769        return INVALID_OPERATION;
770    }
771    if (marker == NULL) {
772        return BAD_VALUE;
773    }
774
775    AutoMutex lock(mLock);
776    *marker = mMarkerPosition;
777
778    return NO_ERROR;
779}
780
781status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
782{
783    // The only purpose of setting position update period is to get a callback
784    if (mCbf == NULL || isOffloadedOrDirect()) {
785        return INVALID_OPERATION;
786    }
787
788    AutoMutex lock(mLock);
789    mNewPosition = updateAndGetPosition_l() + updatePeriod;
790    mUpdatePeriod = updatePeriod;
791
792    return NO_ERROR;
793}
794
795status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
796{
797    if (isOffloadedOrDirect()) {
798        return INVALID_OPERATION;
799    }
800    if (updatePeriod == NULL) {
801        return BAD_VALUE;
802    }
803
804    AutoMutex lock(mLock);
805    *updatePeriod = mUpdatePeriod;
806
807    return NO_ERROR;
808}
809
// Reposition the playback head of a static-buffer track.  Only allowed while
// inactive; not supported for streaming, timed, or offloaded/direct tracks.
status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME Check whether loops and setting position are incompatible in old code.
    // If we use setLoop for both purposes we lose the capability to set the position while looping.
    // A zero loop count repositions the static buffer without enabling looping.
    mStaticProxy->setLoop(position, mFrameCount, 0);

    return NO_ERROR;
}
837
// Return the playback head position in frames.  Offloaded/direct tracks read
// the render position from the HAL (or the value cached at pause time);
// normal tracks use the server position via updateAndGetPosition_l().
status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    if (isOffloadedOrDirect_l()) {
        uint32_t dspFrames = 0;

        // While paused, report the position cached by pause() — see the
        // comment there about shared offload outputs.
        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l();
    }
    return NO_ERROR;
}
868
869status_t AudioTrack::getBufferPosition(uint32_t *position)
870{
871    if (mSharedBuffer == 0 || mIsTimed) {
872        return INVALID_OPERATION;
873    }
874    if (position == NULL) {
875        return BAD_VALUE;
876    }
877
878    AutoMutex lock(mLock);
879    *position = mStaticProxy->getBufferPosition();
880    return NO_ERROR;
881}
882
// Rewind a static-buffer track to the start of its buffer.  Only allowed
// while inactive; not for streaming, timed, or offloaded/direct tracks.
status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    mLoopPeriod = 0;
    // FIXME The new code cannot reload while keeping a loop specified.
    // Need to check how the old code handled this, and whether it's a significant change.
    mStaticProxy->setLoop(0, mFrameCount, 0);
    return NO_ERROR;
}
901
902audio_io_handle_t AudioTrack::getOutput() const
903{
904    AutoMutex lock(mLock);
905    return mOutput;
906}
907
908status_t AudioTrack::attachAuxEffect(int effectId)
909{
910    AutoMutex lock(mLock);
911    status_t status = mAudioTrack->attachAuxEffect(effectId);
912    if (status == NO_ERROR) {
913        mAuxEffectId = effectId;
914    }
915    return status;
916}
917
918// -------------------------------------------------------------------------
919
// must be called with mLock held
// Creates the server-side IAudioTrack via AudioFlinger and (re)initializes the
// client-side state that depends on it: the shared-memory control block (mCblk),
// the client proxy (mProxy / mStaticProxy), frame-count and notification sizing,
// the latency estimate, and the binder death notifier.
// On failure after the output handle is obtained but before it is handed to
// AudioFlinger, the handle is released at the 'release' label at the bottom.
status_t AudioTrack::createTrack_l()
{
    status_t status;
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    audio_io_handle_t output = AudioSystem::getOutputForAttr(&mAttributes, mSampleRate, mFormat,
            mChannelMask, mFlags, mOffloadInfo);
    if (output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
              " channel mask %#x, flags %#x",
              mStreamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    // Inner scope so that the 'goto release' statements below do not jump over
    // any of the declarations that follow (they are declared inside this block).
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all

    uint32_t afLatency;
    status = AudioSystem::getLatency(output, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    // A sample rate of 0 from the client means "use the output's rate".
    if (mSampleRate == 0) {
        mSampleRate = afSampleRate;
    }
    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK)) &&
            // matching sample rate
            (mSampleRate == afSampleRate))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, no sample rate conversion
    //  n = 3   normal track, with sample rate conversion
    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_is_linear_pcm(mFormat)) {
        // Compressed formats: the frame count is taken from the shared buffer
        // size, or from the output's frame count, and notifications are
        // forced to whole-buffer granularity.
        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {

        // Ensure that buffer alignment matches channel count
        // 8-bit data in shared memory is not currently supported by AudioFlinger
        size_t alignment = audio_bytes_per_sample(
                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
        if (alignment & 1) {
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSizeAF;

    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {

        // FIXME move these calculations and associated checks to server

        // Ensure that buffer depth covers at least audio hardware latency
        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
        ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
                afFrameCount, minBufCount, afSampleRate, afLatency);
        if (minBufCount <= nBuffering) {
            minBufCount = nBuffering;
        }

        // uint64_t math avoids overflow of afFrameCount * minBufCount * mSampleRate
        size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
        ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
                ", afLatency=%d",
                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);

        if (frameCount == 0) {
            frameCount = minFrameCount;
        } else if (frameCount < minFrameCount) {
            // not ALOGW because it happens all the time when playing key clicks over A2DP
            ALOGV("Minimum buffer size corrected from %zu to %zu",
                     frameCount, minFrameCount);
            frameCount = minFrameCount;
        }
        // Make sure that application is notified with sufficient margin before underrun
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
            mNotificationFramesAct = frameCount/nBuffering;
        }

    } else {
        // For fast tracks, the frame count calculations and checks are done by server
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        // Pass the callback thread's tid so the server can raise its priority.
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        trackFlags |= IAudioFlinger::TRACK_DIRECT;
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    sp<IAudioTrack> track = audioFlinger->createTrack(mStreamType,
                                                      mSampleRate,
                                                      // AudioFlinger only sees 16-bit PCM
                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &trackFlags,
                                                      mSharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        // Detach the death notification from the old track before replacing it.
        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    // NOTE(review): presumably flushes pending binder refcount decrements so the
    // previous IAudioTrack/IMemory are released promptly — confirm intent.
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
            mAwaitBoost = true;
            if (mSharedBuffer == 0) {
                // Theoretically double-buffering is not required for fast tracks,
                // due to tighter scheduling.  But in practice, to accommodate kernels with
                // scheduling jitter, and apps with computation jitter, we use double-buffering.
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        } else {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
            // once denied, do not request again if IAudioTrack is re-created
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
            if (mSharedBuffer == 0) {
                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
                    mNotificationFramesAct = frameCount/nBuffering;
                }
            }
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
    } else {
        buffers = mSharedBuffer->pointer();
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME don't believe this lie
    mLatency = afLatency + (1000*frameCount) / mSampleRate;

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
        mProxy = mStaticProxy;
    }
    mProxy->setVolumeLR(GAIN_MINIFLOAT_PACKED_UNITY);
    mProxy->setSendLevel(mSendLevel);
    mProxy->setSampleRate(mSampleRate);
    mProxy->setMinimum(mNotificationFramesAct);

    // Arrange to be notified (see DeathNotifier) if the media server dies.
    mDeathNotifier = new DeathNotifier(this);
    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
    }

// Error path: release the output handle acquired from getOutputForAttr() and
// normalize a NO_ERROR status to NO_INIT so callers always see a failure code.
release:
    AudioSystem::releaseOutput(output);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}
1241
1242status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1243{
1244    if (audioBuffer == NULL) {
1245        return BAD_VALUE;
1246    }
1247    if (mTransfer != TRANSFER_OBTAIN) {
1248        audioBuffer->frameCount = 0;
1249        audioBuffer->size = 0;
1250        audioBuffer->raw = NULL;
1251        return INVALID_OPERATION;
1252    }
1253
1254    const struct timespec *requested;
1255    struct timespec timeout;
1256    if (waitCount == -1) {
1257        requested = &ClientProxy::kForever;
1258    } else if (waitCount == 0) {
1259        requested = &ClientProxy::kNonBlocking;
1260    } else if (waitCount > 0) {
1261        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1262        timeout.tv_sec = ms / 1000;
1263        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1264        requested = &timeout;
1265    } else {
1266        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1267        requested = NULL;
1268    }
1269    return obtainBuffer(audioBuffer, requested);
1270}
1271
// Internal obtainBuffer(): acquires a contiguous region of the track buffer
// from the client proxy, transparently re-creating the IAudioTrack (via
// restoreTrack_l()) if the previous attempt failed with DEAD_OBJECT.
// Called with mLock NOT held; takes it briefly inside the loop.
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    // Bound the number of DEAD_OBJECT -> restore -> retry cycles.
    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        // Restore failed: report an empty buffer to the caller.
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                // Track is draining to stream end; don't hand out buffers.
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);

    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));

    // Copy the proxy's result back into the caller's Buffer.
    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}
1343
// Returns a buffer previously acquired with obtainBuffer() to the track,
// advancing the release counter, and restarts the server-side track if it
// had been disabled due to a previous underrun.
void AudioTrack::releaseBuffer(Buffer* audioBuffer)
{
    // Shared (static) buffer tracks don't release individual buffers.
    if (mTransfer == TRANSFER_SHARED) {
        return;
    }

    // Convert the caller-filled byte count into whole frames; partial frames
    // and empty releases are ignored.
    size_t stepCount = audioBuffer->size / mFrameSizeAF;
    if (stepCount == 0) {
        return;
    }

    Proxy::Buffer buffer;
    buffer.mFrameCount = stepCount;
    buffer.mRaw = audioBuffer->raw;

    AutoMutex lock(mLock);
    mReleased += stepCount;
    mInUnderrun = false;
    mProxy->releaseBuffer(&buffer);

    // restart track if it was disabled by audioflinger due to previous underrun
    if (mState == STATE_ACTIVE) {
        audio_track_cblk_t* cblk = mCblk;
        // Atomically clear CBLK_DISABLED and check whether it had been set.
        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
            // FIXME ignoring status
            mAudioTrack->start();
        }
    }
}
1374
1375// -------------------------------------------------------------------------
1376
1377ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1378{
1379    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1380        return INVALID_OPERATION;
1381    }
1382
1383    if (isDirect()) {
1384        AutoMutex lock(mLock);
1385        int32_t flags = android_atomic_and(
1386                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1387                            &mCblk->mFlags);
1388        if (flags & CBLK_INVALID) {
1389            return DEAD_OBJECT;
1390        }
1391    }
1392
1393    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1394        // Sanity-check: user is most-likely passing an error code, and it would
1395        // make the return value ambiguous (actualSize vs error).
1396        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
1397        return BAD_VALUE;
1398    }
1399
1400    size_t written = 0;
1401    Buffer audioBuffer;
1402
1403    while (userSize >= mFrameSize) {
1404        audioBuffer.frameCount = userSize / mFrameSize;
1405
1406        status_t err = obtainBuffer(&audioBuffer,
1407                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1408        if (err < 0) {
1409            if (written > 0) {
1410                break;
1411            }
1412            return ssize_t(err);
1413        }
1414
1415        size_t toWrite;
1416        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1417            // Divide capacity by 2 to take expansion into account
1418            toWrite = audioBuffer.size >> 1;
1419            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1420        } else {
1421            toWrite = audioBuffer.size;
1422            memcpy(audioBuffer.i8, buffer, toWrite);
1423        }
1424        buffer = ((const char *) buffer) + toWrite;
1425        userSize -= toWrite;
1426        written += toWrite;
1427
1428        releaseBuffer(&audioBuffer);
1429    }
1430
1431    return written;
1432}
1433
1434// -------------------------------------------------------------------------
1435
// Marks this track as timed so that createTrack_l() requests a TIMED track
// from AudioFlinger (see the IAudioFlinger::TRACK_TIMED flag there), and so
// that the non-timed write()/obtainBuffer() paths reject this track.
TimedAudioTrack::TimedAudioTrack() {
    mIsTimed = true;
}
1439
// Allocates a timed buffer of 'size' bytes from the server-side track.
// If the first attempt fails with DEAD_OBJECT, the track is flagged invalid,
// restored via restoreTrack_l(), and the allocation is retried once.
status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
{
    AutoMutex lock(mLock);
    status_t result = UNKNOWN_ERROR;

#if 1
    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
    // while we are accessing the cblk
    sp<IAudioTrack> audioTrack = mAudioTrack;
    sp<IMemory> iMem = mCblkMemory;
#endif

    // If the track is not invalid already, try to allocate a buffer.  alloc
    // fails indicating that the server is dead, flag the track as invalid so
    // we can attempt to restore in just a bit.
    audio_track_cblk_t* cblk = mCblk;
    if (!(cblk->mFlags & CBLK_INVALID)) {
        result = mAudioTrack->allocateTimedBuffer(size, buffer);
        if (result == DEAD_OBJECT) {
            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
        }
    }

    // If the track is invalid at this point, attempt to restore it. and try the
    // allocation one more time.
    if (cblk->mFlags & CBLK_INVALID) {
        result = restoreTrack_l("allocateTimedBuffer");

        if (result == NO_ERROR) {
            // Note: after restoreTrack_l(), mAudioTrack refers to the new track.
            result = mAudioTrack->allocateTimedBuffer(size, buffer);
        }
    }

    return result;
}
1475
1476status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1477                                           int64_t pts)
1478{
1479    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1480    {
1481        AutoMutex lock(mLock);
1482        audio_track_cblk_t* cblk = mCblk;
1483        // restart track if it was disabled by audioflinger due to previous underrun
1484        if (buffer->size() != 0 && status == NO_ERROR &&
1485                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1486            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1487            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1488            // FIXME ignoring status
1489            mAudioTrack->start();
1490        }
1491    }
1492    return status;
1493}
1494
1495status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1496                                                TargetTimeline target)
1497{
1498    return mAudioTrack->setMediaTimeTransform(xform, target);
1499}
1500
1501// -------------------------------------------------------------------------
1502
1503nsecs_t AudioTrack::processAudioBuffer()
1504{
1505    // Currently the AudioTrack thread is not created if there are no callbacks.
1506    // Would it ever make sense to run the thread, even without callbacks?
1507    // If so, then replace this by checks at each use for mCbf != NULL.
1508    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1509
1510    mLock.lock();
1511    if (mAwaitBoost) {
1512        mAwaitBoost = false;
1513        mLock.unlock();
1514        static const int32_t kMaxTries = 5;
1515        int32_t tryCounter = kMaxTries;
1516        uint32_t pollUs = 10000;
1517        do {
1518            int policy = sched_getscheduler(0);
1519            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1520                break;
1521            }
1522            usleep(pollUs);
1523            pollUs <<= 1;
1524        } while (tryCounter-- > 0);
1525        if (tryCounter < 0) {
1526            ALOGE("did not receive expected priority boost on time");
1527        }
1528        // Run again immediately
1529        return 0;
1530    }
1531
1532    // Can only reference mCblk while locked
1533    int32_t flags = android_atomic_and(
1534        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1535
1536    // Check for track invalidation
1537    if (flags & CBLK_INVALID) {
1538        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1539        // AudioSystem cache. We should not exit here but after calling the callback so
1540        // that the upper layers can recreate the track
1541        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1542            status_t status = restoreTrack_l("processAudioBuffer");
1543            mLock.unlock();
1544            // Run again immediately, but with a new IAudioTrack
1545            return 0;
1546        }
1547    }
1548
1549    bool waitStreamEnd = mState == STATE_STOPPING;
1550    bool active = mState == STATE_ACTIVE;
1551
1552    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1553    bool newUnderrun = false;
1554    if (flags & CBLK_UNDERRUN) {
1555#if 0
1556        // Currently in shared buffer mode, when the server reaches the end of buffer,
1557        // the track stays active in continuous underrun state.  It's up to the application
1558        // to pause or stop the track, or set the position to a new offset within buffer.
1559        // This was some experimental code to auto-pause on underrun.   Keeping it here
1560        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1561        if (mTransfer == TRANSFER_SHARED) {
1562            mState = STATE_PAUSED;
1563            active = false;
1564        }
1565#endif
1566        if (!mInUnderrun) {
1567            mInUnderrun = true;
1568            newUnderrun = true;
1569        }
1570    }
1571
1572    // Get current position of server
1573    size_t position = updateAndGetPosition_l();
1574
1575    // Manage marker callback
1576    bool markerReached = false;
1577    size_t markerPosition = mMarkerPosition;
1578    // FIXME fails for wraparound, need 64 bits
1579    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1580        mMarkerReached = markerReached = true;
1581    }
1582
1583    // Determine number of new position callback(s) that will be needed, while locked
1584    size_t newPosCount = 0;
1585    size_t newPosition = mNewPosition;
1586    size_t updatePeriod = mUpdatePeriod;
1587    // FIXME fails for wraparound, need 64 bits
1588    if (updatePeriod > 0 && position >= newPosition) {
1589        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1590        mNewPosition += updatePeriod * newPosCount;
1591    }
1592
1593    // Cache other fields that will be needed soon
1594    uint32_t loopPeriod = mLoopPeriod;
1595    uint32_t sampleRate = mSampleRate;
1596    uint32_t notificationFrames = mNotificationFramesAct;
1597    if (mRefreshRemaining) {
1598        mRefreshRemaining = false;
1599        mRemainingFrames = notificationFrames;
1600        mRetryOnPartialBuffer = false;
1601    }
1602    size_t misalignment = mProxy->getMisalignment();
1603    uint32_t sequence = mSequence;
1604    sp<AudioTrackClientProxy> proxy = mProxy;
1605
1606    // These fields don't need to be cached, because they are assigned only by set():
1607    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1608    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1609
1610    mLock.unlock();
1611
1612    if (waitStreamEnd) {
1613        struct timespec timeout;
1614        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1615        timeout.tv_nsec = 0;
1616
1617        status_t status = proxy->waitStreamEndDone(&timeout);
1618        switch (status) {
1619        case NO_ERROR:
1620        case DEAD_OBJECT:
1621        case TIMED_OUT:
1622            mCbf(EVENT_STREAM_END, mUserData, NULL);
1623            {
1624                AutoMutex lock(mLock);
1625                // The previously assigned value of waitStreamEnd is no longer valid,
1626                // since the mutex has been unlocked and either the callback handler
1627                // or another thread could have re-started the AudioTrack during that time.
1628                waitStreamEnd = mState == STATE_STOPPING;
1629                if (waitStreamEnd) {
1630                    mState = STATE_STOPPED;
1631                    mReleased = 0;
1632                }
1633            }
1634            if (waitStreamEnd && status != DEAD_OBJECT) {
1635               return NS_INACTIVE;
1636            }
1637            break;
1638        }
1639        return 0;
1640    }
1641
1642    // perform callbacks while unlocked
1643    if (newUnderrun) {
1644        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1645    }
1646    // FIXME we will miss loops if loop cycle was signaled several times since last call
1647    //       to processAudioBuffer()
1648    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1649        mCbf(EVENT_LOOP_END, mUserData, NULL);
1650    }
1651    if (flags & CBLK_BUFFER_END) {
1652        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1653    }
1654    if (markerReached) {
1655        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1656    }
1657    while (newPosCount > 0) {
1658        size_t temp = newPosition;
1659        mCbf(EVENT_NEW_POS, mUserData, &temp);
1660        newPosition += updatePeriod;
1661        newPosCount--;
1662    }
1663
1664    if (mObservedSequence != sequence) {
1665        mObservedSequence = sequence;
1666        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1667        // for offloaded tracks, just wait for the upper layers to recreate the track
1668        if (isOffloadedOrDirect()) {
1669            return NS_INACTIVE;
1670        }
1671    }
1672
1673    // if inactive, then don't run me again until re-started
1674    if (!active) {
1675        return NS_INACTIVE;
1676    }
1677
1678    // Compute the estimated time until the next timed event (position, markers, loops)
1679    // FIXME only for non-compressed audio
1680    uint32_t minFrames = ~0;
1681    if (!markerReached && position < markerPosition) {
1682        minFrames = markerPosition - position;
1683    }
1684    if (loopPeriod > 0 && loopPeriod < minFrames) {
1685        minFrames = loopPeriod;
1686    }
1687    if (updatePeriod > 0 && updatePeriod < minFrames) {
1688        minFrames = updatePeriod;
1689    }
1690
1691    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1692    static const uint32_t kPoll = 0;
1693    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1694        minFrames = kPoll * notificationFrames;
1695    }
1696
1697    // Convert frame units to time units
1698    nsecs_t ns = NS_WHENEVER;
1699    if (minFrames != (uint32_t) ~0) {
1700        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1701        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1702        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1703    }
1704
1705    // If not supplying data by EVENT_MORE_DATA, then we're done
1706    if (mTransfer != TRANSFER_CALLBACK) {
1707        return ns;
1708    }
1709
1710    struct timespec timeout;
1711    const struct timespec *requested = &ClientProxy::kForever;
1712    if (ns != NS_WHENEVER) {
1713        timeout.tv_sec = ns / 1000000000LL;
1714        timeout.tv_nsec = ns % 1000000000LL;
1715        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1716        requested = &timeout;
1717    }
1718
1719    while (mRemainingFrames > 0) {
1720
1721        Buffer audioBuffer;
1722        audioBuffer.frameCount = mRemainingFrames;
1723        size_t nonContig;
1724        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1725        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1726                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1727        requested = &ClientProxy::kNonBlocking;
1728        size_t avail = audioBuffer.frameCount + nonContig;
1729        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1730                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1731        if (err != NO_ERROR) {
1732            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1733                    (isOffloaded() && (err == DEAD_OBJECT))) {
1734                return 0;
1735            }
1736            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1737            return NS_NEVER;
1738        }
1739
1740        if (mRetryOnPartialBuffer && !isOffloaded()) {
1741            mRetryOnPartialBuffer = false;
1742            if (avail < mRemainingFrames) {
1743                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1744                if (ns < 0 || myns < ns) {
1745                    ns = myns;
1746                }
1747                return ns;
1748            }
1749        }
1750
1751        // Divide buffer size by 2 to take into account the expansion
1752        // due to 8 to 16 bit conversion: the callback must fill only half
1753        // of the destination buffer
1754        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1755            audioBuffer.size >>= 1;
1756        }
1757
1758        size_t reqSize = audioBuffer.size;
1759        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1760        size_t writtenSize = audioBuffer.size;
1761
1762        // Sanity check on returned size
1763        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1764            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1765                    reqSize, ssize_t(writtenSize));
1766            return NS_NEVER;
1767        }
1768
1769        if (writtenSize == 0) {
1770            // The callback is done filling buffers
1771            // Keep this thread going to handle timed events and
1772            // still try to get more data in intervals of WAIT_PERIOD_MS
1773            // but don't just loop and block the CPU, so wait
1774            return WAIT_PERIOD_MS * 1000000LL;
1775        }
1776
1777        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1778            // 8 to 16 bit conversion, note that source and destination are the same address
1779            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1780            audioBuffer.size <<= 1;
1781        }
1782
1783        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1784        audioBuffer.frameCount = releasedFrames;
1785        mRemainingFrames -= releasedFrames;
1786        if (misalignment >= releasedFrames) {
1787            misalignment -= releasedFrames;
1788        } else {
1789            misalignment = 0;
1790        }
1791
1792        releaseBuffer(&audioBuffer);
1793
1794        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
1795        // if callback doesn't like to accept the full chunk
1796        if (writtenSize < reqSize) {
1797            continue;
1798        }
1799
1800        // There could be enough non-contiguous frames available to satisfy the remaining request
1801        if (mRemainingFrames <= nonContig) {
1802            continue;
1803        }
1804
1805#if 0
1806        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1807        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1808        // that total to a sum == notificationFrames.
1809        if (0 < misalignment && misalignment <= mRemainingFrames) {
1810            mRemainingFrames = misalignment;
1811            return (mRemainingFrames * 1100000000LL) / sampleRate;
1812        }
1813#endif
1814
1815    }
1816    mRemainingFrames = notificationFrames;
1817    mRetryOnPartialBuffer = true;
1818
1819    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1820    return 0;
1821}
1822
// Called with mLock held after the IAudioTrack has died or been invalidated.
// Attempts to create a replacement IAudioTrack and resume playback from the
// last known position.  "from" names the calling function, for logging only.
// Returns NO_ERROR on success, or the failure status (track is left stopped).
status_t AudioTrack::restoreTrack_l(const char *from)
{
    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
    // bump the sequence so processAudioBuffer() notices the new IAudioTrack
    // and delivers EVENT_NEW_IAUDIOTRACK to the client
    ++mSequence;
    status_t result;

    // refresh the audio configuration cache in this process to make sure we get new
    // output parameters and new IAudioFlinger in createTrack_l()
    AudioSystem::clearAudioConfigCache();

    if (isOffloadedOrDirect_l()) {
        // FIXME re-creation of offloaded tracks is not yet implemented
        return DEAD_OBJECT;
    }

    // save the old static buffer position
    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;

    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
    // following member variables: mAudioTrack, mCblkMemory and mCblk.
    // It will also delete the strong references on previous IAudioTrack and IMemory.
    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
    result = createTrack_l();

    // take the frames that will be lost by track recreation into account in saved position
    // (this snaps the client-side position to the released count before the new track starts)
    (void) updateAndGetPosition_l();
    mPosition = mReleased;

    if (result == NO_ERROR) {
        // continue playback from last known position, but
        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
        if (mStaticProxy != NULL) {
            mLoopPeriod = 0;
            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
        }
        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
        //       track destruction have been played? This is critical for SoundPool implementation
        //       This must be broken, and needs to be tested/debugged.
#if 0
        // restore write index and set other indexes to reflect empty buffer status
        if (!strcmp(from, "start")) {
            // Make sure that a client relying on callback events indicating underrun or
            // the actual amount of audio frames played (e.g SoundPool) receives them.
            if (mSharedBuffer == 0) {
                // restart playback even if buffer is not completely filled.
                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
            }
        }
#endif
        // if the track was playing when it died, restart the replacement immediately
        if (mState == STATE_ACTIVE) {
            result = mAudioTrack->start();
        }
    }
    if (result != NO_ERROR) {
        ALOGW("restoreTrack_l() failed status %d", result);
        mState = STATE_STOPPED;
        mReleased = 0;
    }

    return result;
}
1885
1886uint32_t AudioTrack::updateAndGetPosition_l()
1887{
1888    // This is the sole place to read server consumed frames
1889    uint32_t newServer = mProxy->getPosition();
1890    int32_t delta = newServer - mServer;
1891    mServer = newServer;
1892    // TODO There is controversy about whether there can be "negative jitter" in server position.
1893    //      This should be investigated further, and if possible, it should be addressed.
1894    //      A more definite failure mode is infrequent polling by client.
1895    //      One could call (void)getPosition_l() in releaseBuffer(),
1896    //      so mReleased and mPosition are always lock-step as best possible.
1897    //      That should ensure delta never goes negative for infrequent polling
1898    //      unless the server has more than 2^31 frames in its buffer,
1899    //      in which case the use of uint32_t for these counters has bigger issues.
1900    if (delta < 0) {
1901        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
1902        delta = 0;
1903    }
1904    return mPosition += (uint32_t) delta;
1905}
1906
// Forwards key/value parameter pairs to the server-side track under mLock.
// Returns the status from IAudioTrack::setParameters().
status_t AudioTrack::setParameters(const String8& keyValuePairs)
{
    AutoMutex lock(mLock);
    return mAudioTrack->setParameters(keyValuePairs);
}
1912
// Fills in "timestamp" with the playback position and the time at which it was
// presented.  Returns NO_ERROR on success; WOULD_BLOCK when no valid timestamp
// is available yet (caller may retry); INVALID_OPERATION for fast tracks and
// states where a timestamp cannot be provided.
status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
{
    AutoMutex lock(mLock);
    // FIXME not implemented for fast tracks; should use proxy and SSQ
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    // Filter by state: only playing/paused tracks (and offloaded tracks that are
    // draining to end-of-stream) can report a timestamp.
    switch (mState) {
    case STATE_ACTIVE:
    case STATE_PAUSED:
        break; // handle below
    case STATE_FLUSHED:
    case STATE_STOPPED:
        return WOULD_BLOCK;
    case STATE_STOPPING:
    case STATE_PAUSED_STOPPING:
        if (!isOffloaded_l()) {
            return INVALID_OPERATION;
        }
        break; // offloaded tracks handled below
    default:
        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
        break;
    }

    // The presented frame count must always lag behind the consumed frame count.
    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
    status_t status = mAudioTrack->getTimestamp(timestamp);
    if (status != NO_ERROR) {
        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
        return status;
    }
    if (isOffloadedOrDirect_l()) {
        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
            // use cached paused position in case another offloaded track is running.
            timestamp.mPosition = mPausedPosition;
            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
            return NO_ERROR;
        }

        // Check whether a pending flush or stop has completed, as those commands may
        // be asynchronous or return near finish.
        if (mStartUs != 0 && mSampleRate != 0) {
            static const int kTimeJitterUs = 100000; // 100 ms
            static const int k1SecUs = 1000000;

            const int64_t timeNow = getNowUs();

            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
                // reject timestamps taken before this track was (re)started
                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
                if (timestampTimeUs < mStartUs) {
                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
                }
                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
                const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;

                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
                    // Verify that the counter can't count faster than the sample rate
                    // since the start time.  If greater, then that means we have failed
                    // to completely flush or stop the previous playing track.
                    ALOGW("incomplete flush or stop:"
                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
                            timestamp.mPosition);
                    return WOULD_BLOCK;
                }
            }
            mStartUs = 0; // no need to check again, start timestamp has either expired or unneeded.
        }
    } else {
        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
        (void) updateAndGetPosition_l();
        // Server consumed (mServer) and presented both use the same server time base,
        // and server consumed is always >= presented.
        // The delta between these represents the number of frames in the buffer pipeline.
        // If this delta between these is greater than the client position, it means that
        // actually presented is still stuck at the starting line (figuratively speaking),
        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
            return INVALID_OPERATION;
        }
        // Convert timestamp position from server time base to client time base.
        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
        // But if we change it to 64-bit then this could fail.
        // If (mPosition - mServer) can be negative then should use:
        //   (int32_t)(mPosition - mServer)
        timestamp.mPosition += mPosition - mServer;
        // Immediately after a call to getPosition_l(), mPosition and
        // mServer both represent the same frame position.  mPosition is
        // in client's point of view, and mServer is in server's point of
        // view.  So the difference between them is the "fudge factor"
        // between client and server views due to stop() and/or new
        // IAudioTrack.  And timestamp.mPosition is initially in server's
        // point of view, so we need to apply the same fudge factor to it.
    }
    return status;
}
2011
2012String8 AudioTrack::getParameters(const String8& keys)
2013{
2014    audio_io_handle_t output = getOutput();
2015    if (output != AUDIO_IO_HANDLE_NONE) {
2016        return AudioSystem::getParameters(output, keys);
2017    } else {
2018        return String8::empty();
2019    }
2020}
2021
// Thread-safe public wrapper: returns whether this track uses offloaded output.
bool AudioTrack::isOffloaded() const
{
    AutoMutex lock(mLock);
    return isOffloaded_l();
}
2027
// Thread-safe public wrapper: returns whether this track uses direct output.
bool AudioTrack::isDirect() const
{
    AutoMutex lock(mLock);
    return isDirect_l();
}
2033
// Thread-safe public wrapper: returns whether this track uses offloaded or direct output.
bool AudioTrack::isOffloadedOrDirect() const
{
    AutoMutex lock(mLock);
    return isOffloadedOrDirect_l();
}
2039
2040
2041status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2042{
2043
2044    const size_t SIZE = 256;
2045    char buffer[SIZE];
2046    String8 result;
2047
2048    result.append(" AudioTrack::dump\n");
2049    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2050            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2051    result.append(buffer);
2052    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2053            mChannelCount, mFrameCount);
2054    result.append(buffer);
2055    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
2056    result.append(buffer);
2057    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2058    result.append(buffer);
2059    ::write(fd, result.string(), result.size());
2060    return NO_ERROR;
2061}
2062
// Thread-safe wrapper: returns the underrun frame count maintained by the client proxy.
uint32_t AudioTrack::getUnderrunFrames() const
{
    AutoMutex lock(mLock);
    return mProxy->getUnderrunFrames();
}
2068
2069void AudioTrack::setAttributesFromStreamType(audio_stream_type_t streamType) {
2070    mAttributes.flags = 0x0;
2071
2072    switch(streamType) {
2073    case AUDIO_STREAM_DEFAULT:
2074    case AUDIO_STREAM_MUSIC:
2075        mAttributes.content_type = AUDIO_CONTENT_TYPE_MUSIC;
2076        mAttributes.usage = AUDIO_USAGE_MEDIA;
2077        break;
2078    case AUDIO_STREAM_VOICE_CALL:
2079        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
2080        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
2081        break;
2082    case AUDIO_STREAM_ENFORCED_AUDIBLE:
2083        mAttributes.flags  |= AUDIO_FLAG_AUDIBILITY_ENFORCED;
2084        // intended fall through, attributes in common with STREAM_SYSTEM
2085    case AUDIO_STREAM_SYSTEM:
2086        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2087        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_SONIFICATION;
2088        break;
2089    case AUDIO_STREAM_RING:
2090        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2091        mAttributes.usage = AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE;
2092        break;
2093    case AUDIO_STREAM_ALARM:
2094        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2095        mAttributes.usage = AUDIO_USAGE_ALARM;
2096        break;
2097    case AUDIO_STREAM_NOTIFICATION:
2098        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2099        mAttributes.usage = AUDIO_USAGE_NOTIFICATION;
2100        break;
2101    case AUDIO_STREAM_BLUETOOTH_SCO:
2102        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
2103        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION;
2104        mAttributes.flags |= AUDIO_FLAG_SCO;
2105        break;
2106    case AUDIO_STREAM_DTMF:
2107        mAttributes.content_type = AUDIO_CONTENT_TYPE_SONIFICATION;
2108        mAttributes.usage = AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING;
2109        break;
2110    case AUDIO_STREAM_TTS:
2111        mAttributes.content_type = AUDIO_CONTENT_TYPE_SPEECH;
2112        mAttributes.usage = AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY;
2113        break;
2114    default:
2115        ALOGE("invalid stream type %d when converting to attributes", streamType);
2116    }
2117}
2118
// Derives mStreamType from audio attributes.  Flags take priority over usage;
// accessibility usage is additionally remapped based on currently-active streams.
// Note: the ASSISTANCE_ACCESSIBILITY case intentionally falls through into the
// media group when neither RING nor ALARM is active.
void AudioTrack::setStreamTypeFromAttributes(audio_attributes_t& aa) {
    // flags to stream type mapping
    if ((aa.flags & AUDIO_FLAG_AUDIBILITY_ENFORCED) == AUDIO_FLAG_AUDIBILITY_ENFORCED) {
        mStreamType = AUDIO_STREAM_ENFORCED_AUDIBLE;
        return;
    }
    if ((aa.flags & AUDIO_FLAG_SCO) == AUDIO_FLAG_SCO) {
        mStreamType = AUDIO_STREAM_BLUETOOTH_SCO;
        return;
    }
    // TODO once AudioPolicyManager fully supports audio_attributes_t,
    //   remove stream remap, the flag will be enough
    if ((aa.flags & AUDIO_FLAG_BEACON) == AUDIO_FLAG_BEACON) {
        mStreamType = AUDIO_STREAM_TTS;
        return;
    }

    // usage to stream type mapping
    switch (aa.usage) {
    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY: {
        // TODO once AudioPolicyManager fully supports audio_attributes_t,
        //   remove stream change based on stream activity
        // if a ringtone or alarm is active, accessibility sounds piggyback on
        // that stream so they remain audible; "break" exits the switch here
        bool active;
        status_t status = AudioSystem::isStreamActive(AUDIO_STREAM_RING, &active, 0);
        if (status == NO_ERROR && active == true) {
            mStreamType = AUDIO_STREAM_RING;
            break;
        }
        status = AudioSystem::isStreamActive(AUDIO_STREAM_ALARM, &active, 0);
        if (status == NO_ERROR && active == true) {
            mStreamType = AUDIO_STREAM_ALARM;
            break;
        }
    }    /// FALL THROUGH
    case AUDIO_USAGE_MEDIA:
    case AUDIO_USAGE_GAME:
    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
        mStreamType = AUDIO_STREAM_MUSIC;
        return;
    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
        mStreamType = AUDIO_STREAM_SYSTEM;
        return;
    case AUDIO_USAGE_VOICE_COMMUNICATION:
        mStreamType = AUDIO_STREAM_VOICE_CALL;
        return;

    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
        mStreamType = AUDIO_STREAM_DTMF;
        return;

    case AUDIO_USAGE_ALARM:
        mStreamType = AUDIO_STREAM_ALARM;
        return;
    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
        mStreamType = AUDIO_STREAM_RING;
        return;

    case AUDIO_USAGE_NOTIFICATION:
    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
    case AUDIO_USAGE_NOTIFICATION_EVENT:
        mStreamType = AUDIO_STREAM_NOTIFICATION;
        return;

    case AUDIO_USAGE_UNKNOWN:
    default:
        mStreamType = AUDIO_STREAM_MUSIC;
    }
}
2189
2190bool AudioTrack::isValidAttributes(const audio_attributes_t *paa) {
2191    // has flags that map to a strategy?
2192    if ((paa->flags & (AUDIO_FLAG_AUDIBILITY_ENFORCED | AUDIO_FLAG_SCO | AUDIO_FLAG_BEACON)) != 0) {
2193        return true;
2194    }
2195
2196    // has known usage?
2197    switch (paa->usage) {
2198    case AUDIO_USAGE_UNKNOWN:
2199    case AUDIO_USAGE_MEDIA:
2200    case AUDIO_USAGE_VOICE_COMMUNICATION:
2201    case AUDIO_USAGE_VOICE_COMMUNICATION_SIGNALLING:
2202    case AUDIO_USAGE_ALARM:
2203    case AUDIO_USAGE_NOTIFICATION:
2204    case AUDIO_USAGE_NOTIFICATION_TELEPHONY_RINGTONE:
2205    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_REQUEST:
2206    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_INSTANT:
2207    case AUDIO_USAGE_NOTIFICATION_COMMUNICATION_DELAYED:
2208    case AUDIO_USAGE_NOTIFICATION_EVENT:
2209    case AUDIO_USAGE_ASSISTANCE_ACCESSIBILITY:
2210    case AUDIO_USAGE_ASSISTANCE_NAVIGATION_GUIDANCE:
2211    case AUDIO_USAGE_ASSISTANCE_SONIFICATION:
2212    case AUDIO_USAGE_GAME:
2213        break;
2214    default:
2215        return false;
2216    }
2217    return true;
2218}
2219// =========================================================================
2220
// Binder death callback for the IAudioTrack.  Promotes the weak reference to
// the owning AudioTrack (which may already be destroyed) and notifies its proxy
// so blocked obtainBuffer() calls can return.
void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
{
    sp<AudioTrack> audioTrack = mAudioTrack.promote();
    if (audioTrack != 0) {
        AutoMutex lock(audioTrack->mLock);
        audioTrack->mProxy->binderDied();
    }
}
2229
2230// =========================================================================
2231
// Callback thread constructor: starts logically paused (mPaused == true) until
// the owning AudioTrack resumes it; "receiver" is the AudioTrack whose
// processAudioBuffer() this thread drives.
AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
      mIgnoreNextPausedInt(false)
{
}
2237
// No explicit cleanup needed; Thread base class handles thread teardown.
AudioTrack::AudioTrackThread::~AudioTrackThread()
{
}
2241
// One iteration of the callback thread: honor pause requests, then run
// processAudioBuffer() and interpret its return value as the next scheduling
// decision.  Returning true means "call threadLoop() again", false means exit.
bool AudioTrack::AudioTrackThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        if (mPaused) {
            // externally paused: block until resume() signals
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
        if (mIgnoreNextPausedInt) {
            // resume() was called; cancel any pending internal pause
            mIgnoreNextPausedInt = false;
            mPausedInt = false;
        }
        if (mPausedInt) {
            // internal (timed) pause requested by pauseInternal()
            if (mPausedNs > 0) {
                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
            } else {
                mMyCond.wait(mMyLock);
            }
            mPausedInt = false;
            return true;
        }
    }
    if (exitPending()) {
        return false;
    }
    // ns encodes the requested delay before the next processAudioBuffer() call,
    // or one of the special NS_* sentinels
    nsecs_t ns = mReceiver.processAudioBuffer();
    switch (ns) {
    case 0:
        // run again immediately
        return true;
    case NS_INACTIVE:
        // nothing to do until the track is restarted
        pauseInternal();
        return true;
    case NS_NEVER:
        // terminal condition: exit the thread
        return false;
    case NS_WHENEVER:
        // FIXME increase poll interval, or make event-driven
        ns = 1000000000LL;
        // fall through
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
        pauseInternal(ns);
        return true;
    }
}
2287
// Asks the thread to exit: set the exit flag first, then wake it up, so the
// thread cannot re-block on the condition after observing exitPending().
void AudioTrack::AudioTrackThread::requestExit()
{
    // must be in this order to avoid a race condition
    Thread::requestExit();
    resume();
}
2294
// Externally pause the callback thread; it will block at the top of
// threadLoop() until resume() is called.
void AudioTrack::AudioTrackThread::pause()
{
    AutoMutex _l(mMyLock);
    mPaused = true;
}
2300
// Wake the callback thread from either an external pause or an internal timed
// pause, and cancel any internal pause that has been requested but not yet
// entered (via mIgnoreNextPausedInt).
void AudioTrack::AudioTrackThread::resume()
{
    AutoMutex _l(mMyLock);
    mIgnoreNextPausedInt = true;
    if (mPaused || mPausedInt) {
        mPaused = false;
        mPausedInt = false;
        mMyCond.signal();
    }
}
2311
// Request an internal pause before the next threadLoop() iteration:
// ns > 0 sleeps for that duration, ns == 0 blocks until signaled.
void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
{
    AutoMutex _l(mMyLock);
    mPausedInt = true;
    mPausedNs = ns;
}
2318
2319}; // namespace android
2320