AudioTrack.cpp revision ea38ee7742e799b23bd8675f5801ef72f94de0f4
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120
static const int kMaxLoopCountNotifications = 32;

namespace android {
// ---------------------------------------------------------------------------

// TODO: Move to a separate .h

template <typename T>
static inline const T &min(const T &x, const T &y) {
    return x < y ? x : y;
}

template <typename T>
static inline const T &max(const T &x, const T &y) {
    return x > y ? x : y;
}

static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
{
    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
}
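// For example, framesToNanoseconds(480, 48000, 1.0f) is 10 ms of wall-clock time; at
// speed 2.0f the same 480 frames are consumed in 5 ms, since speed scales the effective rate.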

static int64_t convertTimespecToUs(const struct timespec &tv)
{
    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
}

// current monotonic time in microseconds.
static int64_t getNowUs()
{
    struct timespec tv;
    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
    return convertTimespecToUs(tv);
}

// FIXME: we don't use the pitch setting in the time stretcher (not working);
// instead we emulate it using our sample rate converter.
static const bool kFixPitch = true; // enable pitch fix
static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
{
    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
}

static inline float adjustSpeed(float speed, float pitch)
{
    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
}

static inline float adjustPitch(float pitch)
{
    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
}
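// Worked example of the pitch emulation above: for a 48000 Hz track with requested
// speed 1.0 and pitch 2.0, the effective sample rate becomes 96000 Hz, the effective
// speed becomes 0.5, and the pitch handed to the time stretcher is reset to normal.
// The net playback duration is unchanged while the tone is shifted up.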

// Must match similar computation in createTrack_l in Threads.cpp.
// TODO: Move to a common library
static size_t calculateMinFrameCount(
        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
{
    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }
#if 0
    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
    // but keeping the code here to make it easier to add later.
    if (minBufCount < notificationsPerBufferReq) {
        minBufCount = notificationsPerBufferReq;
    }
#endif
    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
            /*, notificationsPerBufferReq*/);
    return minBufCount * sourceFramesNeededWithTimestretch(
            sampleRate, afFrameCount, afSampleRate, speed);
}
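// Worked example: with afLatencyMs = 80, afFrameCount = 960 and afSampleRate = 48000,
// one hardware buffer lasts (1000 * 960) / 48000 = 20 ms, so minBufCount = 80 / 20 = 4.
// The result is then 4 times the number of source frames (at the track's sample rate
// and speed) needed to produce one 960-frame mix buffer at 48000 Hz.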

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
            /*, 0 notificationsPerBufferReq*/);

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
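// Typical client usage (sketch only): query the minimum frame count before choosing a
// buffer size for a streaming track.
//   size_t minFrames;
//   if (AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 44100) == NO_ERROR) {
//       // request at least minFrames (often a small multiple of it) from the constructor or set()
//   }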

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
        }
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);

    mThreadCanCallJava = threadCanCallJava;

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
324            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
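    // notificationFrames >= 0 is an absolute notification period in frames; a negative value
    // is only accepted for fast tracks and requests -notificationFrames notification periods
    // per track buffer (clamped to the range below), e.g. notificationFrames = -4 requests
    // four notification periods per buffer.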
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("notificationFrames=%d not permitted for non-fast track",
                    notificationFrames);
            return BAD_VALUE;
        }
        if (frameCount > 0) {
            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    notificationFrames, frameCount);
            return BAD_VALUE;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "notificationFrames=%d clamped to the range -%u to -%u",
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status_t status = createTrack_l();

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;

    return NO_ERROR;
}

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mRetrogradeMotionReported = false;

        // read last server side position change via timestamp.
        ExtendedTimestamp ets;
        if (mProxy->getTimestamp(&ets) == OK &&
                ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
                    (long long)(mFramesWrittenServerOffset
                            + ets.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)ets.mFlushed,
                    (long long)mFramesWritten);
            mFramesWrittenServerOffset = -ets.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartUs = getNowUs();

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
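    // android_atomic_and() returns the previous flag word, so this both clears the
    // CBLK_DISABLED bit and lets us check below whether the cblk was marked invalid.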
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }
    } else {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        mReleased = 0;
    }

    mProxy->interrupt();
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    AutoMutex lock(mLock);
    if (rate == mSampleRate) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
        return INVALID_OPERATION;
    }
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    // NOTE: it is theoretically possible, but highly unlikely, that a device change
    // could mean a previously allowed sampling rate is no longer allowed.
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }
    // TODO: Should we also check if the buffer size is compatible?

    mSampleRate = rate;
    mProxy->setSampleRate(effectiveSampleRate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

uint32_t AudioTrack::getOriginalSampleRate() const
{
    return mOriginalSampleRate;
}

status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
    AutoMutex lock(mLock);
    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION;
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    ALOGV("setPlaybackRate (input): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    const float effectivePitch = adjustPitch(playbackRate.mPitch);
    AudioPlaybackRate playbackRateTemp = playbackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;

    ALOGV("setPlaybackRate (effective): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            effectiveRate, effectiveSpeed, effectivePitch);

    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
        ALOGV("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // Check if the buffer size is compatible.
    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
        ALOGV("setPlaybackRate(%f, %f) failed (buffer size)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    // Check resampler ratios are within bounds
    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
                        playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    mPlaybackRate = playbackRate;
    //set effective rates
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    return NO_ERROR;
}

const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
{
    AutoMutex lock(mLock);
    return mPlaybackRate;
}

ssize_t AudioTrack::getBufferSizeInFrames()
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    return (ssize_t) mProxy->getBufferSizeInFrames();
}

status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
{
    if (duration == nullptr) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
    if (bufferSizeInFrames < 0) {
        return (status_t)bufferSizeInFrames;
    }
    *duration = (int64_t)((double)bufferSizeInFrames * 1000000
            / ((double)mSampleRate * mPlaybackRate.mSpeed));
    return NO_ERROR;
}

ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    // Reject if timed track or compressed audio.
    if (!audio_is_linear_pcm(mFormat)) {
        return INVALID_OPERATION;
    }
    return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // We do not update the periodic notification point.
    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopCount = loopCount;
    mLoopEnd = loopEnd;
    mLoopStart = loopStart;
    mLoopCountNotified = loopCount;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mMarkerPosition.getValue(marker);

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = updateAndGetPosition_l() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // After setting the position, use full update period before notification.
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mStaticProxy->setBufferPosition(position);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // FIXME: offloaded and direct tracks call into the HAL for render positions
    // for compressed/synced data; however, we use proxy position for pure linear pcm data
    // as we do not know the capability of the HAL for pcm position support and standby.
    // There may be some latency differences between the HAL position and the proxy position.
    if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
        uint32_t dspFrames = 0;

        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames; // actually unused
            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        if (mCblk->mFlags & CBLK_INVALID) {
            (void) restoreTrack_l("getPosition");
            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
        }

        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l().value();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    (void) updateAndGetPosition_l();
    mPosition = 0;
    mPreviousTimestampValid = false;
#if 0
    // The documentation is not clear on the behavior of reload() and the restoration
    // of loop count. Historically we have not restored loop count, start, end,
    // but it makes sense if one desires to repeat playing a particular sound.
    if (mLoopCount != 0) {
        mLoopCountNotified = mLoopCount;
        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
    }
#endif
    mStaticProxy->setBufferPosition(0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
    AutoMutex lock(mLock);
    if (mSelectedDeviceId != deviceId) {
        mSelectedDeviceId = deviceId;
        android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
    }
    return NO_ERROR;
}

audio_port_handle_t AudioTrack::getOutputDevice() {
    AutoMutex lock(mLock);
    return mSelectedDeviceId;
}

audio_port_handle_t AudioTrack::getRoutedDeviceId() {
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return AUDIO_PORT_HANDLE_NONE;
    }
    return AudioSystem::getDeviceIdForIo(mOutput);
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

audio_stream_type_t AudioTrack::streamType() const
{
    if (mStreamType == AUDIO_STREAM_DEFAULT) {
        return audio_attributes_to_stream_type(&mAttributes);
    }
    return mStreamType;
}

// -------------------------------------------------------------------------

// must be called with mLock held
status_t AudioTrack::createTrack_l()
{
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
    }
    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;

    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.

    status_t status;
    status = AudioSystem::getOutputForAttr(attr, &output,
                                           mSessionId, &streamType, mClientUid,
                                           mSampleRate, mFormat, mChannelMask,
                                           mFlags, mSelectedDeviceId, mOffloadInfo);

    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
              " channel mask %#x, flags %#x",
              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all
    status = AudioSystem::getLatency(output, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }
    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);

    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    // TODO consider making this a member variable if there are other uses for it later
    size_t afFrameCountHAL;
    status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
    if (status != NO_ERROR) {
        ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
        goto release;
    }
    ALOG_ASSERT(afFrameCountHAL > 0);

    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    if (mSampleRate == 0) {
        mSampleRate = mAfSampleRate;
        mOriginalSampleRate = mAfSampleRate;
    }

    // Client can only express a preference for FAST.  Server will perform additional tests.
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        bool useCaseAllowed =
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
        // sample rates must also match
        bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
        if (!fastAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
                "track %u Hz, output %u Hz",
                mTransfer, mSampleRate, mAfSampleRate);
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_has_proportional_frames(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = mAfFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(mFormat);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
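        // For example, 16-bit stereo data (2 bytes per sample, doubled for stereo) requires
        // the shared buffer to start on a 4-byte boundary.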
1333        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1334            ALOGE("Invalid buffer alignment: address %p, channel count %u",
1335                    mSharedBuffer->pointer(), mChannelCount);
1336            status = BAD_VALUE;
1337            goto release;
1338        }
1339
1340        // When initializing a shared buffer AudioTrack via constructors,
1341        // there's no frameCount parameter.
1342        // But when initializing a shared buffer AudioTrack via set(),
1343        // there _is_ a frameCount parameter.  We silently ignore it.
1344        frameCount = mSharedBuffer->size() / mFrameSize;
1345    } else {
1346        size_t minFrameCount = 0;
1347        // For fast tracks the frame count calculations and checks are mostly done by server,
1348        // but we try to respect the application's request for notifications per buffer.
1349        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1350            if (mNotificationsPerBufferReq > 0) {
1351                // Avoid possible arithmetic overflow during multiplication.
1352                // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
1353                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
1354                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
1355                            mNotificationsPerBufferReq, afFrameCountHAL);
1356                } else {
1357                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
1358                }
1359            }
1360        } else {
1361            // for normal tracks precompute the frame count based on speed.
1362            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1363                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1364            minFrameCount = calculateMinFrameCount(
1365                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
1366                    speed /*, 0 mNotificationsPerBufferReq*/);
1367        }
1368        if (frameCount < minFrameCount) {
1369            frameCount = minFrameCount;
1370        }
1371    }
1372
1373    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1374
1375    pid_t tid = -1;
1376    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1377        trackFlags |= IAudioFlinger::TRACK_FAST;
1378        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1379            tid = mAudioTrackThread->getTid();
1380        }
1381    }
1382
1383    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1384        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1385    }
1386
1387    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1388        trackFlags |= IAudioFlinger::TRACK_DIRECT;
1389    }
1390
1391    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1392                                // but we will still need the original value also
1393    audio_session_t originalSessionId = mSessionId;
1394    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1395                                                      mSampleRate,
1396                                                      mFormat,
1397                                                      mChannelMask,
1398                                                      &temp,
1399                                                      &trackFlags,
1400                                                      mSharedBuffer,
1401                                                      output,
1402                                                      tid,
1403                                                      &mSessionId,
1404                                                      mClientUid,
1405                                                      &status);
1406    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1407            "session ID changed from %d to %d", originalSessionId, mSessionId);
1408
1409    if (status != NO_ERROR) {
1410        ALOGE("AudioFlinger could not create track, status: %d", status);
1411        goto release;
1412    }
1413    ALOG_ASSERT(track != 0);
1414
1415    // AudioFlinger now owns the reference to the I/O handle,
1416    // so we are no longer responsible for releasing it.
1417
1418    // FIXME compare to AudioRecord
1419    sp<IMemory> iMem = track->getCblk();
1420    if (iMem == 0) {
1421        ALOGE("Could not get control block");
1422        return NO_INIT;
1423    }
1424    void *iMemPointer = iMem->pointer();
1425    if (iMemPointer == NULL) {
1426        ALOGE("Could not get control block pointer");
1427        return NO_INIT;
1428    }
1429    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1430    if (mAudioTrack != 0) {
1431        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1432        mDeathNotifier.clear();
1433    }
1434    mAudioTrack = track;
1435    mCblkMemory = iMem;
1436    IPCThreadState::self()->flushCommands();
1437
1438    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1439    mCblk = cblk;
1440    // note that temp is the (possibly revised) value of frameCount
1441    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1442        // In current design, AudioTrack client checks and ensures frame count validity before
1443        // passing it to AudioFlinger so AudioFlinger should not return a different value except
1444        // for fast track as it uses a special method of assigning frame count.
1445        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1446    }
1447    frameCount = temp;
1448
1449    mAwaitBoost = false;
1450    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1451        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1452            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1453            if (!mThreadCanCallJava) {
1454                mAwaitBoost = true;
1455            }
1456        } else {
1457            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1458            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1459        }
1460    }
1461
1462    // Make sure that application is notified with sufficient margin before underrun.
1463    // The client can divide the AudioTrack buffer into sub-buffers,
1464    // and expresses its desire to server as the notification frame count.
1465    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1466        size_t maxNotificationFrames;
1467        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1468            // notify every HAL buffer, regardless of the size of the track buffer
1469            maxNotificationFrames = afFrameCountHAL;
1470        } else {
1471            // For normal tracks, use double-buffering
1472            const int nBuffering = 2;
1473            maxNotificationFrames = frameCount / nBuffering;
1474        }
1475        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
1476            if (mNotificationFramesAct == 0) {
1477                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
1478                    maxNotificationFrames, frameCount);
1479            } else {
1480                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
1481                    mNotificationFramesAct, maxNotificationFrames, frameCount);
1482            }
1483            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
1484        }
1485    }
1486
1487    // We retain a copy of the I/O handle, but don't own the reference
1488    mOutput = output;
1489    mRefreshRemaining = true;
1490
1491    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1492    // is the value of pointer() for the shared buffer, otherwise buffers points
1493    // immediately after the control block.  This address is for the mapping within client
1494    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1495    void* buffers;
1496    if (mSharedBuffer == 0) {
1497        buffers = cblk + 1;
1498    } else {
1499        buffers = mSharedBuffer->pointer();
1500        if (buffers == NULL) {
1501            ALOGE("Could not get buffer pointer");
1502            return NO_INIT;
1503        }
1504    }
1505
1506    mAudioTrack->attachAuxEffect(mAuxEffectId);
1507    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
1508    // FIXME don't believe this lie
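    // Note: mAfLatency is in milliseconds, and (1000 * frameCount) / mSampleRate is the
    // duration of the client buffer in milliseconds at the nominal sample rate.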
1509    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;
1510
1511    mFrameCount = frameCount;
1512    // If IAudioTrack is re-created, don't let the requested frameCount
1513    // decrease.  This can confuse clients that cache frameCount().
1514    if (frameCount > mReqFrameCount) {
1515        mReqFrameCount = frameCount;
1516    }
1517
1518    // reset server position to 0 as we have new cblk.
1519    mServer = 0;
1520
1521    // update proxy
1522    if (mSharedBuffer == 0) {
1523        mStaticProxy.clear();
1524        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1525    } else {
1526        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1527        mProxy = mStaticProxy;
1528    }
1529
1530    mProxy->setVolumeLR(gain_minifloat_pack(
1531            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1532            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1533
1534    mProxy->setSendLevel(mSendLevel);
1535    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1536    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1537    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1538    mProxy->setSampleRate(effectiveSampleRate);
1539
1540    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1541    playbackRateTemp.mSpeed = effectiveSpeed;
1542    playbackRateTemp.mPitch = effectivePitch;
1543    mProxy->setPlaybackRate(playbackRateTemp);
1544    mProxy->setMinimum(mNotificationFramesAct);
1545
1546    mDeathNotifier = new DeathNotifier(this);
1547    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1548
1549    if (mDeviceCallback != 0) {
1550        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
1551    }
1552
1553    return NO_ERROR;
1554    }
1555
1556release:
1557    AudioSystem::releaseOutput(output, streamType, mSessionId);
1558    if (status == NO_ERROR) {
1559        status = NO_INIT;
1560    }
1561    return status;
1562}
1563
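// A sketch of typical client usage of the obtain/release API in TRANSFER_OBTAIN mode
// (illustrative only; "track", "framesToWrite" and "source" are hypothetical client variables).
// Note that the returned buffer may cover fewer frames than requested:
//
//     AudioTrack::Buffer buffer;
//     buffer.frameCount = framesToWrite;
//     if (track->obtainBuffer(&buffer, -1 /* waitCount: block */, NULL /* nonContig */) == NO_ERROR) {
//         memcpy(buffer.raw, source, buffer.size);   // fill the returned region with PCM data
//         track->releaseBuffer(&buffer);             // make the frames available to the server
//     }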
1564status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1565{
1566    if (audioBuffer == NULL) {
1567        if (nonContig != NULL) {
1568            *nonContig = 0;
1569        }
1570        return BAD_VALUE;
1571    }
1572    if (mTransfer != TRANSFER_OBTAIN) {
1573        audioBuffer->frameCount = 0;
1574        audioBuffer->size = 0;
1575        audioBuffer->raw = NULL;
1576        if (nonContig != NULL) {
1577            *nonContig = 0;
1578        }
1579        return INVALID_OPERATION;
1580    }
1581
1582    const struct timespec *requested;
1583    struct timespec timeout;
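    // waitCount semantics: -1 blocks indefinitely, 0 is non-blocking, and a positive value
    // waits for up to waitCount * WAIT_PERIOD_MS milliseconds.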
1584    if (waitCount == -1) {
1585        requested = &ClientProxy::kForever;
1586    } else if (waitCount == 0) {
1587        requested = &ClientProxy::kNonBlocking;
1588    } else if (waitCount > 0) {
1589        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1590        timeout.tv_sec = ms / 1000;
1591        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1592        requested = &timeout;
1593    } else {
1594        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1595        requested = NULL;
1596    }
1597    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1598}
1599
1600status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1601        struct timespec *elapsed, size_t *nonContig)
1602{
1603    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1604    uint32_t oldSequence = 0;
1605    uint32_t newSequence;
1606
1607    Proxy::Buffer buffer;
1608    status_t status = NO_ERROR;
1609
1610    static const int32_t kMaxTries = 5;
1611    int32_t tryCounter = kMaxTries;
1612
1613    do {
1614        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1615        // keep them from going away if another thread re-creates the track during obtainBuffer()
1616        sp<AudioTrackClientProxy> proxy;
1617        sp<IMemory> iMem;
1618
1619        {   // start of lock scope
1620            AutoMutex lock(mLock);
1621
1622            newSequence = mSequence;
1623            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1624            if (status == DEAD_OBJECT) {
1625                // re-create track, unless someone else has already done so
1626                if (newSequence == oldSequence) {
1627                    status = restoreTrack_l("obtainBuffer");
1628                    if (status != NO_ERROR) {
1629                        buffer.mFrameCount = 0;
1630                        buffer.mRaw = NULL;
1631                        buffer.mNonContig = 0;
1632                        break;
1633                    }
1634                }
1635            }
1636            oldSequence = newSequence;
1637
1638            if (status == NOT_ENOUGH_DATA) {
1639                restartIfDisabled();
1640            }
1641
1642            // Keep the extra references
1643            proxy = mProxy;
1644            iMem = mCblkMemory;
1645
1646            if (mState == STATE_STOPPING) {
1647                status = -EINTR;
1648                buffer.mFrameCount = 0;
1649                buffer.mRaw = NULL;
1650                buffer.mNonContig = 0;
1651                break;
1652            }
1653
1654            // Non-blocking if track is stopped or paused
1655            if (mState != STATE_ACTIVE) {
1656                requested = &ClientProxy::kNonBlocking;
1657            }
1658
1659        }   // end of lock scope
1660
1661        buffer.mFrameCount = audioBuffer->frameCount;
1662        // FIXME starts the requested timeout and elapsed over from scratch
1663        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1664    } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1665
1666    audioBuffer->frameCount = buffer.mFrameCount;
1667    audioBuffer->size = buffer.mFrameCount * mFrameSize;
1668    audioBuffer->raw = buffer.mRaw;
1669    if (nonContig != NULL) {
1670        *nonContig = buffer.mNonContig;
1671    }
1672    return status;
1673}
1674
1675void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1676{
1677    // FIXME add error checking on mode, by adding an internal version
1678    if (mTransfer == TRANSFER_SHARED) {
1679        return;
1680    }
1681
1682    size_t stepCount = audioBuffer->size / mFrameSize;
1683    if (stepCount == 0) {
1684        return;
1685    }
1686
1687    Proxy::Buffer buffer;
1688    buffer.mFrameCount = stepCount;
1689    buffer.mRaw = audioBuffer->raw;
1690
1691    AutoMutex lock(mLock);
1692    mReleased += stepCount;
1693    mInUnderrun = false;
1694    mProxy->releaseBuffer(&buffer);
1695
1696    // restart track if it was disabled by audioflinger due to previous underrun
1697    restartIfDisabled();
1698}
1699
1700void AudioTrack::restartIfDisabled()
1701{
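    // android_atomic_and() returns the flag word as it was before CBLK_DISABLED was cleared,
    // so we can tell whether the server had disabled the track.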
1702    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1703    if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1704        ALOGW("%s() track %p disabled due to previous underrun, restarting", __func__, this);
1705        // FIXME ignoring status
1706        mAudioTrack->start();
1707    }
1708}
1709
1710// -------------------------------------------------------------------------
1711
1712ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1713{
1714    if (mTransfer != TRANSFER_SYNC) {
1715        return INVALID_OPERATION;
1716    }
1717
1718    if (isDirect()) {
1719        AutoMutex lock(mLock);
1720        int32_t flags = android_atomic_and(
1721                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1722                            &mCblk->mFlags);
1723        if (flags & CBLK_INVALID) {
1724            return DEAD_OBJECT;
1725        }
1726    }
1727
1728    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1729        // Sanity-check: the user is most likely passing an error code, and it would
1730        // make the return value ambiguous (actualSize vs error).
1731        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1732        return BAD_VALUE;
1733    }
1734
1735    size_t written = 0;
1736    Buffer audioBuffer;
1737
1738    while (userSize >= mFrameSize) {
1739        audioBuffer.frameCount = userSize / mFrameSize;
1740
1741        status_t err = obtainBuffer(&audioBuffer,
1742                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1743        if (err < 0) {
1744            if (written > 0) {
1745                break;
1746            }
1747            return ssize_t(err);
1748        }
1749
1750        size_t toWrite = audioBuffer.size;
1751        memcpy(audioBuffer.i8, buffer, toWrite);
1752        buffer = ((const char *) buffer) + toWrite;
1753        userSize -= toWrite;
1754        written += toWrite;
1755
1756        releaseBuffer(&audioBuffer);
1757    }
1758
1759    if (written > 0) {
1760        mFramesWritten += written / mFrameSize;
1761    }
1762    return written;
1763}
1764
1765// -------------------------------------------------------------------------
1766
1767nsecs_t AudioTrack::processAudioBuffer()
1768{
1769    // Currently the AudioTrack thread is not created if there are no callbacks.
1770    // Would it ever make sense to run the thread, even without callbacks?
1771    // If so, then replace this by checks at each use for mCbf != NULL.
1772    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1773
1774    mLock.lock();
1775    if (mAwaitBoost) {
1776        mAwaitBoost = false;
1777        mLock.unlock();
1778        static const int32_t kMaxTries = 5;
1779        int32_t tryCounter = kMaxTries;
1780        uint32_t pollUs = 10000;
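        // Poll for the priority boost with exponential backoff: start at 10 ms and double
        // the sleep on each retry, up to kMaxTries attempts.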
1781        do {
1782            int policy = sched_getscheduler(0);
1783            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1784                break;
1785            }
1786            usleep(pollUs);
1787            pollUs <<= 1;
1788        } while (tryCounter-- > 0);
1789        if (tryCounter < 0) {
1790            ALOGE("did not receive expected priority boost on time");
1791        }
1792        // Run again immediately
1793        return 0;
1794    }
1795
1796    // Can only reference mCblk while locked
1797    int32_t flags = android_atomic_and(
1798        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1799
1800    // Check for track invalidation
1801    if (flags & CBLK_INVALID) {
1802        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1803        // AudioSystem cache. We should not exit here but after calling the callback so
1804        // that the upper layers can recreate the track
1805        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1806            status_t status __unused = restoreTrack_l("processAudioBuffer");
1807            // FIXME unused status
1808            // after restoration, continue below to make sure that the loop and buffer events
1809            // are notified because they have been cleared from mCblk->mFlags above.
1810        }
1811    }
1812
1813    bool waitStreamEnd = mState == STATE_STOPPING;
1814    bool active = mState == STATE_ACTIVE;
1815
1816    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1817    bool newUnderrun = false;
1818    if (flags & CBLK_UNDERRUN) {
1819#if 0
1820        // Currently in shared buffer mode, when the server reaches the end of buffer,
1821        // the track stays active in continuous underrun state.  It's up to the application
1822        // to pause or stop the track, or set the position to a new offset within buffer.
1823        // This was some experimental code to auto-pause on underrun.   Keeping it here
1824        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1825        if (mTransfer == TRANSFER_SHARED) {
1826            mState = STATE_PAUSED;
1827            active = false;
1828        }
1829#endif
1830        if (!mInUnderrun) {
1831            mInUnderrun = true;
1832            newUnderrun = true;
1833        }
1834    }
1835
1836    // Get current position of server
1837    Modulo<uint32_t> position(updateAndGetPosition_l());
1838
1839    // Manage marker callback
1840    bool markerReached = false;
1841    Modulo<uint32_t> markerPosition(mMarkerPosition);
1842    // uses 32 bit wraparound for comparison with position.
1843    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1844        mMarkerReached = markerReached = true;
1845    }
1846
1847    // Determine number of new position callback(s) that will be needed, while locked
1848    size_t newPosCount = 0;
1849    Modulo<uint32_t> newPosition(mNewPosition);
1850    uint32_t updatePeriod = mUpdatePeriod;
1851    // FIXME fails for wraparound, need 64 bits
1852    if (updatePeriod > 0 && position >= newPosition) {
1853        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1854        mNewPosition += updatePeriod * newPosCount;
1855    }
1856
1857    // Cache other fields that will be needed soon
1858    uint32_t sampleRate = mSampleRate;
1859    float speed = mPlaybackRate.mSpeed;
1860    const uint32_t notificationFrames = mNotificationFramesAct;
1861    if (mRefreshRemaining) {
1862        mRefreshRemaining = false;
1863        mRemainingFrames = notificationFrames;
1864        mRetryOnPartialBuffer = false;
1865    }
1866    size_t misalignment = mProxy->getMisalignment();
1867    uint32_t sequence = mSequence;
1868    sp<AudioTrackClientProxy> proxy = mProxy;
1869
1870    // Determine the number of new loop callback(s) that will be needed, while locked.
1871    int loopCountNotifications = 0;
1872    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1873
1874    if (mLoopCount > 0) {
1875        int loopCount;
1876        size_t bufferPosition;
1877        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1878        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1879        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1880        mLoopCountNotified = loopCount; // discard any excess notifications
1881    } else if (mLoopCount < 0) {
1882        // FIXME: We're not accurate with notification count and position with infinite looping
1883        // since loopCount from server side will always return -1 (we could decrement it).
1884        size_t bufferPosition = mStaticProxy->getBufferPosition();
1885        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1886        loopPeriod = mLoopEnd - bufferPosition;
1887    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1888        size_t bufferPosition = mStaticProxy->getBufferPosition();
1889        loopPeriod = mFrameCount - bufferPosition;
1890    }
1891
1892    // These fields don't need to be cached, because they are assigned only by set():
1893    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1894    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1895
1896    mLock.unlock();
1897
1898    // get anchor time to account for callbacks.
1899    const nsecs_t timeBeforeCallbacks = systemTime();
1900
1901    if (waitStreamEnd) {
1902        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
1903        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
1904        // (and make sure we don't callback for more data while we're stopping).
1905        // This helps with position, marker notifications, and track invalidation.
1906        struct timespec timeout;
1907        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1908        timeout.tv_nsec = 0;
1909
1910        status_t status = proxy->waitStreamEndDone(&timeout);
1911        switch (status) {
1912        case NO_ERROR:
1913        case DEAD_OBJECT:
1914        case TIMED_OUT:
1915            if (status != DEAD_OBJECT) {
1916                // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
1917                // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
1918                mCbf(EVENT_STREAM_END, mUserData, NULL);
1919            }
1920            {
1921                AutoMutex lock(mLock);
1922                // The previously assigned value of waitStreamEnd is no longer valid,
1923                // since the mutex has been unlocked and either the callback handler
1924                // or another thread could have re-started the AudioTrack during that time.
1925                waitStreamEnd = mState == STATE_STOPPING;
1926                if (waitStreamEnd) {
1927                    mState = STATE_STOPPED;
1928                    mReleased = 0;
1929                }
1930            }
1931            if (waitStreamEnd && status != DEAD_OBJECT) {
1932               return NS_INACTIVE;
1933            }
1934            break;
1935        }
1936        return 0;
1937    }
1938
1939    // perform callbacks while unlocked
1940    if (newUnderrun) {
1941        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1942    }
1943    while (loopCountNotifications > 0) {
1944        mCbf(EVENT_LOOP_END, mUserData, NULL);
1945        --loopCountNotifications;
1946    }
1947    if (flags & CBLK_BUFFER_END) {
1948        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1949    }
1950    if (markerReached) {
1951        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1952    }
1953    while (newPosCount > 0) {
1954        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
1955        mCbf(EVENT_NEW_POS, mUserData, &temp);
1956        newPosition += updatePeriod;
1957        newPosCount--;
1958    }
1959
1960    if (mObservedSequence != sequence) {
1961        mObservedSequence = sequence;
1962        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1963        // for offloaded tracks, just wait for the upper layers to recreate the track
1964        if (isOffloadedOrDirect()) {
1965            return NS_INACTIVE;
1966        }
1967    }
1968
1969    // if inactive, then don't run me again until re-started
1970    if (!active) {
1971        return NS_INACTIVE;
1972    }
1973
1974    // Compute the estimated time until the next timed event (position, markers, loops)
1975    // FIXME only for non-compressed audio
1976    uint32_t minFrames = ~0;
1977    if (!markerReached && position < markerPosition) {
1978        minFrames = (markerPosition - position).value();
1979    }
1980    if (loopPeriod > 0 && loopPeriod < minFrames) {
1981        // loopPeriod is already adjusted for actual position.
1982        minFrames = loopPeriod;
1983    }
1984    if (updatePeriod > 0) {
1985        minFrames = min(minFrames, (newPosition - position).value());
1986    }
1987
1988    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1989    static const uint32_t kPoll = 0;
1990    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1991        minFrames = kPoll * notificationFrames;
1992    }
1993
1994    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1995    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
1996    const nsecs_t timeAfterCallbacks = systemTime();
1997
1998    // Convert frame units to time units
1999    nsecs_t ns = NS_WHENEVER;
2000    if (minFrames != (uint32_t) ~0) {
2001        ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
2002        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
2003        // TODO: Should we warn if the callback time is too long?
2004        if (ns < 0) ns = 0;
2005    }
2006
2007    // If not supplying data by EVENT_MORE_DATA, then we're done
2008    if (mTransfer != TRANSFER_CALLBACK) {
2009        return ns;
2010    }
2011
2012    // EVENT_MORE_DATA callback handling.
2013    // Timing for linear pcm audio data formats can be derived directly from the
2014    // buffer fill level.
2015    // Timing for compressed data is not directly available from the buffer fill level,
2016    // but rather indirectly, from waiting for blocking-mode callbacks or for obtain()
2017    // to return a certain fill level.
2018
2019    struct timespec timeout;
2020    const struct timespec *requested = &ClientProxy::kForever;
2021    if (ns != NS_WHENEVER) {
2022        timeout.tv_sec = ns / 1000000000LL;
2023        timeout.tv_nsec = ns % 1000000000LL;
2024        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2025        requested = &timeout;
2026    }
2027
2028    size_t writtenFrames = 0;
2029    while (mRemainingFrames > 0) {
2030
2031        Buffer audioBuffer;
2032        audioBuffer.frameCount = mRemainingFrames;
2033        size_t nonContig;
2034        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2035        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2036                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
2037        requested = &ClientProxy::kNonBlocking;
2038        size_t avail = audioBuffer.frameCount + nonContig;
2039        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2040                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2041        if (err != NO_ERROR) {
2042            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2043                    (isOffloaded() && (err == DEAD_OBJECT))) {
2044                // FIXME bug 25195759
2045                return 1000000;
2046            }
2047            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
2048            return NS_NEVER;
2049        }
2050
2051        if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2052            mRetryOnPartialBuffer = false;
2053            if (avail < mRemainingFrames) {
2054                if (ns > 0) { // account for obtain time
2055                    const nsecs_t timeNow = systemTime();
2056                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2057                }
2058                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2059                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2060                    ns = myns;
2061                }
2062                return ns;
2063            }
2064        }
2065
2066        size_t reqSize = audioBuffer.size;
2067        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
2068        size_t writtenSize = audioBuffer.size;
2069
2070        // Sanity check on returned size
2071        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2072            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2073                    reqSize, ssize_t(writtenSize));
2074            return NS_NEVER;
2075        }
2076
2077        if (writtenSize == 0) {
2078            // The callback is done filling buffers
2079            // Keep this thread going to handle timed events and
2080            // still try to get more data in intervals of WAIT_PERIOD_MS
2081            // but don't just loop and block the CPU, so wait
2082
2083            // mCbf(EVENT_MORE_DATA, ...) might either
2084            // (1) Block until it can fill the buffer, returning 0 size on EOS.
2085            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2086            // (3) Return 0 size when no data is available, does not wait for more data.
2087            //
2088            // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2089            // We try to compute the wait time to avoid a tight sleep-wait cycle,
2090            // especially for case (3).
2091            //
2092            // The decision to support (1) and (2) affects the sizing of mRemainingFrames
2093            // and this loop, whereas for case (3) we could simply check once with the full
2094            // buffer size and skip the loop entirely.
2095
2096            nsecs_t myns;
2097            if (audio_has_proportional_frames(mFormat)) {
2098                // time to wait based on buffer occupancy
2099                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2100                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2101                // audio flinger thread buffer size (TODO: adjust for fast tracks)
2102                // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2103                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2104                // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2105                myns = datans + (afns / 2);
2106            } else {
2107                // FIXME: This could ping quite a bit if the buffer isn't full.
2108                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2109                myns = kWaitPeriodNs;
2110            }
2111            if (ns > 0) { // account for obtain and callback time
2112                const nsecs_t timeNow = systemTime();
2113                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2114            }
2115            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2116                ns = myns;
2117            }
2118            return ns;
2119        }
2120
2121        size_t releasedFrames = writtenSize / mFrameSize;
2122        audioBuffer.frameCount = releasedFrames;
2123        mRemainingFrames -= releasedFrames;
2124        if (misalignment >= releasedFrames) {
2125            misalignment -= releasedFrames;
2126        } else {
2127            misalignment = 0;
2128        }
2129
2130        releaseBuffer(&audioBuffer);
2131        writtenFrames += releasedFrames;
2132
2133        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2134        // if callback doesn't like to accept the full chunk
2135        if (writtenSize < reqSize) {
2136            continue;
2137        }
2138
2139        // There could be enough non-contiguous frames available to satisfy the remaining request
2140        if (mRemainingFrames <= nonContig) {
2141            continue;
2142        }
2143
2144#if 0
2145        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2146        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
2147        // that total to a sum == notificationFrames.
2148        if (0 < misalignment && misalignment <= mRemainingFrames) {
2149            mRemainingFrames = misalignment;
2150            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2151        }
2152#endif
2153
2154    }
2155    if (writtenFrames > 0) {
2156        AutoMutex lock(mLock);
2157        mFramesWritten += writtenFrames;
2158    }
2159    mRemainingFrames = notificationFrames;
2160    mRetryOnPartialBuffer = true;
2161
2162    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2163    return 0;
2164}
2165
2166status_t AudioTrack::restoreTrack_l(const char *from)
2167{
2168    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2169          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2170    ++mSequence;
2171
2172    // refresh the audio configuration cache in this process to make sure we get new
2173    // output parameters and new IAudioFlinger in createTrack_l()
2174    AudioSystem::clearAudioConfigCache();
2175
2176    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2177        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2178        // reconsider enabling for linear PCM encodings when position can be preserved.
2179        return DEAD_OBJECT;
2180    }
2181
2182    // Save so we can return count since creation.
2183    mUnderrunCountOffset = getUnderrunCount_l();
2184
2185    // save the old static buffer position
2186    size_t bufferPosition = 0;
2187    int loopCount = 0;
2188    if (mStaticProxy != 0) {
2189        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2190    }
2191
2192    mFlags = mOrigFlags;
2193
2194    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2195    // following member variables: mAudioTrack, mCblkMemory and mCblk.
2196    // It will also delete the strong references on previous IAudioTrack and IMemory.
2197    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2198    status_t result = createTrack_l();
2199
2200    if (result == NO_ERROR) {
2201        // Take the frames that will be lost by track recreation into account in the saved position.
2202        // For streaming tracks, this is the amount we obtained from the user/client
2203        // (not the number actually consumed at the server - those are already lost).
2204        if (mStaticProxy == 0) {
2205            mPosition = mReleased;
2206        }
2207        // Continue playback from last known position and restore loop.
2208        if (mStaticProxy != 0) {
2209            if (loopCount != 0) {
2210                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2211                        mLoopStart, mLoopEnd, loopCount);
2212            } else {
2213                mStaticProxy->setBufferPosition(bufferPosition);
2214                if (bufferPosition == mFrameCount) {
2215                    ALOGD("restoring track at end of static buffer");
2216                }
2217            }
2218        }
2219        if (mState == STATE_ACTIVE) {
2220            result = mAudioTrack->start();
2221            mFramesWrittenServerOffset = mFramesWritten; // server resets to zero so we offset
2222        }
2223    }
2224    if (result != NO_ERROR) {
2225        ALOGW("restoreTrack_l() failed status %d", result);
2226        mState = STATE_STOPPED;
2227        mReleased = 0;
2228    }
2229
2230    return result;
2231}
2232
2233Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2234{
2235    // This is the sole place to read server consumed frames
2236    Modulo<uint32_t> newServer(mProxy->getPosition());
2237    const int32_t delta = (newServer - mServer).signedValue();
2238    // TODO There is controversy about whether there can be "negative jitter" in server position.
2239    //      This should be investigated further, and if possible, it should be addressed.
2240    //      A more definite failure mode is infrequent polling by client.
2241    //      One could call (void)getPosition_l() in releaseBuffer(),
2242    //      so mReleased and mPosition are always lock-step as best possible.
2243    //      That should ensure delta never goes negative for infrequent polling
2244    //      unless the server has more than 2^31 frames in its buffer,
2245    //      in which case the use of uint32_t for these counters has bigger issues.
2246    ALOGE_IF(delta < 0,
2247            "detected illegal retrograde motion by the server: mServer advanced by %d",
2248            delta);
2249    mServer = newServer;
2250    if (delta > 0) { // avoid retrograde
2251        mPosition += delta;
2252    }
2253    return mPosition;
2254}
2255
2256bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
2257{
2258    // applicable for mixing tracks only (not offloaded or direct)
2259    if (mStaticProxy != 0) {
2260        return true; // static tracks do not have issues with buffer sizing.
2261    }
2262    const size_t minFrameCount =
2263            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
2264                /*, 0 mNotificationsPerBufferReq*/);
2265    ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
2266            mFrameCount, minFrameCount);
2267    return mFrameCount >= minFrameCount;
2268}
2269
2270status_t AudioTrack::setParameters(const String8& keyValuePairs)
2271{
2272    AutoMutex lock(mLock);
2273    return mAudioTrack->setParameters(keyValuePairs);
2274}
2275
2276status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2277{
2278    if (timestamp == nullptr) {
2279        return BAD_VALUE;
2280    }
2281    AutoMutex lock(mLock);
2282    return getTimestamp_l(timestamp);
2283}
2284
2285status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2286{
2287    if (mCblk->mFlags & CBLK_INVALID) {
2288        const status_t status = restoreTrack_l("getTimestampExtended");
2289        if (status != OK) {
2290            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2291            // recommending that the track be recreated.
2292            return DEAD_OBJECT;
2293        }
2294    }
2295    // check for offloaded/direct here in case restoring somehow changed those flags.
2296    if (isOffloadedOrDirect_l()) {
2297        return INVALID_OPERATION; // not supported
2298    }
2299    status_t status = mProxy->getTimestamp(timestamp);
2300    LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
2301    bool found = false;
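    // The client location carries only the number of frames written by the application;
    // no time is associated with it, so its time is reported as 0.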
2302    timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2303    timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2304    // server side frame offset in case AudioTrack has been restored.
2305    for (int i = ExtendedTimestamp::LOCATION_SERVER;
2306            i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2307        if (timestamp->mTimeNs[i] >= 0) {
2308            // apply server offset (frames flushed is ignored
2309            // so we don't report the jump when the flush occurs).
2310            timestamp->mPosition[i] += mFramesWrittenServerOffset;
2311            found = true;
2312        }
2313    }
2314    return found ? OK : WOULD_BLOCK;
2315}
2316
2317status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2318{
2319    AutoMutex lock(mLock);
2320
2321    bool previousTimestampValid = mPreviousTimestampValid;
2322    // Set false here to cover all the error return cases.
2323    mPreviousTimestampValid = false;
2324
2325    switch (mState) {
2326    case STATE_ACTIVE:
2327    case STATE_PAUSED:
2328        break; // handle below
2329    case STATE_FLUSHED:
2330    case STATE_STOPPED:
2331        return WOULD_BLOCK;
2332    case STATE_STOPPING:
2333    case STATE_PAUSED_STOPPING:
2334        if (!isOffloaded_l()) {
2335            return INVALID_OPERATION;
2336        }
2337        break; // offloaded tracks handled below
2338    default:
2339        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2340        break;
2341    }
2342
2343    if (mCblk->mFlags & CBLK_INVALID) {
2344        const status_t status = restoreTrack_l("getTimestamp");
2345        if (status != OK) {
2346            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2347            // recommending that the track be recreated.
2348            return DEAD_OBJECT;
2349        }
2350    }
2351
2352    // The presented frame count must always lag behind the consumed frame count.
2353    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
2354
2355    status_t status;
2356    if (isOffloadedOrDirect_l()) {
2357        // use Binder to get timestamp
2358        status = mAudioTrack->getTimestamp(timestamp);
2359    } else {
2360        // read timestamp from shared memory
2361        ExtendedTimestamp ets;
2362        status = mProxy->getTimestamp(&ets);
2363        if (status == OK) {
2364            status = ets.getBestTimestamp(&timestamp);
2365        }
2366        if (status == INVALID_OPERATION) {
2367            status = WOULD_BLOCK;
2368        }
2369    }
2370    if (status != NO_ERROR) {
2371        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2372        return status;
2373    }
2374    if (isOffloadedOrDirect_l()) {
2375        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2376            // use cached paused position in case another offloaded track is running.
2377            timestamp.mPosition = mPausedPosition;
2378            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2379            return NO_ERROR;
2380        }
2381
2382        // Check whether a pending flush or stop has completed, as those commands may
2383        // be asynchronous, return when nearly finished, or exhibit glitchy behavior.
2384        //
2385        // Originally this showed up as the first timestamp being a continuation of
2386        // the previous song under gapless playback.
2387        // However, we sometimes see zero timestamps, then a glitch of
2388        // the previous song's position, and then correct timestamps afterwards.
2389        if (mStartUs != 0 && mSampleRate != 0) {
2390            static const int kTimeJitterUs = 100000; // 100 ms
2391            static const int k1SecUs = 1000000;
2392
2393            const int64_t timeNow = getNowUs();
2394
2395            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
2396                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2397                if (timestampTimeUs < mStartUs) {
2398                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
2399                }
2400                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
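                // Convert the reported frame position to elapsed microseconds at the
                // effective playback rate so it can be compared against deltaTimeUs.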
2401                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2402                        / ((double)mSampleRate * mPlaybackRate.mSpeed);
2403
2404                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2405                    // Verify that the counter can't count faster than the sample rate
2406                    // since the start time.  If greater, then that means we may have failed
2407                    // to completely flush or stop the previous playing track.
2408                    ALOGW_IF(!mTimestampStartupGlitchReported,
2409                            "getTimestamp startup glitch detected"
2410                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2411                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
2412                            timestamp.mPosition);
2413                    mTimestampStartupGlitchReported = true;
2414                    if (previousTimestampValid
2415                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2416                        timestamp = mPreviousTimestamp;
2417                        mPreviousTimestampValid = true;
2418                        return NO_ERROR;
2419                    }
2420                    return WOULD_BLOCK;
2421                }
2422                if (deltaPositionByUs != 0) {
2423                    mStartUs = 0; // don't check again, we got valid nonzero position.
2424                }
2425            } else {
2426                mStartUs = 0; // don't check again, start time expired.
2427            }
2428            mTimestampStartupGlitchReported = false;
2429        }
2430    } else {
2431        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2432        (void) updateAndGetPosition_l();
2433        // Server consumed (mServer) and presented both use the same server time base,
2434        // and server consumed is always >= presented.
2435        // The delta between these represents the number of frames in the buffer pipeline.
2436        // If this delta is greater than the client position, it means that
2437        // actually presented is still stuck at the starting line (figuratively speaking),
2438        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2439        // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2440        // mPosition exceeds 32 bits.
2441        // TODO Remove when timestamp is updated to contain pipeline status info.
2442        const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2443        if (pipelineDepthInFrames > 0 /* should be true, but we check anyway */
2444                && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2445            return INVALID_OPERATION;
2446        }
2447        // Convert timestamp position from server time base to client time base.
2448        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2449        // But if we change it to 64-bit then this could fail.
2450        // Use Modulo computation here.
2451        timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2452        // Immediately after a call to getPosition_l(), mPosition and
2453        // mServer both represent the same frame position.  mPosition is
2454        // in client's point of view, and mServer is in server's point of
2455        // view.  So the difference between them is the "fudge factor"
2456        // between client and server views due to stop() and/or new
2457        // IAudioTrack.  And timestamp.mPosition is initially in server's
2458        // point of view, so we need to apply the same fudge factor to it.
2459    }
2460
2461    // Prevent retrograde motion in timestamp.
2462    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2463    if (status == NO_ERROR) {
2464        if (previousTimestampValid) {
2465#define TIME_TO_NANOS(time) ((int64_t)time.tv_sec * 1000000000 + time.tv_nsec)
2466            const int64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
2467            const int64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
2468#undef TIME_TO_NANOS
2469            if (currentTimeNanos < previousTimeNanos) {
2470                ALOGW("retrograde timestamp time");
2471                // FIXME Consider blocking this from propagating upwards.
2472            }
2473
2474            // Looking at signed delta will work even when the timestamps
2475            // are wrapping around.
2476            int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2477                    - mPreviousTimestamp.mPosition).signedValue();
2478            // position can bobble slightly as an artifact; this hides the bobble
2479            static const int32_t MINIMUM_POSITION_DELTA = 8;
2480            if (deltaPosition < 0) {
2481                // Only report once per position instead of spamming the log.
2482                if (!mRetrogradeMotionReported) {
2483                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2484                            deltaPosition,
2485                            timestamp.mPosition,
2486                            mPreviousTimestamp.mPosition);
2487                    mRetrogradeMotionReported = true;
2488                }
2489            } else {
2490                mRetrogradeMotionReported = false;
2491            }
2492            if (deltaPosition < MINIMUM_POSITION_DELTA) {
2493                timestamp = mPreviousTimestamp;  // Use last valid timestamp.
2494            }
2495        }
2496        mPreviousTimestamp = timestamp;
2497        mPreviousTimestampValid = true;
2498    }
2499
2500    return status;
2501}
2502
2503String8 AudioTrack::getParameters(const String8& keys)
2504{
2505    audio_io_handle_t output = getOutput();
2506    if (output != AUDIO_IO_HANDLE_NONE) {
2507        return AudioSystem::getParameters(output, keys);
2508    } else {
2509        return String8::empty();
2510    }
2511}
2512
2513bool AudioTrack::isOffloaded() const
2514{
2515    AutoMutex lock(mLock);
2516    return isOffloaded_l();
2517}
2518
2519bool AudioTrack::isDirect() const
2520{
2521    AutoMutex lock(mLock);
2522    return isDirect_l();
2523}
2524
2525bool AudioTrack::isOffloadedOrDirect() const
2526{
2527    AutoMutex lock(mLock);
2528    return isOffloadedOrDirect_l();
2529}
2530
2531
2532status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2533{
2534
2535    const size_t SIZE = 256;
2536    char buffer[SIZE];
2537    String8 result;
2538
2539    result.append(" AudioTrack::dump\n");
2540    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2541            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2542    result.append(buffer);
2543    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2544            mChannelCount, mFrameCount);
2545    result.append(buffer);
2546    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
2547            mSampleRate, mPlaybackRate.mSpeed, mStatus);
2548    result.append(buffer);
2549    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2550    result.append(buffer);
2551    ::write(fd, result.string(), result.size());
2552    return NO_ERROR;
2553}
2554
2555uint32_t AudioTrack::getUnderrunCount() const
2556{
2557    AutoMutex lock(mLock);
2558    return getUnderrunCount_l();
2559}
2560
2561uint32_t AudioTrack::getUnderrunCount_l() const
2562{
2563    return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2564}
2565
2566uint32_t AudioTrack::getUnderrunFrames() const
2567{
2568    AutoMutex lock(mLock);
2569    return mProxy->getUnderrunFrames();
2570}
2571
2572status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2573{
2574    if (callback == 0) {
2575        ALOGW("%s adding NULL callback!", __FUNCTION__);
2576        return BAD_VALUE;
2577    }
2578    AutoMutex lock(mLock);
2579    if (mDeviceCallback == callback) {
2580        ALOGW("%s adding same callback!", __FUNCTION__);
2581        return INVALID_OPERATION;
2582    }
2583    status_t status = NO_ERROR;
2584    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2585        if (mDeviceCallback != 0) {
2586            ALOGW("%s callback already present!", __FUNCTION__);
2587            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2588        }
2589        status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
2590    }
2591    mDeviceCallback = callback;
2592    return status;
2593}
2594
2595status_t AudioTrack::removeAudioDeviceCallback(
2596        const sp<AudioSystem::AudioDeviceCallback>& callback)
2597{
2598    if (callback == 0) {
2599        ALOGW("%s removing NULL callback!", __FUNCTION__);
2600        return BAD_VALUE;
2601    }
2602    AutoMutex lock(mLock);
2603    if (mDeviceCallback != callback) {
2604        ALOGW("%s removing different callback!", __FUNCTION__);
2605        return INVALID_OPERATION;
2606    }
2607    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2608        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2609    }
2610    mDeviceCallback = 0;
2611    return NO_ERROR;
2612}
2613
2614status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
2615{
2616    if (msec == nullptr ||
2617            (location != ExtendedTimestamp::LOCATION_SERVER
2618                    && location != ExtendedTimestamp::LOCATION_KERNEL)) {
2619        return BAD_VALUE;
2620    }
2621    AutoMutex lock(mLock);
2622    // inclusive of offloaded and direct tracks.
2623    //
2624    // It is possible, but not enabled, to allow duration computation for non-pcm
2625    // audio_has_proportional_frames() formats, because currently their drain rate is
2626    // equivalent to the pcm sample rate * frame size.
2627    if (!isPurePcmData_l()) {
2628        return INVALID_OPERATION;
2629    }
2630    ExtendedTimestamp ets;
2631    if (getTimestamp_l(&ets) == OK
2632            && ets.mTimeNs[location] > 0) {
2633        int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
2634                - ets.mPosition[location];
2635        if (diff < 0) {
2636            *msec = 0;
2637        } else {
2638            // ms is the playback time by frames
2639            // ms is the remaining playback time, computed from the frame difference
2640                    ((double)mSampleRate * mPlaybackRate.mSpeed));
2641            // clockdiff is the timestamp age (negative)
2642            int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
2643                    ets.mTimeNs[location]
2644                    + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
2645                    - systemTime(SYSTEM_TIME_MONOTONIC);
2646
2647            //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
2648            static const int NANOS_PER_MILLIS = 1000000;
2649            *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
2650        }
2651        return NO_ERROR;
2652    }
2653    if (location != ExtendedTimestamp::LOCATION_SERVER) {
2654        return INVALID_OPERATION; // LOCATION_KERNEL is not available
2655    }
2656    // use server position directly (offloaded and direct arrive here)
2657    updateAndGetPosition_l();
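    // diff is the number of frames written by the client that the server has not yet
    // consumed, based on the client-side view of the consumed position (mPosition).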
2658    int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
2659    *msec = (diff <= 0) ? 0
2660            : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
2661    return NO_ERROR;
2662}
2663
2664// =========================================================================
2665
2666void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2667{
2668    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2669    if (audioTrack != 0) {
2670        AutoMutex lock(audioTrack->mLock);
2671        audioTrack->mProxy->binderDied();
2672    }
2673}
2674
2675// =========================================================================
2676
2677AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2678    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2679      mIgnoreNextPausedInt(false)
2680{
2681}
2682
2683AudioTrack::AudioTrackThread::~AudioTrackThread()
2684{
2685}
2686
2687bool AudioTrack::AudioTrackThread::threadLoop()
2688{
2689    {
2690        AutoMutex _l(mMyLock);
2691        if (mPaused) {
2692            mMyCond.wait(mMyLock);
2693            // caller will check for exitPending()
2694            return true;
2695        }
2696        if (mIgnoreNextPausedInt) {
2697            mIgnoreNextPausedInt = false;
2698            mPausedInt = false;
2699        }
2700        if (mPausedInt) {
2701            if (mPausedNs > 0) {
2702                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2703            } else {
2704                mMyCond.wait(mMyLock);
2705            }
2706            mPausedInt = false;
2707            return true;
2708        }
2709    }
2710    if (exitPending()) {
2711        return false;
2712    }
2713    nsecs_t ns = mReceiver.processAudioBuffer();
2714    switch (ns) {
2715    case 0:
2716        return true;
2717    case NS_INACTIVE:
2718        pauseInternal();
2719        return true;
2720    case NS_NEVER:
2721        return false;
2722    case NS_WHENEVER:
2723        // Event driven: call wake() when callback notification conditions change.
2724        ns = INT64_MAX;
2725        // fall through
2726    default:
2727        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2728        pauseInternal(ns);
2729        return true;
2730    }
2731}
2732
2733void AudioTrack::AudioTrackThread::requestExit()
2734{
2735    // must be in this order to avoid a race condition
2736    Thread::requestExit();
2737    resume();
2738}
2739
2740void AudioTrack::AudioTrackThread::pause()
2741{
2742    AutoMutex _l(mMyLock);
2743    mPaused = true;
2744}
2745
2746void AudioTrack::AudioTrackThread::resume()
2747{
2748    AutoMutex _l(mMyLock);
2749    mIgnoreNextPausedInt = true;
2750    if (mPaused || mPausedInt) {
2751        mPaused = false;
2752        mPausedInt = false;
2753        mMyCond.signal();
2754    }
2755}
2756
2757void AudioTrack::AudioTrackThread::wake()
2758{
2759    AutoMutex _l(mMyLock);
2760    if (!mPaused) {
2761        // wake() might be called while servicing a callback - ignore the next
2762        // pause time and call processAudioBuffer.
2763        mIgnoreNextPausedInt = true;
2764        if (mPausedInt && mPausedNs > 0) {
2765            // audio track is active and internally paused with timeout.
2766            mPausedInt = false;
2767            mMyCond.signal();
2768        }
2769    }
2770}
2771
2772void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2773{
2774    AutoMutex _l(mMyLock);
2775    mPausedInt = true;
2776    mPausedNs = ns;
2777}
2778
2779} // namespace android
2780