AudioTrack.cpp revision 2148bf0e79c436b8764b9edc4c8f2730cce98a32
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/primitives.h>
26#include <binder/IPCThreadState.h>
27#include <media/AudioTrack.h>
28#include <utils/Log.h>
29#include <private/media/AudioTrackShared.h>
30#include <media/IAudioFlinger.h>
31#include <media/AudioPolicyHelper.h>
32#include <media/AudioResamplerPublic.h>
33
// Polling period, in milliseconds, for client-side waits (used later in this
// file; presumably the obtainBuffer() retry interval — confirm against callers).
#define WAIT_PERIOD_MS                  10
// Upper bound, in seconds, on waiting for a stream-end indication.
#define WAIT_STREAM_END_TIMEOUT_SEC     120
// Cap on loop-end notifications delivered in a single callback pass.
static const int kMaxLoopCountNotifications = 32;
37
38namespace android {
39// ---------------------------------------------------------------------------
40
41// TODO: Move to a separate .h
42
// Return the lesser of two values, by const reference.
// (When the values compare equal, the second argument is returned,
// matching the original ternary form.)
template <typename T>
static inline const T &min(const T &x, const T &y) {
    if (x < y) {
        return x;
    }
    return y;
}
47
// Return the greater of two values, by const reference.
// (When the values compare equal, the second argument is returned,
// matching the original ternary form.)
template <typename T>
static inline const T &max(const T &x, const T &y) {
    if (x > y) {
        return x;
    }
    return y;
}
52
53static const int32_t NANOS_PER_SECOND = 1000000000;
54
55static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
56{
57    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
58}
59
// Convert a timespec to a count of microseconds, truncating the
// sub-microsecond remainder.
static int64_t convertTimespecToUs(const struct timespec &tv)
{
    const int64_t wholeSecondsUs = (int64_t)tv.tv_sec * 1000000ll;
    const int64_t fractionUs = tv.tv_nsec / 1000;
    return wholeSecondsUs + fractionUs;
}
64
65static inline nsecs_t convertTimespecToNs(const struct timespec &tv)
66{
67    return tv.tv_sec * (long long)NANOS_PER_SECOND + tv.tv_nsec;
68}
69
70// current monotonic time in microseconds.
71static int64_t getNowUs()
72{
73    struct timespec tv;
74    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
75    return convertTimespecToUs(tv);
76}
77
// FIXME: we don't use the pitch setting in the time stretcher (not working);
// instead we emulate it using our sample rate converter.
// See adjustSampleRate()/adjustSpeed()/adjustPitch() below, which implement
// the emulation when this flag is true.
static const bool kFixPitch = true; // enable pitch fix
81static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
82{
83    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
84}
85
86static inline float adjustSpeed(float speed, float pitch)
87{
88    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
89}
90
91static inline float adjustPitch(float pitch)
92{
93    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
94}
95
96// Must match similar computation in createTrack_l in Threads.cpp.
97// TODO: Move to a common library
98static size_t calculateMinFrameCount(
99        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
100        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
101{
102    // Ensure that buffer depth covers at least audio hardware latency
103    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
104    if (minBufCount < 2) {
105        minBufCount = 2;
106    }
107#if 0
108    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
109    // but keeping the code here to make it easier to add later.
110    if (minBufCount < notificationsPerBufferReq) {
111        minBufCount = notificationsPerBufferReq;
112    }
113#endif
114    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
115            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
116            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
117            /*, notificationsPerBufferReq*/);
118    return minBufCount * sourceFramesNeededWithTimestretch(
119            sampleRate, afFrameCount, afSampleRate, speed);
120}
121
// static
// Computes the minimum client buffer size, in frames, for a streaming track
// of the given stream type and sample rate, derived from the hardware
// output's latency, frame count and sample rate.
// Returns BAD_VALUE for a NULL out-pointer or a zero result, and forwards
// any AudioSystem query failure to the caller.
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    // Query the three hardware characteristics needed by the formula.
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
            /*, 0 notificationsPerBufferReq*/);

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
177
178// ---------------------------------------------------------------------------
179
180AudioTrack::AudioTrack()
181    : mStatus(NO_INIT),
182      mState(STATE_STOPPED),
183      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
184      mPreviousSchedulingGroup(SP_DEFAULT),
185      mPausedPosition(0),
186      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
187{
188    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
189    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
190    mAttributes.flags = 0x0;
191    strcpy(mAttributes.tags, "");
192}
193
// Streaming-mode constructor: forwards every argument to set() with no shared
// buffer (0 /*sharedBuffer*/) and threadCanCallJava == false.
// Construction failures are reported through mStatus (no exceptions), so
// callers must check initCheck()/mStatus before using the track.
AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}
224
// Static-buffer-mode constructor: forwards to set() with the caller-supplied
// shared memory buffer and frameCount 0 (the effective frame count is
// presumably derived from sharedBuffer's size in createTrack_l — confirm).
// Construction failures are reported through mStatus; check initCheck().
AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}
255
// Destructor: tears down the callback thread, server-side track, shared
// memory and session in a strict order. Nothing to release when set() never
// succeeded (mStatus != NO_ERROR implies no server-side resources exist).
AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            // Wake the thread out of any proxy wait before asking it to exit.
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
        }
        // Unlink the binder death notifier before dropping our reference so it
        // cannot fire against a destroyed object.
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        // Push any pending binder commands out before releasing the session.
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}
283
// One-time initialization of the track. Validates every parameter, resolves
// defaults (transfer type, stream type, format, flags), records client
// identity, spawns the callback thread when a callback is supplied, and
// creates the server-side IAudioTrack via createTrack_l().
// Returns NO_ERROR on success; BAD_VALUE / INVALID_OPERATION / the
// createTrack_l() status otherwise. On success mStatus == NO_ERROR and
// mAudioTrack != 0; may only be called once per instance.
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);

    mThreadCanCallJava = threadCanCallJava;

    // Resolve TRANSFER_DEFAULT to a concrete mode and validate that the
    // requested mode is consistent with cbf/sharedBuffer.
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        // Attribute flags can force output flags.
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        // NOTE(review): this ORs an audio_output_flags_t value into
        // mAttributes.flags, which holds audio_flags_mask_t values — it looks
        // like it should modify the local 'flags' instead (later AOSP changed
        // this to flags = (audio_output_flags_t)(flags | ...)). Confirm.
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    // Frame size: bytes per frame for proportional (PCM-like) formats,
    // otherwise 1 byte so that sizes in "frames" degrade to sizes in bytes.
    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    // notificationFrames >= 0 is a period in frames; a negative value is only
    // allowed for fast tracks and encodes "notifications per buffer".
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("notificationFrames=%d not permitted for non-fast track",
                    notificationFrames);
            return BAD_VALUE;
        }
        if (frameCount > 0) {
            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    notificationFrames, frameCount);
            return BAD_VALUE;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "notificationFrames=%d clamped to the range -%u to -%u",
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    // Trust the caller-supplied uid/pid only when the call is in-process;
    // otherwise use the binder calling identity.
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status_t status = createTrack_l();

    if (status != NO_ERROR) {
        // Roll back the callback thread; mStatus stays NO_INIT.
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    // Success: initialize the remaining client-side bookkeeping.
    mStatus = NO_ERROR;
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.

    return NO_ERROR;
}
554
555// -------------------------------------------------------------------------
556
// Starts (or resumes) playback. Transitions the client state machine,
// resets position bookkeeping when restarting from STOPPED/FLUSHED, starts
// the server-side track (restoring it first if it was invalidated), and
// resumes the callback thread. Returns INVALID_OPERATION if already active,
// otherwise the server start/restore status; on failure the previous state
// is restored.
status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    // A track paused while stopping resumes the drain rather than playback.
    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();

    // save start timestamp
    if (isOffloadedOrDirect_l()) {
        if (getTimestamp_l(mStartTs) != OK) {
            mStartTs.mPosition = 0;
        }
    } else {
        if (getTimestamp_l(&mStartEts) != OK) {
            mStartEts.clear();
        }
    }
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mRetrogradeMotionReported = false;
        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;

        if (!isOffloadedOrDirect_l()
                && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
                    (long long)(mFramesWrittenServerOffset
                            + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)mStartEts.mFlushed,
                    (long long)mFramesWritten);
            // Re-bias server positions so the restarted track reads as zero.
            mFramesWrittenServerOffset = -mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartUs = getNowUs();

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    // Clear stream-end/disabled flags; the returned value tells us whether
    // the server-side track was invalidated while we were stopped.
    int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        // Recreate the server-side track (also starts it).
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            // No callback thread: boost the client thread priority for audio,
            // remembering the old settings so stop() can restore them.
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }
    } else {
        ALOGE("start() status %d", status);
        // Failure: undo the state transition and thread/priority changes.
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}
662
// Stops playback. Offloaded tracks enter STATE_STOPPING (asynchronous drain);
// other tracks go directly to STATE_STOPPED. No-op unless currently active
// or paused.
void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        ALOGD_IF(mSharedBuffer == nullptr,
                "stop() called with %u frames delivered", mReleased.value());
        mReleased = 0;
    }

    // Wake any writer blocked in the proxy, then stop the server-side track.
    mProxy->interrupt();
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        // Keep the thread running for offloaded tracks so stream-end
        // callbacks can still be delivered during the drain.
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        // Restore the priority/scheduling saved by start().
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}
701
702bool AudioTrack::stopped() const
703{
704    AutoMutex lock(mLock);
705    return mState != STATE_ACTIVE;
706}
707
708void AudioTrack::flush()
709{
710    if (mSharedBuffer != 0) {
711        return;
712    }
713    AutoMutex lock(mLock);
714    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
715        return;
716    }
717    flush_l();
718}
719
// Internal flush: caller must hold mLock, and the track must not be active.
// Clears client-side position bookkeeping, then flushes the proxy and the
// server-side track.
void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        // presumably wakes any waiter before the flush — confirm proxy semantics
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}
738
// Pauses playback. An active track becomes PAUSED; a stopping (draining)
// offloaded track becomes PAUSED_STOPPING; anything else is a no-op.
// For offloaded tracks the current render position is cached so timestamp
// queries while paused stay correct.
void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    // Wake any blocked writer, then pause the server-side track.
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}
771
772status_t AudioTrack::setVolume(float left, float right)
773{
774    // This duplicates a test by AudioTrack JNI, but that is not the only caller
775    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
776            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
777        return BAD_VALUE;
778    }
779
780    AutoMutex lock(mLock);
781    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
782    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
783
784    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
785
786    if (isOffloaded_l()) {
787        mAudioTrack->signal();
788    }
789    return NO_ERROR;
790}
791
792status_t AudioTrack::setVolume(float volume)
793{
794    return setVolume(volume, volume);
795}
796
797status_t AudioTrack::setAuxEffectSendLevel(float level)
798{
799    // This duplicates a test by AudioTrack JNI, but that is not the only caller
800    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
801        return BAD_VALUE;
802    }
803
804    AutoMutex lock(mLock);
805    mSendLevel = level;
806    mProxy->setSendLevel(level);
807
808    return NO_ERROR;
809}
810
811void AudioTrack::getAuxEffectSendLevel(float* level) const
812{
813    if (level != NULL) {
814        *level = mSendLevel;
815    }
816}
817
// Changes the playback sample rate. Not permitted for offloaded/direct or
// fast tracks. The pitch-adjusted effective rate is validated against the
// current output's hardware rate (bounded by the maximum down-resampling
// ratio) before being pushed to the server.
status_t AudioTrack::setSampleRate(uint32_t rate)
{
    AutoMutex lock(mLock);
    if (rate == mSampleRate) {
        return NO_ERROR;    // no-op
    }
    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
        return INVALID_OPERATION;
    }
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    // NOTE: it is theoretically possible, but highly unlikely, that a device change
    // could mean a previously allowed sampling rate is no longer allowed.
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }
    // TODO: Should we also check if the buffer size is compatible?

    mSampleRate = rate;
    mProxy->setSampleRate(effectiveSampleRate);

    return NO_ERROR;
}
848
849uint32_t AudioTrack::getSampleRate() const
850{
851    AutoMutex lock(mLock);
852
853    // sample rate can be updated during playback by the offloaded decoder so we need to
854    // query the HAL and update if needed.
855// FIXME use Proxy return channel to update the rate from server and avoid polling here
856    if (isOffloadedOrDirect_l()) {
857        if (mOutput != AUDIO_IO_HANDLE_NONE) {
858            uint32_t sampleRate = 0;
859            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
860            if (status == NO_ERROR) {
861                mSampleRate = sampleRate;
862            }
863        }
864    }
865    return mSampleRate;
866}
867
uint32_t AudioTrack::getOriginalSampleRate() const
{
    // The rate captured at track creation (the output's rate when the client
    // requested 0 — see createTrack_l()); unaffected by later setSampleRate().
    return mOriginalSampleRate;
}
872
status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
    // Validates and applies a new speed/pitch pair.  mPlaybackRate stores the
    // client-requested values; the proxy receives the pitch-adjusted
    // "effective" speed/pitch and sample rate.
    // Returns NO_ERROR, INVALID_OPERATION (offloaded/direct or fast track),
    // or BAD_VALUE (effective rate out of bounds or incompatible buffer size).
    AutoMutex lock(mLock);
    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION;
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    ALOGV("setPlaybackRate (input): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    const float effectivePitch = adjustPitch(playbackRate.mPitch);
    AudioPlaybackRate playbackRateTemp = playbackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;

    ALOGV("setPlaybackRate (effective): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            effectiveRate, effectiveSpeed, effectivePitch);

    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
        ALOGV("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // Check if the buffer size is compatible.
    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
        ALOGV("setPlaybackRate(%f, %f) failed (buffer size)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    // Check resampler ratios are within bounds
    // (64-bit arithmetic guards against overflow of rate * ratio).
    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
                        playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    mPlaybackRate = playbackRate;
    //set effective rates
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    return NO_ERROR;
}
929
const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
{
    // Returns the rate as requested by the client, not the pitch-adjusted
    // effective values pushed to the proxy.
    // NOTE(review): the returned reference outlives the lock; a concurrent
    // setPlaybackRate() could modify mPlaybackRate while the caller reads it.
    AutoMutex lock(mLock);
    return mPlaybackRate;
}
935
936ssize_t AudioTrack::getBufferSizeInFrames()
937{
938    AutoMutex lock(mLock);
939    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
940        return NO_INIT;
941    }
942    return (ssize_t) mProxy->getBufferSizeInFrames();
943}
944
945status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
946{
947    if (duration == nullptr) {
948        return BAD_VALUE;
949    }
950    AutoMutex lock(mLock);
951    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
952        return NO_INIT;
953    }
954    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
955    if (bufferSizeInFrames < 0) {
956        return (status_t)bufferSizeInFrames;
957    }
958    *duration = (int64_t)((double)bufferSizeInFrames * 1000000
959            / ((double)mSampleRate * mPlaybackRate.mSpeed));
960    return NO_ERROR;
961}
962
963ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
964{
965    AutoMutex lock(mLock);
966    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
967        return NO_INIT;
968    }
969    // Reject if timed track or compressed audio.
970    if (!audio_is_linear_pcm(mFormat)) {
971        return INVALID_OPERATION;
972    }
973    return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
974}
975
976status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
977{
978    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
979        return INVALID_OPERATION;
980    }
981
982    if (loopCount == 0) {
983        ;
984    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
985            loopEnd - loopStart >= MIN_LOOP) {
986        ;
987    } else {
988        return BAD_VALUE;
989    }
990
991    AutoMutex lock(mLock);
992    // See setPosition() regarding setting parameters such as loop points or position while active
993    if (mState == STATE_ACTIVE) {
994        return INVALID_OPERATION;
995    }
996    setLoop_l(loopStart, loopEnd, loopCount);
997    return NO_ERROR;
998}
999
void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // Caller holds mLock and has verified the track is inactive (see setLoop()).
    // We do not update the periodic notification point.
    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopCount = loopCount;
    mLoopEnd = loopEnd;
    mLoopStart = loopStart;
    // Track of the loop count last reported to the client — presumably used to
    // pace loop-end callbacks (see kMaxLoopCountNotifications); verify at use site.
    mLoopCountNotified = loopCount;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
}
1012
1013status_t AudioTrack::setMarkerPosition(uint32_t marker)
1014{
1015    // The only purpose of setting marker position is to get a callback
1016    if (mCbf == NULL || isOffloadedOrDirect()) {
1017        return INVALID_OPERATION;
1018    }
1019
1020    AutoMutex lock(mLock);
1021    mMarkerPosition = marker;
1022    mMarkerReached = false;
1023
1024    sp<AudioTrackThread> t = mAudioTrackThread;
1025    if (t != 0) {
1026        t->wake();
1027    }
1028    return NO_ERROR;
1029}
1030
1031status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
1032{
1033    if (isOffloadedOrDirect()) {
1034        return INVALID_OPERATION;
1035    }
1036    if (marker == NULL) {
1037        return BAD_VALUE;
1038    }
1039
1040    AutoMutex lock(mLock);
1041    mMarkerPosition.getValue(marker);
1042
1043    return NO_ERROR;
1044}
1045
1046status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1047{
1048    // The only purpose of setting position update period is to get a callback
1049    if (mCbf == NULL || isOffloadedOrDirect()) {
1050        return INVALID_OPERATION;
1051    }
1052
1053    AutoMutex lock(mLock);
1054    mNewPosition = updateAndGetPosition_l() + updatePeriod;
1055    mUpdatePeriod = updatePeriod;
1056
1057    sp<AudioTrackThread> t = mAudioTrackThread;
1058    if (t != 0) {
1059        t->wake();
1060    }
1061    return NO_ERROR;
1062}
1063
1064status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
1065{
1066    if (isOffloadedOrDirect()) {
1067        return INVALID_OPERATION;
1068    }
1069    if (updatePeriod == NULL) {
1070        return BAD_VALUE;
1071    }
1072
1073    AutoMutex lock(mLock);
1074    *updatePeriod = mUpdatePeriod;
1075
1076    return NO_ERROR;
1077}
1078
1079status_t AudioTrack::setPosition(uint32_t position)
1080{
1081    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1082        return INVALID_OPERATION;
1083    }
1084    if (position > mFrameCount) {
1085        return BAD_VALUE;
1086    }
1087
1088    AutoMutex lock(mLock);
1089    // Currently we require that the player is inactive before setting parameters such as position
1090    // or loop points.  Otherwise, there could be a race condition: the application could read the
1091    // current position, compute a new position or loop parameters, and then set that position or
1092    // loop parameters but it would do the "wrong" thing since the position has continued to advance
1093    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
1094    // to specify how it wants to handle such scenarios.
1095    if (mState == STATE_ACTIVE) {
1096        return INVALID_OPERATION;
1097    }
1098    // After setting the position, use full update period before notification.
1099    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1100    mStaticProxy->setBufferPosition(position);
1101
1102    // Waking the AudioTrackThread is not needed as this cannot be called when active.
1103    return NO_ERROR;
1104}
1105
status_t AudioTrack::getPosition(uint32_t *position)
{
    // Reports the current playback position; the source of truth depends on the
    // track type: HAL render position for offloaded/direct compressed data,
    // cached position while paused, otherwise the client/server proxy position.
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // FIXME: offloaded and direct tracks call into the HAL for render positions
    // for compressed/synced data; however, we use proxy position for pure linear pcm data
    // as we do not know the capability of the HAL for pcm position support and standby.
    // There may be some latency differences between the HAL position and the proxy position.
    if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
        uint32_t dspFrames = 0;

        // While paused, report the position cached at pause time so it stays stable.
        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames; // actually unused
            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        // If the server invalidated the track (e.g. routing change, server death),
        // try to re-create it first; errors are deliberately ignored (see below).
        if (mCblk->mFlags & CBLK_INVALID) {
            (void) restoreTrack_l("getPosition");
            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
        }

        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l().value();
    }
    return NO_ERROR;
}
1147
1148status_t AudioTrack::getBufferPosition(uint32_t *position)
1149{
1150    if (mSharedBuffer == 0) {
1151        return INVALID_OPERATION;
1152    }
1153    if (position == NULL) {
1154        return BAD_VALUE;
1155    }
1156
1157    AutoMutex lock(mLock);
1158    *position = mStaticProxy->getBufferPosition();
1159    return NO_ERROR;
1160}
1161
status_t AudioTrack::reload()
{
    // Rewinds a static (shared buffer) track to the beginning so it can be
    // replayed.  Only valid for inactive, non-offloaded static tracks.
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // Restart the periodic notification schedule and client position from zero.
    mNewPosition = mUpdatePeriod;
    (void) updateAndGetPosition_l();
    mPosition = 0;
    mPreviousTimestampValid = false;
#if 0
    // The documentation is not clear on the behavior of reload() and the restoration
    // of loop count. Historically we have not restored loop count, start, end,
    // but it makes sense if one desires to repeat playing a particular sound.
    if (mLoopCount != 0) {
        mLoopCountNotified = mLoopCount;
        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
    }
#endif
    mStaticProxy->setBufferPosition(0);
    return NO_ERROR;
}
1189
1190audio_io_handle_t AudioTrack::getOutput() const
1191{
1192    AutoMutex lock(mLock);
1193    return mOutput;
1194}
1195
1196status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1197    AutoMutex lock(mLock);
1198    if (mSelectedDeviceId != deviceId) {
1199        mSelectedDeviceId = deviceId;
1200        android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1201    }
1202    return NO_ERROR;
1203}
1204
1205audio_port_handle_t AudioTrack::getOutputDevice() {
1206    AutoMutex lock(mLock);
1207    return mSelectedDeviceId;
1208}
1209
1210audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1211    AutoMutex lock(mLock);
1212    if (mOutput == AUDIO_IO_HANDLE_NONE) {
1213        return AUDIO_PORT_HANDLE_NONE;
1214    }
1215    return AudioSystem::getDeviceIdForIo(mOutput);
1216}
1217
1218status_t AudioTrack::attachAuxEffect(int effectId)
1219{
1220    AutoMutex lock(mLock);
1221    status_t status = mAudioTrack->attachAuxEffect(effectId);
1222    if (status == NO_ERROR) {
1223        mAuxEffectId = effectId;
1224    }
1225    return status;
1226}
1227
1228audio_stream_type_t AudioTrack::streamType() const
1229{
1230    if (mStreamType == AUDIO_STREAM_DEFAULT) {
1231        return audio_attributes_to_stream_type(&mAttributes);
1232    }
1233    return mStreamType;
1234}
1235
1236// -------------------------------------------------------------------------
1237
1238// must be called with mLock held
status_t AudioTrack::createTrack_l()
{
    // Creates (or re-creates) the server-side IAudioTrack and its shared-memory
    // control block, then rebuilds all client proxy state from it.
    // On failure before AudioFlinger takes ownership of the output handle, the
    // handle is released via the "release" label at the bottom.
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    // Detach any routing callback from the previous output; it is re-registered
    // on the new output just before returning.
    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
    }
    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;

    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.

    status_t status;
    status = AudioSystem::getOutputForAttr(attr, &output,
                                           mSessionId, &streamType, mClientUid,
                                           mSampleRate, mFormat, mChannelMask,
                                           mFlags, mSelectedDeviceId, mOffloadInfo);

    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
              " channel mask %#x, flags %#x",
              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all
    status = AudioSystem::getLatency(output, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }
    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);

    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    // TODO consider making this a member variable if there are other uses for it later
    size_t afFrameCountHAL;
    status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
    if (status != NO_ERROR) {
        ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
        goto release;
    }
    ALOG_ASSERT(afFrameCountHAL > 0);

    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    // A client-requested rate of 0 means "use the output's native rate".
    if (mSampleRate == 0) {
        mSampleRate = mAfSampleRate;
        mOriginalSampleRate = mAfSampleRate;
    }

    // Client can only express a preference for FAST.  Server will perform additional tests.
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        bool useCaseAllowed =
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
        // sample rates must also match
        bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
        if (!fastAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
                "track %u Hz, output %u Hz",
                mTransfer, mSampleRate, mAfSampleRate);
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    mNotificationFramesAct = mNotificationFramesReq;

    // Compute the client's requested frame count; the rules differ for
    // non-proportional formats, shared (static) buffers, and streaming tracks.
    size_t frameCount = mReqFrameCount;
    if (!audio_has_proportional_frames(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = mAfFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(mFormat);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSize;
    } else {
        size_t minFrameCount = 0;
        // For fast tracks the frame count calculations and checks are mostly done by server,
        // but we try to respect the application's request for notifications per buffer.
        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
            if (mNotificationsPerBufferReq > 0) {
                // Avoid possible arithmetic overflow during multiplication.
                // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
                            mNotificationsPerBufferReq, afFrameCountHAL);
                } else {
                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
                }
            }
        } else {
            // for normal tracks precompute the frame count based on speed.
            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
            minFrameCount = calculateMinFrameCount(
                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
                    speed /*, 0 mNotificationsPerBufferReq*/);
        }
        if (frameCount < minFrameCount) {
            frameCount = minFrameCount;
        }
    }

    audio_output_flags_t flags = mFlags;

    // For fast tracks, pass the callback thread's tid so the server can boost
    // its scheduling priority.
    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
            tid = mAudioTrackThread->getTid();
        }
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    audio_session_t originalSessionId = mSessionId;
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &flags,
                                                      mSharedBuffer,
                                                      output,
                                                      mClientPid,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);
    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
            "session ID changed from %d to %d", originalSessionId, mSessionId);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    // FIXME compare to AudioRecord
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        // Re-creation: detach death notification from the old binder first.
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (flags & AUDIO_OUTPUT_FLAG_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
            if (!mThreadCanCallJava) {
                mAwaitBoost = true;
            }
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
        }
    }
    mFlags = flags;

    // Make sure that application is notified with sufficient margin before underrun.
    // The client can divide the AudioTrack buffer into sub-buffers,
    // and expresses its desire to server as the notification frame count.
    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
        size_t maxNotificationFrames;
        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
            // notify every HAL buffer, regardless of the size of the track buffer
            maxNotificationFrames = afFrameCountHAL;
        } else {
            // For normal tracks, use at least double-buffering if no sample rate conversion,
            // or at least triple-buffering if there is sample rate conversion
            const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
            maxNotificationFrames = frameCount / nBuffering;
        }
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
            if (mNotificationFramesAct == 0) {
                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
                    maxNotificationFrames, frameCount);
            } else {
                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
                    mNotificationFramesAct, maxNotificationFrames, frameCount);
            }
            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = cblk + 1;
    } else {
        buffers = mSharedBuffer->pointer();
        if (buffers == NULL) {
            ALOGE("Could not get buffer pointer");
            return NO_INIT;
        }
    }

    // Re-attach any previously attached aux effect to the new track.
    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
    // FIXME don't believe this lie
    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // reset server position to 0 as we have new cblk.
    mServer = 0;

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    // Push the pitch-adjusted ("effective") rate/speed/pitch to the new proxy,
    // mirroring what setPlaybackRate() does.
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    if (mDeviceCallback != 0) {
        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
    }

    return NO_ERROR;
    }

release:
    AudioSystem::releaseOutput(output, streamType, mSessionId);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}
1578
1579status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1580{
1581    if (audioBuffer == NULL) {
1582        if (nonContig != NULL) {
1583            *nonContig = 0;
1584        }
1585        return BAD_VALUE;
1586    }
1587    if (mTransfer != TRANSFER_OBTAIN) {
1588        audioBuffer->frameCount = 0;
1589        audioBuffer->size = 0;
1590        audioBuffer->raw = NULL;
1591        if (nonContig != NULL) {
1592            *nonContig = 0;
1593        }
1594        return INVALID_OPERATION;
1595    }
1596
1597    const struct timespec *requested;
1598    struct timespec timeout;
1599    if (waitCount == -1) {
1600        requested = &ClientProxy::kForever;
1601    } else if (waitCount == 0) {
1602        requested = &ClientProxy::kNonBlocking;
1603    } else if (waitCount > 0) {
1604        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1605        timeout.tv_sec = ms / 1000;
1606        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1607        requested = &timeout;
1608    } else {
1609        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1610        requested = NULL;
1611    }
1612    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1613}
1614
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // Core buffer acquisition: asks the proxy for writable space and transparently
    // recovers from track invalidation (DEAD_OBJECT) or server-side disable
    // (NOT_ENOUGH_DATA), retrying up to kMaxTries times.
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    // Bound the number of recovery attempts before giving up.
    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        // Restore failed: report an empty buffer to the caller.
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            // NOT_ENOUGH_DATA may mean the server disabled the track — ask it to restart.
            if (status == NOT_ENOUGH_DATA) {
                restartIfDisabled();
            }

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            // Track is stopping: hand out no more buffers; report interruption.
            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);
    } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));

    // Publish the result (possibly empty on error) back into the caller's Buffer.
    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSize;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}
1689
1690void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1691{
1692    // FIXME add error checking on mode, by adding an internal version
1693    if (mTransfer == TRANSFER_SHARED) {
1694        return;
1695    }
1696
1697    size_t stepCount = audioBuffer->size / mFrameSize;
1698    if (stepCount == 0) {
1699        return;
1700    }
1701
1702    Proxy::Buffer buffer;
1703    buffer.mFrameCount = stepCount;
1704    buffer.mRaw = audioBuffer->raw;
1705
1706    AutoMutex lock(mLock);
1707    mReleased += stepCount;
1708    mInUnderrun = false;
1709    mProxy->releaseBuffer(&buffer);
1710
1711    // restart track if it was disabled by audioflinger due to previous underrun
1712    restartIfDisabled();
1713}
1714
1715void AudioTrack::restartIfDisabled()
1716{
1717    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1718    if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1719        ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1720        // FIXME ignoring status
1721        mAudioTrack->start();
1722    }
1723}
1724
1725// -------------------------------------------------------------------------
1726
1727ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1728{
1729    if (mTransfer != TRANSFER_SYNC) {
1730        return INVALID_OPERATION;
1731    }
1732
1733    if (isDirect()) {
1734        AutoMutex lock(mLock);
1735        int32_t flags = android_atomic_and(
1736                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1737                            &mCblk->mFlags);
1738        if (flags & CBLK_INVALID) {
1739            return DEAD_OBJECT;
1740        }
1741    }
1742
1743    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1744        // Sanity-check: user is most-likely passing an error code, and it would
1745        // make the return value ambiguous (actualSize vs error).
1746        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
1747        return BAD_VALUE;
1748    }
1749
1750    size_t written = 0;
1751    Buffer audioBuffer;
1752
1753    while (userSize >= mFrameSize) {
1754        audioBuffer.frameCount = userSize / mFrameSize;
1755
1756        status_t err = obtainBuffer(&audioBuffer,
1757                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1758        if (err < 0) {
1759            if (written > 0) {
1760                break;
1761            }
1762            if (err == TIMED_OUT || err == -EINTR) {
1763                err = WOULD_BLOCK;
1764            }
1765            return ssize_t(err);
1766        }
1767
1768        size_t toWrite = audioBuffer.size;
1769        memcpy(audioBuffer.i8, buffer, toWrite);
1770        buffer = ((const char *) buffer) + toWrite;
1771        userSize -= toWrite;
1772        written += toWrite;
1773
1774        releaseBuffer(&audioBuffer);
1775    }
1776
1777    if (written > 0) {
1778        mFramesWritten += written / mFrameSize;
1779    }
1780    return written;
1781}
1782
1783// -------------------------------------------------------------------------
1784
// Body of the callback thread (AudioTrackThread).  Performs one service pass:
// waits for the FAST-track priority boost if pending, handles track invalidation,
// waits for stream end when stopping, delivers pending event callbacks (underrun,
// loop end, buffer end, marker, new position, new IAudioTrack), and for
// TRANSFER_CALLBACK tracks pumps EVENT_MORE_DATA to let the client fill the buffer.
// Returns how long (ns) the thread should sleep before calling again:
//   0           - run again immediately
//   NS_INACTIVE - do not run again until the track is (re)started
//   NS_NEVER    - give up permanently (unrecoverable error)
//   NS_WHENEVER - no particular deadline
//   > 0         - estimated time until the next timed event
nsecs_t AudioTrack::processAudioBuffer()
{
    // Currently the AudioTrack thread is not created if there are no callbacks.
    // Would it ever make sense to run the thread, even without callbacks?
    // If so, then replace this by checks at each use for mCbf != NULL.
    LOG_ALWAYS_FATAL_IF(mCblk == NULL);

    mLock.lock();
    if (mAwaitBoost) {
        mAwaitBoost = false;
        mLock.unlock();
        static const int32_t kMaxTries = 5;
        int32_t tryCounter = kMaxTries;
        uint32_t pollUs = 10000;
        // Poll with exponential backoff until this thread has been promoted to a
        // real-time scheduling policy (SCHED_FIFO/SCHED_RR).
        do {
            int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
            if (policy == SCHED_FIFO || policy == SCHED_RR) {
                break;
            }
            usleep(pollUs);
            pollUs <<= 1;
        } while (tryCounter-- > 0);
        if (tryCounter < 0) {
            ALOGE("did not receive expected priority boost on time");
        }
        // Run again immediately
        return 0;
    }

    // Can only reference mCblk while locked
    int32_t flags = android_atomic_and(
        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);

    // Check for track invalidation
    if (flags & CBLK_INVALID) {
        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
        // AudioSystem cache. We should not exit here but after calling the callback so
        // that the upper layers can recreate the track
        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
            status_t status __unused = restoreTrack_l("processAudioBuffer");
            // FIXME unused status
            // after restoration, continue below to make sure that the loop and buffer events
            // are notified because they have been cleared from mCblk->mFlags above.
        }
    }

    bool waitStreamEnd = mState == STATE_STOPPING;
    bool active = mState == STATE_ACTIVE;

    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
    bool newUnderrun = false;
    if (flags & CBLK_UNDERRUN) {
#if 0
        // Currently in shared buffer mode, when the server reaches the end of buffer,
        // the track stays active in continuous underrun state.  It's up to the application
        // to pause or stop the track, or set the position to a new offset within buffer.
        // This was some experimental code to auto-pause on underrun.   Keeping it here
        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
        if (mTransfer == TRANSFER_SHARED) {
            mState = STATE_PAUSED;
            active = false;
        }
#endif
        if (!mInUnderrun) {
            mInUnderrun = true;
            newUnderrun = true;
        }
    }

    // Get current position of server
    Modulo<uint32_t> position(updateAndGetPosition_l());

    // Manage marker callback
    bool markerReached = false;
    Modulo<uint32_t> markerPosition(mMarkerPosition);
    // uses 32 bit wraparound for comparison with position.
    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
        mMarkerReached = markerReached = true;
    }

    // Determine number of new position callback(s) that will be needed, while locked
    size_t newPosCount = 0;
    Modulo<uint32_t> newPosition(mNewPosition);
    uint32_t updatePeriod = mUpdatePeriod;
    // FIXME fails for wraparound, need 64 bits
    if (updatePeriod > 0 && position >= newPosition) {
        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
        mNewPosition += updatePeriod * newPosCount;
    }

    // Cache other fields that will be needed soon
    uint32_t sampleRate = mSampleRate;
    float speed = mPlaybackRate.mSpeed;
    const uint32_t notificationFrames = mNotificationFramesAct;
    if (mRefreshRemaining) {
        mRefreshRemaining = false;
        mRemainingFrames = notificationFrames;
        mRetryOnPartialBuffer = false;
    }
    size_t misalignment = mProxy->getMisalignment();
    uint32_t sequence = mSequence;
    sp<AudioTrackClientProxy> proxy = mProxy;

    // Determine the number of new loop callback(s) that will be needed, while locked.
    int loopCountNotifications = 0;
    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END

    if (mLoopCount > 0) {
        int loopCount;
        size_t bufferPosition;
        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
        mLoopCountNotified = loopCount; // discard any excess notifications
    } else if (mLoopCount < 0) {
        // FIXME: We're not accurate with notification count and position with infinite looping
        // since loopCount from server side will always return -1 (we could decrement it).
        size_t bufferPosition = mStaticProxy->getBufferPosition();
        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
        loopPeriod = mLoopEnd - bufferPosition;
    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
        size_t bufferPosition = mStaticProxy->getBufferPosition();
        loopPeriod = mFrameCount - bufferPosition;
    }

    // These fields don't need to be cached, because they are assigned only by set():
    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
    // mFlags is also assigned by createTrack_l(), but not the bit we care about.

    // From here on, work only from the cached copies; callbacks run unlocked.
    mLock.unlock();

    // get anchor time to account for callbacks.
    const nsecs_t timeBeforeCallbacks = systemTime();

    if (waitStreamEnd) {
        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
        // (and make sure we don't callback for more data while we're stopping).
        // This helps with position, marker notifications, and track invalidation.
        struct timespec timeout;
        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
        timeout.tv_nsec = 0;

        status_t status = proxy->waitStreamEndDone(&timeout);
        switch (status) {
        case NO_ERROR:
        case DEAD_OBJECT:
        case TIMED_OUT:
            if (status != DEAD_OBJECT) {
                // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
                // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
                mCbf(EVENT_STREAM_END, mUserData, NULL);
            }
            {
                AutoMutex lock(mLock);
                // The previously assigned value of waitStreamEnd is no longer valid,
                // since the mutex has been unlocked and either the callback handler
                // or another thread could have re-started the AudioTrack during that time.
                waitStreamEnd = mState == STATE_STOPPING;
                if (waitStreamEnd) {
                    mState = STATE_STOPPED;
                    mReleased = 0;
                }
            }
            if (waitStreamEnd && status != DEAD_OBJECT) {
               return NS_INACTIVE;
            }
            break;
        }
        return 0;
    }

    // perform callbacks while unlocked
    if (newUnderrun) {
        mCbf(EVENT_UNDERRUN, mUserData, NULL);
    }
    while (loopCountNotifications > 0) {
        mCbf(EVENT_LOOP_END, mUserData, NULL);
        --loopCountNotifications;
    }
    if (flags & CBLK_BUFFER_END) {
        mCbf(EVENT_BUFFER_END, mUserData, NULL);
    }
    if (markerReached) {
        mCbf(EVENT_MARKER, mUserData, &markerPosition);
    }
    while (newPosCount > 0) {
        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
        mCbf(EVENT_NEW_POS, mUserData, &temp);
        newPosition += updatePeriod;
        newPosCount--;
    }

    if (mObservedSequence != sequence) {
        mObservedSequence = sequence;
        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
        // for offloaded tracks, just wait for the upper layers to recreate the track
        if (isOffloadedOrDirect()) {
            return NS_INACTIVE;
        }
    }

    // if inactive, then don't run me again until re-started
    if (!active) {
        return NS_INACTIVE;
    }

    // Compute the estimated time until the next timed event (position, markers, loops)
    // FIXME only for non-compressed audio
    uint32_t minFrames = ~0;
    if (!markerReached && position < markerPosition) {
        minFrames = (markerPosition - position).value();
    }
    if (loopPeriod > 0 && loopPeriod < minFrames) {
        // loopPeriod is already adjusted for actual position.
        minFrames = loopPeriod;
    }
    if (updatePeriod > 0) {
        minFrames = min(minFrames, (newPosition - position).value());
    }

    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
    static const uint32_t kPoll = 0;
    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
        minFrames = kPoll * notificationFrames;
    }

    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
    const nsecs_t timeAfterCallbacks = systemTime();

    // Convert frame units to time units
    nsecs_t ns = NS_WHENEVER;
    if (minFrames != (uint32_t) ~0) {
        ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
        // TODO: Should we warn if the callback time is too long?
        if (ns < 0) ns = 0;
    }

    // If not supplying data by EVENT_MORE_DATA, then we're done
    if (mTransfer != TRANSFER_CALLBACK) {
        return ns;
    }

    // EVENT_MORE_DATA callback handling.
    // Timing for linear pcm audio data formats can be derived directly from the
    // buffer fill level.
    // Timing for compressed data is not directly available from the buffer fill level,
    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
    // to return a certain fill level.

    struct timespec timeout;
    const struct timespec *requested = &ClientProxy::kForever;
    if (ns != NS_WHENEVER) {
        timeout.tv_sec = ns / 1000000000LL;
        timeout.tv_nsec = ns % 1000000000LL;
        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
        requested = &timeout;
    }

    size_t writtenFrames = 0;
    // Fill loop: obtain writable space, hand it to the client via EVENT_MORE_DATA,
    // then release whatever the client filled in.
    while (mRemainingFrames > 0) {

        Buffer audioBuffer;
        audioBuffer.frameCount = mRemainingFrames;
        size_t nonContig;
        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
        requested = &ClientProxy::kNonBlocking;
        size_t avail = audioBuffer.frameCount + nonContig;
        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
        if (err != NO_ERROR) {
            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
                    (isOffloaded() && (err == DEAD_OBJECT))) {
                // FIXME bug 25195759
                return 1000000;
            }
            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
            return NS_NEVER;
        }

        if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
            mRetryOnPartialBuffer = false;
            if (avail < mRemainingFrames) {
                if (ns > 0) { // account for obtain time
                    const nsecs_t timeNow = systemTime();
                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
                }
                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
                    ns = myns;
                }
                return ns;
            }
        }

        size_t reqSize = audioBuffer.size;
        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
        size_t writtenSize = audioBuffer.size;

        // Sanity check on returned size
        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
                    reqSize, ssize_t(writtenSize));
            return NS_NEVER;
        }

        if (writtenSize == 0) {
            // The callback is done filling buffers
            // Keep this thread going to handle timed events and
            // still try to get more data in intervals of WAIT_PERIOD_MS
            // but don't just loop and block the CPU, so wait

            // mCbf(EVENT_MORE_DATA, ...) might either
            // (1) Block until it can fill the buffer, returning 0 size on EOS.
            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
            // (3) Return 0 size when no data is available, does not wait for more data.
            //
            // (1) and (2) occurs with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
            // We try to compute the wait time to avoid a tight sleep-wait cycle,
            // especially for case (3).
            //
            // The decision to support (1) and (2) affect the sizing of mRemainingFrames
            // and this loop; whereas for case (3) we could simply check once with the full
            // buffer size and skip the loop entirely.

            nsecs_t myns;
            if (audio_has_proportional_frames(mFormat)) {
                // time to wait based on buffer occupancy
                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
                // audio flinger thread buffer size (TODO: adjust for fast tracks)
                // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
                // add a half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
                myns = datans + (afns / 2);
            } else {
                // FIXME: This could ping quite a bit if the buffer isn't full.
                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
                myns = kWaitPeriodNs;
            }
            if (ns > 0) { // account for obtain and callback time
                const nsecs_t timeNow = systemTime();
                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
            }
            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
                ns = myns;
            }
            return ns;
        }

        size_t releasedFrames = writtenSize / mFrameSize;
        audioBuffer.frameCount = releasedFrames;
        mRemainingFrames -= releasedFrames;
        if (misalignment >= releasedFrames) {
            misalignment -= releasedFrames;
        } else {
            misalignment = 0;
        }

        releaseBuffer(&audioBuffer);
        writtenFrames += releasedFrames;

        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
        // if callback doesn't like to accept the full chunk
        if (writtenSize < reqSize) {
            continue;
        }

        // There could be enough non-contiguous frames available to satisfy the remaining request
        if (mRemainingFrames <= nonContig) {
            continue;
        }

#if 0
        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
        // that total to a sum == notificationFrames.
        if (0 < misalignment && misalignment <= mRemainingFrames) {
            mRemainingFrames = misalignment;
            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
        }
#endif

    }
    if (writtenFrames > 0) {
        AutoMutex lock(mLock);
        mFramesWritten += writtenFrames;
    }
    mRemainingFrames = notificationFrames;
    mRetryOnPartialBuffer = true;

    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
    return 0;
}
2183
// Re-creates the IAudioTrack after media server death or voluntary invalidation.
// Called with mLock held; 'from' names the caller for logging.  Bumps mSequence so
// other threads can detect the re-creation, then attempts createTrack_l() and, on
// success, restores the playback position, static-buffer/loop state, and play state.
// Returns NO_ERROR on success; DEAD_OBJECT for offloaded/direct tracks or when
// reconnection is disallowed; otherwise the createTrack_l()/start() failure code.
status_t AudioTrack::restoreTrack_l(const char *from)
{
    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
    ++mSequence;

    // refresh the audio configuration cache in this process to make sure we get new
    // output parameters and new IAudioFlinger in createTrack_l()
    AudioSystem::clearAudioConfigCache();

    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
        // reconsider enabling for linear PCM encodings when position can be preserved.
        return DEAD_OBJECT;
    }

    // Save so we can return count since creation.
    mUnderrunCountOffset = getUnderrunCount_l();

    // save the old static buffer position
    uint32_t staticPosition = 0;
    size_t bufferPosition = 0;
    int loopCount = 0;
    if (mStaticProxy != 0) {
        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
        staticPosition = mStaticProxy->getPosition().unsignedValue();
    }

    // retry creation with the originally-requested flags
    mFlags = mOrigFlags;

    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
    // following member variables: mAudioTrack, mCblkMemory and mCblk.
    // It will also delete the strong references on previous IAudioTrack and IMemory.
    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
    status_t result = createTrack_l();

    if (result == NO_ERROR) {
        // take the frames that will be lost by track recreation into account in saved position
        // For streaming tracks, this is the amount we obtained from the user/client
        // (not the number actually consumed at the server - those are already lost).
        if (mStaticProxy == 0) {
            mPosition = mReleased;
        }
        // Continue playback from last known position and restore loop.
        if (mStaticProxy != 0) {
            if (loopCount != 0) {
                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
                        mLoopStart, mLoopEnd, loopCount);
            } else {
                mStaticProxy->setBufferPosition(bufferPosition);
                if (bufferPosition == mFrameCount) {
                    ALOGD("restoring track at end of static buffer");
                }
            }
        }
        if (mState == STATE_ACTIVE) {
            result = mAudioTrack->start();
        }
        // server resets to zero so we offset
        mFramesWrittenServerOffset =
                mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
        mFramesWrittenAtRestore = mFramesWrittenServerOffset;
    }
    if (result != NO_ERROR) {
        ALOGW("restoreTrack_l() failed status %d", result);
        mState = STATE_STOPPED;
        mReleased = 0;
    }

    return result;
}
2255
2256Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2257{
2258    // This is the sole place to read server consumed frames
2259    Modulo<uint32_t> newServer(mProxy->getPosition());
2260    const int32_t delta = (newServer - mServer).signedValue();
2261    // TODO There is controversy about whether there can be "negative jitter" in server position.
2262    //      This should be investigated further, and if possible, it should be addressed.
2263    //      A more definite failure mode is infrequent polling by client.
2264    //      One could call (void)getPosition_l() in releaseBuffer(),
2265    //      so mReleased and mPosition are always lock-step as best possible.
2266    //      That should ensure delta never goes negative for infrequent polling
2267    //      unless the server has more than 2^31 frames in its buffer,
2268    //      in which case the use of uint32_t for these counters has bigger issues.
2269    ALOGE_IF(delta < 0,
2270            "detected illegal retrograde motion by the server: mServer advanced by %d",
2271            delta);
2272    mServer = newServer;
2273    if (delta > 0) { // avoid retrograde
2274        mPosition += delta;
2275    }
2276    return mPosition;
2277}
2278
2279bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
2280{
2281    // applicable for mixing tracks only (not offloaded or direct)
2282    if (mStaticProxy != 0) {
2283        return true; // static tracks do not have issues with buffer sizing.
2284    }
2285    const size_t minFrameCount =
2286            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
2287                /*, 0 mNotificationsPerBufferReq*/);
2288    ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
2289            mFrameCount, minFrameCount);
2290    return mFrameCount >= minFrameCount;
2291}
2292
// Forwards a key/value parameter string to the server-side IAudioTrack,
// holding mLock across the binder call.  Returns the server's status.
// NOTE(review): which keys are honored depends on the server/output stream —
// confirm against the HAL in use.
status_t AudioTrack::setParameters(const String8& keyValuePairs)
{
    AutoMutex lock(mLock);
    return mAudioTrack->setParameters(keyValuePairs);
}
2298
// Public entry point for extended timestamps: validates the out-pointer,
// then forwards to getTimestamp_l() with mLock held.
// Returns BAD_VALUE for a NULL pointer, otherwise the result of getTimestamp_l().
status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
{
    if (timestamp == nullptr) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    return getTimestamp_l(timestamp);
}
2307
// Fills *timestamp with the per-location positions/times from the proxy, with the
// client position taken from mFramesWritten and server-side positions adjusted by
// mFramesWrittenServerOffset (so a track restore does not appear as a jump).
// Called with mLock held.  Returns:
//   OK                - at least one server/kernel location had a valid time
//   WOULD_BLOCK       - no server-side timestamp available yet
//   DEAD_OBJECT       - track was invalidated and could not be restored
//   INVALID_OPERATION - offloaded/direct tracks are not supported here
status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
{
    if (mCblk->mFlags & CBLK_INVALID) {
        const status_t status = restoreTrack_l("getTimestampExtended");
        if (status != OK) {
            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
            // recommending that the track be recreated.
            return DEAD_OBJECT;
        }
    }
    // check for offloaded/direct here in case restoring somehow changed those flags.
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION; // not supported
    }
    status_t status = mProxy->getTimestamp(timestamp);
    LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
    bool found = false;
    // client location: frames this process has written so far (no timestamp)
    timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
    timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
    // server side frame offset in case AudioTrack has been restored.
    for (int i = ExtendedTimestamp::LOCATION_SERVER;
            i < ExtendedTimestamp::LOCATION_MAX; ++i) {
        if (timestamp->mTimeNs[i] >= 0) {
            // apply server offset (frames flushed is ignored
            // so we don't report the jump when the flush occurs).
            timestamp->mPosition[i] += mFramesWrittenServerOffset;
            found = true;
        }
    }
    return found ? OK : WOULD_BLOCK;
}
2339
// Public entry point for legacy AudioTimestamp queries: forwards to
// getTimestamp_l() with mLock held.
status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
{
    AutoMutex lock(mLock);
    return getTimestamp_l(timestamp);
}
2345
// Returns a playback timestamp: a presented frame position paired with the
// CLOCK_MONOTONIC time at which that frame was presented.  Caller must hold mLock.
// Return codes:
//   WOULD_BLOCK       - no timestamp available yet (position implicitly zero
//                       after flush/stop, or right after start).
//   DEAD_OBJECT       - track was invalidated and could not be restored;
//                       caller should recreate the AudioTrack.
//   INVALID_OPERATION - non-offloaded track queried while stopping.
//   otherwise         - status from the underlying timestamp query.
status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
{
    bool previousTimestampValid = mPreviousTimestampValid;
    // Set false here to cover all the error return cases.
    mPreviousTimestampValid = false;

    // Only ACTIVE/PAUSED tracks (and offloaded tracks that are stopping)
    // can produce a meaningful timestamp.
    switch (mState) {
    case STATE_ACTIVE:
    case STATE_PAUSED:
        break; // handle below
    case STATE_FLUSHED:
    case STATE_STOPPED:
        return WOULD_BLOCK;
    case STATE_STOPPING:
    case STATE_PAUSED_STOPPING:
        if (!isOffloaded_l()) {
            return INVALID_OPERATION;
        }
        break; // offloaded tracks handled below
    default:
        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
        break;
    }

    // Server invalidated the track (e.g. output device change); try to restore.
    if (mCblk->mFlags & CBLK_INVALID) {
        const status_t status = restoreTrack_l("getTimestamp");
        if (status != OK) {
            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
            // recommending that the track be recreated.
            return DEAD_OBJECT;
        }
    }

    // The presented frame count must always lag behind the consumed frame count.
    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.

    status_t status;
    if (isOffloadedOrDirect_l()) {
        // use Binder to get timestamp
        status = mAudioTrack->getTimestamp(timestamp);
    } else {
        // read timestamp from shared memory
        ExtendedTimestamp ets;
        status = mProxy->getTimestamp(&ets);
        if (status == OK) {
            ExtendedTimestamp::Location location;
            status = ets.getBestTimestamp(&timestamp, &location);

            if (status == OK) {
                // It is possible that the best location has moved from the kernel to the server.
                // In this case we adjust the position from the previous computed latency.
                if (location == ExtendedTimestamp::LOCATION_SERVER) {
                    ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
                            "getTimestamp() location moved from kernel to server");
                    // check that the last kernel OK time info exists and the positions
                    // are valid (if they predate the current track, the positions may
                    // be zero or negative).
                    const int64_t frames =
                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
                            ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
                            ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
                            ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
                            ?
                            // No valid last-kernel info: estimate the pipeline depth in
                            // frames from the mixer latency at the current rate/speed.
                            int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
                                    / 1000)
                            :
                            // Otherwise use the actual server-vs-kernel position gap.
                            (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
                            - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
                    ALOGV("frame adjustment:%lld  timestamp:%s",
                            (long long)frames, ets.toString().c_str());
                    // Clamp so the adjusted position never goes negative.
                    if (frames >= ets.mPosition[location]) {
                        timestamp.mPosition = 0;
                    } else {
                        timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
                    }
                } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
                    ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
                            "getTimestamp() location moved from server to kernel");
                }

                // We update the timestamp time even when paused.
                if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
                    const int64_t now = systemTime();
                    const int64_t at = convertTimespecToNs(timestamp.mTime);
                    // lag is how stale the reported time may legitimately be;
                    // fall back to the mixer latency if kernel info is missing.
                    const int64_t lag =
                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
                                ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
                            ? int64_t(mAfLatency * 1000000LL)
                            : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
                             * NANOS_PER_SECOND / mSampleRate;
                    const int64_t limit = now - lag; // no earlier than this limit
                    if (at < limit) {
                        ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
                                (long long)lag, (long long)at, (long long)limit);
                        timestamp.mTime.tv_sec = limit / NANOS_PER_SECOND;
                        timestamp.mTime.tv_nsec = limit % NANOS_PER_SECOND; // compiler opt.
                    }
                }
                mPreviousLocation = location;
            } else {
                // right after AudioTrack is started, one may not find a timestamp
                ALOGV("getBestTimestamp did not find timestamp");
            }
        }
        if (status == INVALID_OPERATION) {
            // INVALID_OPERATION occurs when no timestamp has been issued by the server;
            // other failures are signaled by a negative time.
            // If we come out of FLUSHED or STOPPED where the position is known
            // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
            // "zero" for NuPlayer).  We don't convert for track restoration as position
            // does not reset.
            ALOGV("timestamp server offset:%lld restore frames:%lld",
                    (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
            if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
                status = WOULD_BLOCK;
            }
        }
    }
    if (status != NO_ERROR) {
        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
        return status;
    }
    if (isOffloadedOrDirect_l()) {
        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
            // use cached paused position in case another offloaded track is running.
            timestamp.mPosition = mPausedPosition;
            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
            // TODO: adjust for delay
            return NO_ERROR;
        }

        // Check whether a pending flush or stop has completed, as those commands may
        // be asynchronous or return near finish or exhibit glitchy behavior.
        //
        // Originally this showed up as the first timestamp being a continuation of
        // the previous song under gapless playback.
        // However, we sometimes see zero timestamps, then a glitch of
        // the previous song's position, and then correct timestamps afterwards.
        if (mStartUs != 0 && mSampleRate != 0) {
            static const int kTimeJitterUs = 100000; // 100 ms
            static const int k1SecUs = 1000000;

            const int64_t timeNow = getNowUs();

            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
                if (timestampTimeUs < mStartUs) {
                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
                }
                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
                        / ((double)mSampleRate * mPlaybackRate.mSpeed);

                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
                    // Verify that the counter can't count faster than the sample rate
                    // since the start time.  If greater, then that means we may have failed
                    // to completely flush or stop the previous playing track.
                    ALOGW_IF(!mTimestampStartupGlitchReported,
                            "getTimestamp startup glitch detected"
                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
                            timestamp.mPosition);
                    mTimestampStartupGlitchReported = true;
                    if (previousTimestampValid
                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
                        timestamp = mPreviousTimestamp;
                        mPreviousTimestampValid = true;
                        return NO_ERROR;
                    }
                    return WOULD_BLOCK;
                }
                if (deltaPositionByUs != 0) {
                    mStartUs = 0; // don't check again, we got valid nonzero position.
                }
            } else {
                mStartUs = 0; // don't check again, start time expired.
            }
            mTimestampStartupGlitchReported = false;
        }
    } else {
        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
        (void) updateAndGetPosition_l();
        // Server consumed (mServer) and presented both use the same server time base,
        // and server consumed is always >= presented.
        // The delta between these represents the number of frames in the buffer pipeline.
        // If this delta between these is greater than the client position, it means that
        // actually presented is still stuck at the starting line (figuratively speaking),
        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
        // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
        // mPosition exceeds 32 bits.
        // TODO Remove when timestamp is updated to contain pipeline status info.
        const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
        if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
                && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
            return INVALID_OPERATION;
        }
        // Convert timestamp position from server time base to client time base.
        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
        // But if we change it to 64-bit then this could fail.
        // Use Modulo computation here.
        timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
        // Immediately after a call to getPosition_l(), mPosition and
        // mServer both represent the same frame position.  mPosition is
        // in client's point of view, and mServer is in server's point of
        // view.  So the difference between them is the "fudge factor"
        // between client and server views due to stop() and/or new
        // IAudioTrack.  And timestamp.mPosition is initially in server's
        // point of view, so we need to apply the same fudge factor to it.
    }

    // Prevent retrograde motion in timestamp.
    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
    if (status == NO_ERROR) {
        if (previousTimestampValid) {
            const int64_t previousTimeNanos = convertTimespecToNs(mPreviousTimestamp.mTime);
            const int64_t currentTimeNanos = convertTimespecToNs(timestamp.mTime);
            // Time must be monotonic: pin to the previous reported time if it regressed.
            if (currentTimeNanos < previousTimeNanos) {
                ALOGW("retrograde timestamp time corrected, %lld < %lld",
                        (long long)currentTimeNanos, (long long)previousTimeNanos);
                timestamp.mTime = mPreviousTimestamp.mTime;
            }

            // Looking at signed delta will work even when the timestamps
            // are wrapping around.
            int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
                    - mPreviousTimestamp.mPosition).signedValue();
            if (deltaPosition < 0) {
                // Only report once per position instead of spamming the log.
                if (!mRetrogradeMotionReported) {
                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
                            deltaPosition,
                            timestamp.mPosition,
                            mPreviousTimestamp.mPosition);
                    mRetrogradeMotionReported = true;
                }
            } else {
                mRetrogradeMotionReported = false;
            }
            // Position must be monotonic too: pin to the previous position.
            if (deltaPosition < 0) {
                timestamp.mPosition = mPreviousTimestamp.mPosition;
                deltaPosition = 0;
            }
#if 0
            // Uncomment this to verify audio timestamp rate.
            const int64_t deltaTime =
                    convertTimespecToNs(timestamp.mTime) - previousTimeNanos;
            if (deltaTime != 0) {
                const int64_t computedSampleRate =
                        deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
                ALOGD("computedSampleRate:%u  sampleRate:%u",
                        (unsigned)computedSampleRate, mSampleRate);
            }
#endif
        }
        mPreviousTimestamp = timestamp;
        mPreviousTimestampValid = true;
    }

    return status;
}
2607
2608String8 AudioTrack::getParameters(const String8& keys)
2609{
2610    audio_io_handle_t output = getOutput();
2611    if (output != AUDIO_IO_HANDLE_NONE) {
2612        return AudioSystem::getParameters(output, keys);
2613    } else {
2614        return String8::empty();
2615    }
2616}
2617
2618bool AudioTrack::isOffloaded() const
2619{
2620    AutoMutex lock(mLock);
2621    return isOffloaded_l();
2622}
2623
2624bool AudioTrack::isDirect() const
2625{
2626    AutoMutex lock(mLock);
2627    return isDirect_l();
2628}
2629
2630bool AudioTrack::isOffloadedOrDirect() const
2631{
2632    AutoMutex lock(mLock);
2633    return isOffloadedOrDirect_l();
2634}
2635
2636
2637status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2638{
2639
2640    const size_t SIZE = 256;
2641    char buffer[SIZE];
2642    String8 result;
2643
2644    result.append(" AudioTrack::dump\n");
2645    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2646            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2647    result.append(buffer);
2648    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2649            mChannelCount, mFrameCount);
2650    result.append(buffer);
2651    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
2652            mSampleRate, mPlaybackRate.mSpeed, mStatus);
2653    result.append(buffer);
2654    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2655    result.append(buffer);
2656    ::write(fd, result.string(), result.size());
2657    return NO_ERROR;
2658}
2659
2660uint32_t AudioTrack::getUnderrunCount() const
2661{
2662    AutoMutex lock(mLock);
2663    return getUnderrunCount_l();
2664}
2665
2666uint32_t AudioTrack::getUnderrunCount_l() const
2667{
2668    return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2669}
2670
2671uint32_t AudioTrack::getUnderrunFrames() const
2672{
2673    AutoMutex lock(mLock);
2674    return mProxy->getUnderrunFrames();
2675}
2676
2677status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2678{
2679    if (callback == 0) {
2680        ALOGW("%s adding NULL callback!", __FUNCTION__);
2681        return BAD_VALUE;
2682    }
2683    AutoMutex lock(mLock);
2684    if (mDeviceCallback == callback) {
2685        ALOGW("%s adding same callback!", __FUNCTION__);
2686        return INVALID_OPERATION;
2687    }
2688    status_t status = NO_ERROR;
2689    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2690        if (mDeviceCallback != 0) {
2691            ALOGW("%s callback already present!", __FUNCTION__);
2692            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2693        }
2694        status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
2695    }
2696    mDeviceCallback = callback;
2697    return status;
2698}
2699
2700status_t AudioTrack::removeAudioDeviceCallback(
2701        const sp<AudioSystem::AudioDeviceCallback>& callback)
2702{
2703    if (callback == 0) {
2704        ALOGW("%s removing NULL callback!", __FUNCTION__);
2705        return BAD_VALUE;
2706    }
2707    AutoMutex lock(mLock);
2708    if (mDeviceCallback != callback) {
2709        ALOGW("%s removing different callback!", __FUNCTION__);
2710        return INVALID_OPERATION;
2711    }
2712    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2713        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2714    }
2715    mDeviceCallback = 0;
2716    return NO_ERROR;
2717}
2718
// Estimate how many milliseconds of audio remain to be played at the given
// pipeline location (SERVER or KERNEL).  *msec receives the estimate (>= 0).
// Returns BAD_VALUE for a null pointer or unsupported location,
// INVALID_OPERATION for non-PCM data or when KERNEL info is unavailable,
// otherwise NO_ERROR.
status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
{
    if (msec == nullptr ||
            (location != ExtendedTimestamp::LOCATION_SERVER
                    && location != ExtendedTimestamp::LOCATION_KERNEL)) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    // inclusive of offloaded and direct tracks.
    //
    // It is possible, but not enabled, to allow duration computation for non-pcm
    // audio_has_proportional_frames() formats because currently they have
    // the drain rate equivalent to the pcm sample rate * framesize.
    if (!isPurePcmData_l()) {
        return INVALID_OPERATION;
    }
    ExtendedTimestamp ets;
    // Preferred path: compute from the extended timestamp when the requested
    // location has valid time info.
    if (getTimestamp_l(&ets) == OK
            && ets.mTimeNs[location] > 0) {
        // Frames written by the client but not yet consumed at `location`.
        int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
                - ets.mPosition[location];
        if (diff < 0) {
            *msec = 0;
        } else {
            // ms is the playback time by frames
            int64_t ms = (int64_t)((double)diff * 1000 /
                    ((double)mSampleRate * mPlaybackRate.mSpeed));
            // clockdiff is the timestamp age (negative)
            int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
                    ets.mTimeNs[location]
                    + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
                    - systemTime(SYSTEM_TIME_MONOTONIC);

            //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
            static const int NANOS_PER_MILLIS = 1000000;
            // Subtract the timestamp age so the estimate reflects "now".
            *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
        }
        return NO_ERROR;
    }
    if (location != ExtendedTimestamp::LOCATION_SERVER) {
        return INVALID_OPERATION; // LOCATION_KERNEL is not available
    }
    // use server position directly (offloaded and direct arrive here)
    updateAndGetPosition_l();
    int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
    *msec = (diff <= 0) ? 0
            : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
    return NO_ERROR;
}
2768
// Returns true once the track has actually begun rendering audio (or, for a
// stopped track, has rendered in the past).  The heuristic compares the
// current timestamp position against the position captured at start(): while
// they are equal (or zero) nothing has advanced yet, so we report "not
// started".
bool AudioTrack::hasStarted()
{
    AutoMutex lock(mLock);
    switch (mState) {
    case STATE_STOPPED:
        if (isOffloadedOrDirect_l()) {
            // check if we have started in the past to return true.
            return mStartUs > 0;
        }
        // A normal audio track may still be draining, so
        // check if stream has ended.  This covers fasttrack position
        // instability and start/stop without any data written.
        if (mProxy->getStreamEndDone()) {
            return true;
        }
        // fall through
    case STATE_ACTIVE:
    case STATE_STOPPING:
        break;
    case STATE_PAUSED:
    case STATE_PAUSED_STOPPING:
    case STATE_FLUSHED:
        return false;  // we're not active
    default:
        LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
        break;
    }

    // wait indicates whether we need to wait for a timestamp.
    // This is conservatively figured - if we encounter an unexpected error
    // then we will not wait.
    bool wait = false;
    if (isOffloadedOrDirect_l()) {
        // Offloaded/direct tracks get a simple AudioTimestamp over Binder.
        AudioTimestamp ts;
        status_t status = getTimestamp_l(ts);
        if (status == WOULD_BLOCK) {
            wait = true;
        } else if (status == OK) {
            wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
        }
        ALOGV("hasStarted wait:%d  ts:%u  start position:%lld",
                (int)wait,
                ts.mPosition,
                (long long)mStartTs.mPosition);
    } else {
        int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
        ExtendedTimestamp ets;
        status_t status = getTimestamp_l(&ets);
        if (status == WOULD_BLOCK) {  // no SERVER or KERNEL frame info in ets
            wait = true;
        } else if (status == OK) {
            // Prefer the KERNEL position (closest to actual output), falling
            // back to SERVER when kernel info is missing.
            for (location = ExtendedTimestamp::LOCATION_KERNEL;
                    location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
                if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
                    continue;
                }
                wait = ets.mPosition[location] == 0
                        || ets.mPosition[location] == mStartEts.mPosition[location];
                break;
            }
        }
        ALOGV("hasStarted wait:%d  ets:%lld  start position:%lld",
                (int)wait,
                (long long)ets.mPosition[location],
                (long long)mStartEts.mPosition[location]);
    }
    return !wait;
}
2837
2838// =========================================================================
2839
2840void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2841{
2842    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2843    if (audioTrack != 0) {
2844        AutoMutex lock(audioTrack->mLock);
2845        audioTrack->mProxy->binderDied();
2846    }
2847}
2848
2849// =========================================================================
2850
// Worker thread that services AudioTrack callbacks via processAudioBuffer().
// Starts in the externally-paused state; resume() must be called before the
// loop does any work.
AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
      mIgnoreNextPausedInt(false)
{
}
2856
AudioTrack::AudioTrackThread::~AudioTrackThread()
{
    // Nothing to clean up; thread shutdown is driven by requestExit().
}
2860
// Main loop of the callback thread.  Returns true to be invoked again,
// false to terminate the thread.
bool AudioTrack::AudioTrackThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        if (mPaused) {
            // Externally paused: block until resume() or requestExit().
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
        if (mIgnoreNextPausedInt) {
            // resume()/wake() asked us to skip the pending internal pause.
            mIgnoreNextPausedInt = false;
            mPausedInt = false;
        }
        if (mPausedInt) {
            // Internal pause requested by a previous processAudioBuffer();
            // honor the optional timeout, else wait for a signal.
            if (mPausedNs > 0) {
                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
            } else {
                mMyCond.wait(mMyLock);
            }
            mPausedInt = false;
            return true;
        }
    }
    if (exitPending()) {
        return false;
    }
    // ns is how long to pause before the next callback, or a special code.
    nsecs_t ns = mReceiver.processAudioBuffer();
    switch (ns) {
    case 0:
        // Run again immediately.
        return true;
    case NS_INACTIVE:
        // Track inactive: pause internally until woken.
        pauseInternal();
        return true;
    case NS_NEVER:
        // Callback thread no longer needed.
        return false;
    case NS_WHENEVER:
        // Event driven: call wake() when callback notifications conditions change.
        ns = INT64_MAX;
        // fall through
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
        pauseInternal(ns);
        return true;
    }
}
2906
// Ask threadLoop() to exit.  resume() must follow Thread::requestExit() so a
// thread blocked in a condition wait is woken and then observes
// exitPending(); reversing the order could miss the wakeup.
void AudioTrack::AudioTrackThread::requestExit()
{
    // must be in this order to avoid a race condition
    Thread::requestExit();
    resume();
}
2913
2914void AudioTrack::AudioTrackThread::pause()
2915{
2916    AutoMutex _l(mMyLock);
2917    mPaused = true;
2918}
2919
2920void AudioTrack::AudioTrackThread::resume()
2921{
2922    AutoMutex _l(mMyLock);
2923    mIgnoreNextPausedInt = true;
2924    if (mPaused || mPausedInt) {
2925        mPaused = false;
2926        mPausedInt = false;
2927        mMyCond.signal();
2928    }
2929}
2930
2931void AudioTrack::AudioTrackThread::wake()
2932{
2933    AutoMutex _l(mMyLock);
2934    if (!mPaused) {
2935        // wake() might be called while servicing a callback - ignore the next
2936        // pause time and call processAudioBuffer.
2937        mIgnoreNextPausedInt = true;
2938        if (mPausedInt && mPausedNs > 0) {
2939            // audio track is active and internally paused with timeout.
2940            mPausedInt = false;
2941            mMyCond.signal();
2942        }
2943    }
2944}
2945
2946void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2947{
2948    AutoMutex _l(mMyLock);
2949    mPausedInt = true;
2950    mPausedNs = ns;
2951}
2952
2953} // namespace android
2954