// AudioTrack.cpp revision 156c6873a5e69af71f3c28b236c5831b9cb2ac95
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <audio_utils/clock.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120
static const int kMaxLoopCountNotifications = 32;

namespace android {
// ---------------------------------------------------------------------------

using media::VolumeShaper;

// TODO: Move to a separate .h

template <typename T>
static inline const T &min(const T &x, const T &y) {
    return x < y ? x : y;
}

template <typename T>
static inline const T &max(const T &x, const T &y) {
    return x > y ? x : y;
}

static const int32_t NANOS_PER_SECOND = 1000000000;

static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
{
    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
}

static int64_t convertTimespecToUs(const struct timespec &tv)
{
    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
}

// TODO move to audio_utils.
static inline struct timespec convertNsToTimespec(int64_t ns) {
    struct timespec tv;
    tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
    tv.tv_nsec = static_cast<long>(ns % NANOS_PER_SECOND);
    return tv;
}

// current monotonic time in microseconds.
static int64_t getNowUs()
{
    struct timespec tv;
    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
    return convertTimespecToUs(tv);
}

// FIXME: we don't use the pitch setting in the time stretcher (not working);
// instead we emulate it using our sample rate converter.
static const bool kFixPitch = true; // enable pitch fix
static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
{
    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
}

static inline float adjustSpeed(float speed, float pitch)
{
    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
}

static inline float adjustPitch(float pitch)
{
    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
}

// Must match similar computation in createTrack_l in Threads.cpp.
// TODO: Move to a common library
static size_t calculateMinFrameCount(
        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
{
    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }
#if 0
    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
    // but keeping the code here to make it easier to add later.
    if (minBufCount < notificationsPerBufferReq) {
        minBufCount = notificationsPerBufferReq;
    }
#endif
    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
            /*, notificationsPerBufferReq*/);
    return minBufCount * sourceFramesNeededWithTimestretch(
            sampleRate, afFrameCount, afSampleRate, speed);
}
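
// Rough worked example (illustrative values, not from the original source): with
// afFrameCount = 960 and afSampleRate = 48000 the mixer period is 20 ms, so
// afLatencyMs = 60 gives minBufCount = 3; for a 44100 Hz client at speed 1.0 each
// mixer period needs about 960 * 44100 / 48000 = 882 source frames, so the minimum
// client buffer is on the order of 3 * 882 frames (sourceFramesNeededWithTimestretch
// may round up slightly).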

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
            /*, 0 notificationsPerBufferReq*/);

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
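
// Minimal usage sketch of the API above (hypothetical client code, kept disabled with
// the file's own #if 0 convention; the stream type and sample rate are arbitrary examples).
#if 0
static size_t exampleMinFrameCount()
{
    size_t minFrameCount = 0;
    if (AudioTrack::getMinFrameCount(&minFrameCount, AUDIO_STREAM_MUSIC,
            48000 /*sampleRate*/) != NO_ERROR) {
        return 0;   // query failed; a caller would typically fall back to a default
    }
    // A client would normally request a multiple of the minimum to reduce underruns.
    return minFrameCount * 2;
}
#endif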

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(this, mOutput);
        }
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);

    mThreadCanCallJava = threadCanCallJava;
    mSelectedDeviceId = selectedDeviceId;

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
        }
        // check deep buffer after flags have been modified above
        if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        // IEC958 non-audio is an output flag, not an audio_attributes_t flag.
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO);
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("notificationFrames=%d not permitted for non-fast track",
                    notificationFrames);
            return BAD_VALUE;
        }
        if (frameCount > 0) {
            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    notificationFrames, frameCount);
            return BAD_VALUE;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "notificationFrames=%d clamped to the range -%u to -%u",
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status_t status = createTrack_l();

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartNs = 0;
    mStartFromZeroUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
    mVolumeHandler = new media::VolumeHandler();
    return NO_ERROR;
}
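
// Note for readers of set() (summary of the TRANSFER_DEFAULT resolution above, with an
// illustrative example): a non-null sharedBuffer forces TRANSFER_SHARED; otherwise a
// non-null callback together with threadCanCallJava == false selects TRANSFER_CALLBACK,
// and all remaining cases fall back to TRANSFER_SYNC. For example, a client that only
// calls write() and supplies neither a callback nor a shared buffer ends up in
// TRANSFER_SYNC mode.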

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();

    // save start timestamp
    if (isOffloadedOrDirect_l()) {
        if (getTimestamp_l(mStartTs) != OK) {
            mStartTs.mPosition = 0;
        }
    } else {
        if (getTimestamp_l(&mStartEts) != OK) {
            mStartEts.clear();
        }
    }
    mStartNs = systemTime(); // save this for timestamp adjustment after starting.
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mRetrogradeMotionReported = false;
        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;

        if (!isOffloadedOrDirect_l()
                && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
                    (long long)(mFramesWrittenServerOffset
                            + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)mStartEts.mFlushed,
                    (long long)mFramesWritten);
            // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
            mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartFromZeroUs = mStartNs / 1000;

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }

        // Start our local VolumeHandler for restoration purposes.
        mVolumeHandler->setStarted();
    } else {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        ALOGD_IF(mSharedBuffer == nullptr,
                "stop() called with %u frames delivered", mReleased.value());
        mReleased = 0;
    }

    mProxy->interrupt();
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    AutoMutex lock(mLock);
    if (rate == mSampleRate) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
        return INVALID_OPERATION;
    }
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    // NOTE: it is theoretically possible, but highly unlikely, that a device change
    // could mean a previously allowed sampling rate is no longer allowed.
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }
    // TODO: Should we also check if the buffer size is compatible?

    mSampleRate = rate;
    mProxy->setSampleRate(effectiveSampleRate);

    return NO_ERROR;
}
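
// Illustrative numbers for the check above (not from the original code): with
// mPlaybackRate.mPitch == 1.5, a requested rate of 44100 is evaluated as an effective
// rate of 44100 * 1.5 + 0.5 = 66150, and it is this pitch-adjusted value that must not
// exceed afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX.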

uint32_t AudioTrack::getSampleRate() const
{
    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

uint32_t AudioTrack::getOriginalSampleRate() const
{
    return mOriginalSampleRate;
}

status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
    AutoMutex lock(mLock);
    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION;
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    ALOGV("setPlaybackRate (input): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    const float effectivePitch = adjustPitch(playbackRate.mPitch);
    AudioPlaybackRate playbackRateTemp = playbackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;

    ALOGV("setPlaybackRate (effective): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            effectiveRate, effectiveSpeed, effectivePitch);

    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
        ALOGW("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // Check if the buffer size is compatible.
    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
        ALOGW("setPlaybackRate(%f, %f) failed (buffer size)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    // Check resampler ratios are within bounds
    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
            (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
                        playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    mPlaybackRate = playbackRate;
    //set effective rates
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    return NO_ERROR;
}

const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
{
    AutoMutex lock(mLock);
    return mPlaybackRate;
}

ssize_t AudioTrack::getBufferSizeInFrames()
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    return (ssize_t) mProxy->getBufferSizeInFrames();
}

status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
{
    if (duration == nullptr) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
    if (bufferSizeInFrames < 0) {
        return (status_t)bufferSizeInFrames;
    }
    *duration = (int64_t)((double)bufferSizeInFrames * 1000000
            / ((double)mSampleRate * mPlaybackRate.mSpeed));
    return NO_ERROR;
}
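
// Example of the conversion above (illustrative values): 4800 buffered frames at
// mSampleRate 48000 and speed 1.0 correspond to 4800 * 1000000 / 48000 = 100000 us,
// i.e. a 100 ms buffer; doubling the speed halves the reported duration.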

ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    // Reject if timed track or compressed audio.
    if (!audio_is_linear_pcm(mFormat)) {
        return INVALID_OPERATION;
    }
    return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // We do not update the periodic notification point.
    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopCount = loopCount;
    mLoopEnd = loopEnd;
    mLoopStart = loopStart;
    mLoopCountNotified = loopCount;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mMarkerPosition.getValue(marker);

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = updateAndGetPosition_l() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // After setting the position, use full update period before notification.
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mStaticProxy->setBufferPosition(position);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // FIXME: offloaded and direct tracks call into the HAL for render positions
    // for compressed/synced data; however, we use proxy position for pure linear pcm data
    // as we do not know the capability of the HAL for pcm position support and standby.
    // There may be some latency differences between the HAL position and the proxy position.
    if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
        uint32_t dspFrames = 0;

        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames; // actually unused
            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        if (mCblk->mFlags & CBLK_INVALID) {
            (void) restoreTrack_l("getPosition");
            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
        }

        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l().value();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    (void) updateAndGetPosition_l();
    mPosition = 0;
    mPreviousTimestampValid = false;
#if 0
    // The documentation is not clear on the behavior of reload() and the restoration
    // of loop count. Historically we have not restored loop count, start, end,
    // but it makes sense if one desires to repeat playing a particular sound.
    if (mLoopCount != 0) {
        mLoopCountNotified = mLoopCount;
        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
    }
#endif
    mStaticProxy->setBufferPosition(0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
    AutoMutex lock(mLock);
    if (mSelectedDeviceId != deviceId) {
        mSelectedDeviceId = deviceId;
        if (mStatus == NO_ERROR) {
            android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
            mProxy->interrupt();
        }
    }
    return NO_ERROR;
}

audio_port_handle_t AudioTrack::getOutputDevice() {
    AutoMutex lock(mLock);
    return mSelectedDeviceId;
}

// must be called with mLock held
void AudioTrack::updateRoutedDeviceId_l()
{
    // if the track is inactive, do not update the actual device as the output stream may be routed
    // to a device not relevant to this client because of other active use cases.
    if (mState != STATE_ACTIVE) {
        return;
    }
    if (mOutput != AUDIO_IO_HANDLE_NONE) {
        audio_port_handle_t deviceId = AudioSystem::getDeviceIdForIo(mOutput);
        if (deviceId != AUDIO_PORT_HANDLE_NONE) {
            mRoutedDeviceId = deviceId;
        }
    }
}

audio_port_handle_t AudioTrack::getRoutedDeviceId() {
    AutoMutex lock(mLock);
    updateRoutedDeviceId_l();
    return mRoutedDeviceId;
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

audio_stream_type_t AudioTrack::streamType() const
{
    if (mStreamType == AUDIO_STREAM_DEFAULT) {
        return audio_attributes_to_stream_type(&mAttributes);
    }
    return mStreamType;
}

uint32_t AudioTrack::latency()
{
    AutoMutex lock(mLock);
    updateLatency_l();
    return mLatency;
}

// -------------------------------------------------------------------------

// must be called with mLock held
void AudioTrack::updateLatency_l()
{
    status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGW("getLatency(%d) failed status %d", mOutput, status);
    } else {
        // FIXME don't believe this lie
        mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
    }
}
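
// Example of the estimate above (illustrative values): with mAfLatency = 20 ms,
// mFrameCount = 4800 and mSampleRate = 48000, the reported client latency would be
// 20 + (1000 * 4800) / 48000 = 120 ms.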

// TODO Move this macro to a common header file for enum to string conversion in audio framework.
#define MEDIA_CASE_ENUM(name) case name: return #name
const char * AudioTrack::convertTransferToText(transfer_type transferType) {
    switch (transferType) {
        MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
        MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
        MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
        MEDIA_CASE_ENUM(TRANSFER_SYNC);
        MEDIA_CASE_ENUM(TRANSFER_SHARED);
        default:
            return "UNRECOGNIZED";
    }
}
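
// For reference, MEDIA_CASE_ENUM(TRANSFER_SYNC) above expands to
//     case TRANSFER_SYNC: return "TRANSFER_SYNC";
// via the preprocessor stringizing operator, which is why each enumerator is converted
// to exactly its source spelling.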

status_t AudioTrack::createTrack_l()
{
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
    bool callbackAdded = false;

    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.

    status_t status;
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = mSampleRate;
    config.channel_mask = mChannelMask;
    config.format = mFormat;
    config.offload_info = mOffloadInfoCopy;
    mRoutedDeviceId = mSelectedDeviceId;
    status = AudioSystem::getOutputForAttr(attr, &output,
                                           mSessionId, &streamType, mClientUid,
                                           &config,
                                           mFlags, &mRoutedDeviceId, &mPortId);

    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u,"
              " format %#x, channel mask %#x, flags %#x",
              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask,
              mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all
    status = AudioSystem::getLatency(output, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }
    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);

    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    // TODO consider making this a member variable if there are other uses for it later
    size_t afFrameCountHAL;
    status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
    if (status != NO_ERROR) {
        ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
        goto release;
    }
    ALOG_ASSERT(afFrameCountHAL > 0);

    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    if (mSampleRate == 0) {
        mSampleRate = mAfSampleRate;
        mOriginalSampleRate = mAfSampleRate;
    }

    // Client can only express a preference for FAST.  Server will perform additional tests.
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // either of these use cases:
        // use case 1: shared buffer
        bool sharedBuffer = mSharedBuffer != 0;
        bool transferAllowed =
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);

        bool useCaseAllowed = sharedBuffer || transferAllowed;
        if (!useCaseAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, not shared buffer and transfer = %s",
                  convertTransferToText(mTransfer));
        }

        // sample rates must also match
        bool sampleRateAllowed = mSampleRate == mAfSampleRate;
        if (!sampleRateAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, sample rate %u Hz but HAL needs %u Hz",
                  mSampleRate, mAfSampleRate);
        }

        bool fastAllowed = useCaseAllowed && sampleRateAllowed;
        if (!fastAllowed) {
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_has_proportional_frames(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = mAfFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(mFormat);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }
1449
1450        // When initializing a shared buffer AudioTrack via constructors,
1451        // there's no frameCount parameter.
1452        // But when initializing a shared buffer AudioTrack via set(),
1453        // there _is_ a frameCount parameter.  We silently ignore it.
1454        frameCount = mSharedBuffer->size() / mFrameSize;
1455    } else {
1456        size_t minFrameCount = 0;
1457        // For fast tracks the frame count calculations and checks are mostly done by server,
1458        // but we try to respect the application's request for notifications per buffer.
1459        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1460            if (mNotificationsPerBufferReq > 0) {
1461                // Avoid possible arithmetic overflow during multiplication.
1462                // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
1463                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
1464                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
1465                            mNotificationsPerBufferReq, afFrameCountHAL);
1466                } else {
1467                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
1468                }
1469            }
1470        } else {
1471            // for normal tracks precompute the frame count based on speed.
1472            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1473                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1474            minFrameCount = calculateMinFrameCount(
1475                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
1476                    speed /*, 0 mNotificationsPerBufferReq*/);
1477        }
1478        if (frameCount < minFrameCount) {
1479            frameCount = minFrameCount;
1480        }
1481    }
1482
1483    audio_output_flags_t flags = mFlags;
1484
1485    pid_t tid = -1;
1486    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1487        // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
1488        // application-level code follows all non-blocking design rules, the language runtime
1489        // doesn't also follow those rules, so the thread will not benefit overall.
1490        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1491            tid = mAudioTrackThread->getTid();
1492        }
1493    }
1494
1495    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1496                                // but we will still need the original value also
1497    audio_session_t originalSessionId = mSessionId;
1498    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1499                                                      mSampleRate,
1500                                                      mFormat,
1501                                                      mChannelMask,
1502                                                      &temp,
1503                                                      &flags,
1504                                                      mSharedBuffer,
1505                                                      output,
1506                                                      mClientPid,
1507                                                      tid,
1508                                                      &mSessionId,
1509                                                      mClientUid,
1510                                                      &status,
1511                                                      mPortId);
1512    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1513            "session ID changed from %d to %d", originalSessionId, mSessionId);
1514
1515    if (status != NO_ERROR) {
1516        ALOGE("AudioFlinger could not create track, status: %d", status);
1517        goto release;
1518    }
1519    ALOG_ASSERT(track != 0);
1520
1521    // AudioFlinger now owns the reference to the I/O handle,
1522    // so we are no longer responsible for releasing it.
1523
1524    // FIXME compare to AudioRecord
1525    sp<IMemory> iMem = track->getCblk();
1526    if (iMem == 0) {
1527        ALOGE("Could not get control block");
1528        status = NO_INIT;
1529        goto release;
1530    }
1531    void *iMemPointer = iMem->pointer();
1532    if (iMemPointer == NULL) {
1533        ALOGE("Could not get control block pointer");
1534        status = NO_INIT;
1535        goto release;
1536    }
1537    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1538    if (mAudioTrack != 0) {
1539        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1540        mDeathNotifier.clear();
1541    }
1542    mAudioTrack = track;
1543    mCblkMemory = iMem;
1544    IPCThreadState::self()->flushCommands();
1545
1546    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1547    mCblk = cblk;
1548    // note that temp is the (possibly revised) value of frameCount
1549    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1550        // In the current design, the AudioTrack client checks and ensures frame count validity
1551        // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
1552        // except for fast tracks, which use a special method of assigning the frame count.
1553        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1554    }
1555    frameCount = temp;
1556
1557    mAwaitBoost = false;
1558    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1559        if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1560            ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
1561            if (!mThreadCanCallJava) {
1562                mAwaitBoost = true;
1563            }
1564        } else {
1565            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount,
1566                    temp);
1567        }
1568    }
1569    mFlags = flags;
1570
1571    // Make sure that the application is notified with sufficient margin before underrun.
1572    // The client can divide the AudioTrack buffer into sub-buffers,
1573    // and expresses its desire to the server as the notification frame count.
1574    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1575        size_t maxNotificationFrames;
1576        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1577            // notify every HAL buffer, regardless of the size of the track buffer
1578            maxNotificationFrames = afFrameCountHAL;
1579        } else {
1580            // For normal tracks, use at least double-buffering if no sample rate conversion,
1581            // or at least triple-buffering if there is sample rate conversion
1582            const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
1583            maxNotificationFrames = frameCount / nBuffering;
1584            // If client requested a fast track but this was denied, then use the smaller maximum.
1585            // FMS_20 is the minimum task wakeup period in ms for which CFS operates reliably.
1586#define FMS_20 20   // FIXME share a common declaration with the same symbol in Threads.cpp
1587            if (mOrigFlags & AUDIO_OUTPUT_FLAG_FAST) {
1588                size_t maxNotificationFramesFastDenied = FMS_20 * mSampleRate / 1000;
1589                if (maxNotificationFrames > maxNotificationFramesFastDenied) {
1590                    maxNotificationFrames = maxNotificationFramesFastDenied;
1591                }
1592            }
1593        }
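        // Worked example (illustrative numbers, not from the original source): with
        // frameCount == 4800 at 48 kHz and no sample rate conversion, nBuffering == 2 gives
        // maxNotificationFrames == 2400 (about 50 ms); if a fast track was requested but denied,
        // the cap becomes FMS_20 * 48000 / 1000 == 960 frames (20 ms).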
1594        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
1595            if (mNotificationFramesAct == 0) {
1596                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
1597                    maxNotificationFrames, frameCount);
1598            } else {
1599                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
1600                    mNotificationFramesAct, maxNotificationFrames, frameCount);
1601            }
1602            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
1603        }
1604    }
1605
1606    // mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
1607    if (mDeviceCallback != 0 && mOutput != output) {
1608        if (mOutput != AUDIO_IO_HANDLE_NONE) {
1609            AudioSystem::removeAudioDeviceCallback(this, mOutput);
1610        }
1611        AudioSystem::addAudioDeviceCallback(this, output);
1612        callbackAdded = true;
1613    }
1614
1615    // We retain a copy of the I/O handle, but don't own the reference
1616    mOutput = output;
1617    mRefreshRemaining = true;
1618
1619    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1620    // is the value of pointer() for the shared buffer, otherwise buffers points
1621    // immediately after the control block.  This address is for the mapping within client
1622    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1623    void* buffers;
1624    if (mSharedBuffer == 0) {
1625        buffers = cblk + 1;
1626    } else {
1627        buffers = mSharedBuffer->pointer();
1628        if (buffers == NULL) {
1629            ALOGE("Could not get buffer pointer");
1630            status = NO_INIT;
1631            goto release;
1632        }
1633    }
1634
1635    mAudioTrack->attachAuxEffect(mAuxEffectId);
1636    mFrameCount = frameCount;
1637    updateLatency_l();  // this refetches mAfLatency and sets mLatency
1638
1639    // If IAudioTrack is re-created, don't let the requested frameCount
1640    // decrease.  This can confuse clients that cache frameCount().
1641    if (frameCount > mReqFrameCount) {
1642        mReqFrameCount = frameCount;
1643    }
1644
1645    // reset server position to 0 as we have new cblk.
1646    mServer = 0;
1647
1648    // update proxy
1649    if (mSharedBuffer == 0) {
1650        mStaticProxy.clear();
1651        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1652    } else {
1653        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1654        mProxy = mStaticProxy;
1655    }
1656
1657    mProxy->setVolumeLR(gain_minifloat_pack(
1658            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1659            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1660
1661    mProxy->setSendLevel(mSendLevel);
1662    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1663    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1664    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1665    mProxy->setSampleRate(effectiveSampleRate);
1666
1667    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1668    playbackRateTemp.mSpeed = effectiveSpeed;
1669    playbackRateTemp.mPitch = effectivePitch;
1670    mProxy->setPlaybackRate(playbackRateTemp);
1671    mProxy->setMinimum(mNotificationFramesAct);
1672
1673    mDeathNotifier = new DeathNotifier(this);
1674    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1675
1676    return NO_ERROR;
1677    }
1678
1679release:
1680    AudioSystem::releaseOutput(output, streamType, mSessionId);
1681    if (callbackAdded) {
1682        // note: mOutput is always valid if callbackAdded is true
1683        AudioSystem::removeAudioDeviceCallback(this, mOutput);
1684    }
1685    if (status == NO_ERROR) {
1686        status = NO_INIT;
1687    }
1688    return status;
1689}
1690
1691status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1692{
1693    if (audioBuffer == NULL) {
1694        if (nonContig != NULL) {
1695            *nonContig = 0;
1696        }
1697        return BAD_VALUE;
1698    }
1699    if (mTransfer != TRANSFER_OBTAIN) {
1700        audioBuffer->frameCount = 0;
1701        audioBuffer->size = 0;
1702        audioBuffer->raw = NULL;
1703        if (nonContig != NULL) {
1704            *nonContig = 0;
1705        }
1706        return INVALID_OPERATION;
1707    }
1708
1709    const struct timespec *requested;
1710    struct timespec timeout;
1711    if (waitCount == -1) {
1712        requested = &ClientProxy::kForever;
1713    } else if (waitCount == 0) {
1714        requested = &ClientProxy::kNonBlocking;
1715    } else if (waitCount > 0) {
1716        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1717        timeout.tv_sec = ms / 1000;
1718        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1719        requested = &timeout;
1720    } else {
1721        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1722        requested = NULL;
1723    }
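    // Worked example (illustrative): with WAIT_PERIOD_MS == 10, waitCount == 3 maps to a 30 ms
    // timeout, i.e. timeout.tv_sec == 0 and timeout.tv_nsec == 30000000.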
1724    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1725}
1726
1727status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1728        struct timespec *elapsed, size_t *nonContig)
1729{
1730    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1731    uint32_t oldSequence = 0;
1732    uint32_t newSequence;
1733
1734    Proxy::Buffer buffer;
1735    status_t status = NO_ERROR;
1736
1737    static const int32_t kMaxTries = 5;
1738    int32_t tryCounter = kMaxTries;
1739
1740    do {
1741        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1742        // keep them from going away if another thread re-creates the track during obtainBuffer()
1743        sp<AudioTrackClientProxy> proxy;
1744        sp<IMemory> iMem;
1745
1746        {   // start of lock scope
1747            AutoMutex lock(mLock);
1748
1749            newSequence = mSequence;
1750            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1751            if (status == DEAD_OBJECT) {
1752                // re-create track, unless someone else has already done so
1753                if (newSequence == oldSequence) {
1754                    status = restoreTrack_l("obtainBuffer");
1755                    if (status != NO_ERROR) {
1756                        buffer.mFrameCount = 0;
1757                        buffer.mRaw = NULL;
1758                        buffer.mNonContig = 0;
1759                        break;
1760                    }
1761                }
1762            }
1763            oldSequence = newSequence;
1764
1765            if (status == NOT_ENOUGH_DATA) {
1766                restartIfDisabled();
1767            }
1768
1769            // Keep the extra references
1770            proxy = mProxy;
1771            iMem = mCblkMemory;
1772
1773            if (mState == STATE_STOPPING) {
1774                status = -EINTR;
1775                buffer.mFrameCount = 0;
1776                buffer.mRaw = NULL;
1777                buffer.mNonContig = 0;
1778                break;
1779            }
1780
1781            // Non-blocking if track is stopped or paused
1782            if (mState != STATE_ACTIVE) {
1783                requested = &ClientProxy::kNonBlocking;
1784            }
1785
1786        }   // end of lock scope
1787
1788        buffer.mFrameCount = audioBuffer->frameCount;
1789        // FIXME starts the requested timeout and elapsed over from scratch
1790        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1791    } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1792
1793    audioBuffer->frameCount = buffer.mFrameCount;
1794    audioBuffer->size = buffer.mFrameCount * mFrameSize;
1795    audioBuffer->raw = buffer.mRaw;
1796    if (nonContig != NULL) {
1797        *nonContig = buffer.mNonContig;
1798    }
1799    return status;
1800}
1801
1802void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1803{
1804    // FIXME add error checking on mode, by adding an internal version
1805    if (mTransfer == TRANSFER_SHARED) {
1806        return;
1807    }
1808
1809    size_t stepCount = audioBuffer->size / mFrameSize;
1810    if (stepCount == 0) {
1811        return;
1812    }
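    // Illustrative example: a released buffer of 1024 bytes with a 4-byte frame size
    // (16-bit stereo) returns 256 frames to the server.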
1813
1814    Proxy::Buffer buffer;
1815    buffer.mFrameCount = stepCount;
1816    buffer.mRaw = audioBuffer->raw;
1817
1818    AutoMutex lock(mLock);
1819    mReleased += stepCount;
1820    mInUnderrun = false;
1821    mProxy->releaseBuffer(&buffer);
1822
1823    // restart track if it was disabled by audioflinger due to previous underrun
1824    restartIfDisabled();
1825}
1826
1827void AudioTrack::restartIfDisabled()
1828{
1829    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1830    if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1831        ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1832        // FIXME ignoring status
1833        mAudioTrack->start();
1834    }
1835}
1836
1837// -------------------------------------------------------------------------
1838
1839ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1840{
1841    if (mTransfer != TRANSFER_SYNC) {
1842        return INVALID_OPERATION;
1843    }
1844
1845    if (isDirect()) {
1846        AutoMutex lock(mLock);
1847        int32_t flags = android_atomic_and(
1848                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1849                            &mCblk->mFlags);
1850        if (flags & CBLK_INVALID) {
1851            return DEAD_OBJECT;
1852        }
1853    }
1854
1855    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1856        // Sanity-check: the user is most likely passing an error code, and it would
1857        // make the return value ambiguous (actualSize vs error).
1858        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1859        return BAD_VALUE;
1860    }
1861
1862    size_t written = 0;
1863    Buffer audioBuffer;
1864
1865    while (userSize >= mFrameSize) {
1866        audioBuffer.frameCount = userSize / mFrameSize;
1867
1868        status_t err = obtainBuffer(&audioBuffer,
1869                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1870        if (err < 0) {
1871            if (written > 0) {
1872                break;
1873            }
1874            if (err == TIMED_OUT || err == -EINTR) {
1875                err = WOULD_BLOCK;
1876            }
1877            return ssize_t(err);
1878        }
1879
1880        size_t toWrite = audioBuffer.size;
1881        memcpy(audioBuffer.i8, buffer, toWrite);
1882        buffer = ((const char *) buffer) + toWrite;
1883        userSize -= toWrite;
1884        written += toWrite;
1885
1886        releaseBuffer(&audioBuffer);
1887    }
1888
1889    if (written > 0) {
1890        mFramesWritten += written / mFrameSize;
1891    }
1892    return written;
1893}
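// Illustrative client-side usage sketch (not part of this file; variable names are assumed and
// the track is presumed to have been created in TRANSFER_SYNC mode and configured elsewhere):
//
//     sp<AudioTrack> track = ...;   // already set() up for streaming linear PCM
//     track->start();
//     ssize_t written = track->write(pcmData, pcmBytes, true /*blocking*/);
//     if (written < 0) { /* handle error, e.g. WOULD_BLOCK or DEAD_OBJECT */ }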
1894
1895// -------------------------------------------------------------------------
1896
1897nsecs_t AudioTrack::processAudioBuffer()
1898{
1899    // Currently the AudioTrack thread is not created if there are no callbacks.
1900    // Would it ever make sense to run the thread, even without callbacks?
1901    // If so, then replace this by checks at each use for mCbf != NULL.
1902    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1903
1904    mLock.lock();
1905    if (mAwaitBoost) {
1906        mAwaitBoost = false;
1907        mLock.unlock();
1908        static const int32_t kMaxTries = 5;
1909        int32_t tryCounter = kMaxTries;
1910        uint32_t pollUs = 10000;
1911        do {
1912            int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
1913            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1914                break;
1915            }
1916            usleep(pollUs);
1917            pollUs <<= 1;
1918        } while (tryCounter-- > 0);
1919        if (tryCounter < 0) {
1920            ALOGE("did not receive expected priority boost on time");
1921        }
1922        // Run again immediately
1923        return 0;
1924    }
1925
1926    // Can only reference mCblk while locked
1927    int32_t flags = android_atomic_and(
1928        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1929
1930    // Check for track invalidation
1931    if (flags & CBLK_INVALID) {
1932        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear the
1933        // AudioSystem cache. We should not exit here, but only after calling the callback, so
1934        // that the upper layers can recreate the track.
1935        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1936            status_t status __unused = restoreTrack_l("processAudioBuffer");
1937            // FIXME unused status
1938            // after restoration, continue below to make sure that the loop and buffer events
1939            // are notified because they have been cleared from mCblk->mFlags above.
1940        }
1941    }
1942
1943    bool waitStreamEnd = mState == STATE_STOPPING;
1944    bool active = mState == STATE_ACTIVE;
1945
1946    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1947    bool newUnderrun = false;
1948    if (flags & CBLK_UNDERRUN) {
1949#if 0
1950        // Currently in shared buffer mode, when the server reaches the end of buffer,
1951        // the track stays active in continuous underrun state.  It's up to the application
1952        // to pause or stop the track, or set the position to a new offset within buffer.
1953        // This was some experimental code to auto-pause on underrun.  Keeping it here
1954        // in "#if 0" so we can revisit this if we add a real sequencer for shared memory content.
1955        if (mTransfer == TRANSFER_SHARED) {
1956            mState = STATE_PAUSED;
1957            active = false;
1958        }
1959#endif
1960        if (!mInUnderrun) {
1961            mInUnderrun = true;
1962            newUnderrun = true;
1963        }
1964    }
1965
1966    // Get current position of server
1967    Modulo<uint32_t> position(updateAndGetPosition_l());
1968
1969    // Manage marker callback
1970    bool markerReached = false;
1971    Modulo<uint32_t> markerPosition(mMarkerPosition);
1972    // uses 32 bit wraparound for comparison with position.
1973    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1974        mMarkerReached = markerReached = true;
1975    }
1976
1977    // Determine number of new position callback(s) that will be needed, while locked
1978    size_t newPosCount = 0;
1979    Modulo<uint32_t> newPosition(mNewPosition);
1980    uint32_t updatePeriod = mUpdatePeriod;
1981    // FIXME fails for wraparound, need 64 bits
1982    if (updatePeriod > 0 && position >= newPosition) {
1983        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1984        mNewPosition += updatePeriod * newPosCount;
1985    }
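    // Worked example (illustrative): with updatePeriod == 1000 frames, mNewPosition == 5000 and
    // position == 7500, newPosCount == (2500 / 1000) + 1 == 3, so EVENT_NEW_POS fires for
    // positions 5000, 6000 and 7000 below, and mNewPosition advances to 8000.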
1986
1987    // Cache other fields that will be needed soon
1988    uint32_t sampleRate = mSampleRate;
1989    float speed = mPlaybackRate.mSpeed;
1990    const uint32_t notificationFrames = mNotificationFramesAct;
1991    if (mRefreshRemaining) {
1992        mRefreshRemaining = false;
1993        mRemainingFrames = notificationFrames;
1994        mRetryOnPartialBuffer = false;
1995    }
1996    size_t misalignment = mProxy->getMisalignment();
1997    uint32_t sequence = mSequence;
1998    sp<AudioTrackClientProxy> proxy = mProxy;
1999
2000    // Determine the number of new loop callback(s) that will be needed, while locked.
2001    int loopCountNotifications = 0;
2002    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
2003
2004    if (mLoopCount > 0) {
2005        int loopCount;
2006        size_t bufferPosition;
2007        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2008        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
2009        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
2010        mLoopCountNotified = loopCount; // discard any excess notifications
2011    } else if (mLoopCount < 0) {
2012        // FIXME: We're not accurate with notification count and position with infinite looping
2013        // since loopCount from server side will always return -1 (we could decrement it).
2014        size_t bufferPosition = mStaticProxy->getBufferPosition();
2015        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
2016        loopPeriod = mLoopEnd - bufferPosition;
2017    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
2018        size_t bufferPosition = mStaticProxy->getBufferPosition();
2019        loopPeriod = mFrameCount - bufferPosition;
2020    }
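    // Illustrative example (assumed numbers): if the server reports loopCount == 3 while
    // mLoopCountNotified is still 5, loopCountNotifications becomes min(5 - 3, 32) == 2, so two
    // EVENT_LOOP_END callbacks are delivered below and mLoopCountNotified is resynced to 3.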
2021
2022    // These fields don't need to be cached, because they are assigned only by set():
2023    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
2024    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
2025
2026    mLock.unlock();
2027
2028    // get anchor time to account for callbacks.
2029    const nsecs_t timeBeforeCallbacks = systemTime();
2030
2031    if (waitStreamEnd) {
2032        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
2033        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
2034        // (and make sure we don't callback for more data while we're stopping).
2035        // This helps with position, marker notifications, and track invalidation.
2036        struct timespec timeout;
2037        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
2038        timeout.tv_nsec = 0;
2039
2040        status_t status = proxy->waitStreamEndDone(&timeout);
2041        switch (status) {
2042        case NO_ERROR:
2043        case DEAD_OBJECT:
2044        case TIMED_OUT:
2045            if (status != DEAD_OBJECT) {
2046                // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
2047                // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
2048                mCbf(EVENT_STREAM_END, mUserData, NULL);
2049            }
2050            {
2051                AutoMutex lock(mLock);
2052                // The previously assigned value of waitStreamEnd is no longer valid,
2053                // since the mutex has been unlocked and either the callback handler
2054                // or another thread could have re-started the AudioTrack during that time.
2055                waitStreamEnd = mState == STATE_STOPPING;
2056                if (waitStreamEnd) {
2057                    mState = STATE_STOPPED;
2058                    mReleased = 0;
2059                }
2060            }
2061            if (waitStreamEnd && status != DEAD_OBJECT) {
2062               return NS_INACTIVE;
2063            }
2064            break;
2065        }
2066        return 0;
2067    }
2068
2069    // perform callbacks while unlocked
2070    if (newUnderrun) {
2071        mCbf(EVENT_UNDERRUN, mUserData, NULL);
2072    }
2073    while (loopCountNotifications > 0) {
2074        mCbf(EVENT_LOOP_END, mUserData, NULL);
2075        --loopCountNotifications;
2076    }
2077    if (flags & CBLK_BUFFER_END) {
2078        mCbf(EVENT_BUFFER_END, mUserData, NULL);
2079    }
2080    if (markerReached) {
2081        mCbf(EVENT_MARKER, mUserData, &markerPosition);
2082    }
2083    while (newPosCount > 0) {
2084        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
2085        mCbf(EVENT_NEW_POS, mUserData, &temp);
2086        newPosition += updatePeriod;
2087        newPosCount--;
2088    }
2089
2090    if (mObservedSequence != sequence) {
2091        mObservedSequence = sequence;
2092        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
2093        // for offloaded tracks, just wait for the upper layers to recreate the track
2094        if (isOffloadedOrDirect()) {
2095            return NS_INACTIVE;
2096        }
2097    }
2098
2099    // if inactive, then don't run me again until re-started
2100    if (!active) {
2101        return NS_INACTIVE;
2102    }
2103
2104    // Compute the estimated time until the next timed event (position, markers, loops)
2105    // FIXME only for non-compressed audio
2106    uint32_t minFrames = ~0;
2107    if (!markerReached && position < markerPosition) {
2108        minFrames = (markerPosition - position).value();
2109    }
2110    if (loopPeriod > 0 && loopPeriod < minFrames) {
2111        // loopPeriod is already adjusted for actual position.
2112        minFrames = loopPeriod;
2113    }
2114    if (updatePeriod > 0) {
2115        minFrames = min(minFrames, (newPosition - position).value());
2116    }
2117
2118    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
2119    static const uint32_t kPoll = 0;
2120    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
2121        minFrames = kPoll * notificationFrames;
2122    }
2123
2124    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
2125    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
2126    const nsecs_t timeAfterCallbacks = systemTime();
2127
2128    // Convert frame units to time units
2129    nsecs_t ns = NS_WHENEVER;
2130    if (minFrames != (uint32_t) ~0) {
2131        // AudioFlinger consumption of client data may be irregular when coming out of device
2132        // standby since the kernel buffers require filling. This is throttled to no more than 2x
2133        // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
2134        // half (but no more than half a second) to improve callback accuracy during these temporary
2135        // data surges.
2136        const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
2137        constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
2138        ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
2139        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
2140        // TODO: Should we warn if the callback time is too long?
2141        if (ns < 0) ns = 0;
2142    }
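    // Worked example (illustrative): minFrames == 480 at 48 kHz and speed 1.0 is estimated at
    // 10 ms; after subtracting min(5 ms, 500 ms) and adding the 10 ms fudge factor, ns is about
    // 15 ms before the callback duration is deducted.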
2143
2144    // If not supplying data by EVENT_MORE_DATA, then we're done
2145    if (mTransfer != TRANSFER_CALLBACK) {
2146        return ns;
2147    }
2148
2149    // EVENT_MORE_DATA callback handling.
2150    // Timing for linear pcm audio data formats can be derived directly from the
2151    // buffer fill level.
2152    // Timing for compressed data is not directly available from the buffer fill level,
2153    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2154    // to return a certain fill level.
2155
2156    struct timespec timeout;
2157    const struct timespec *requested = &ClientProxy::kForever;
2158    if (ns != NS_WHENEVER) {
2159        timeout.tv_sec = ns / 1000000000LL;
2160        timeout.tv_nsec = ns % 1000000000LL;
2161        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2162        requested = &timeout;
2163    }
2164
2165    size_t writtenFrames = 0;
2166    while (mRemainingFrames > 0) {
2167
2168        Buffer audioBuffer;
2169        audioBuffer.frameCount = mRemainingFrames;
2170        size_t nonContig;
2171        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2172        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2173                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
2174        requested = &ClientProxy::kNonBlocking;
2175        size_t avail = audioBuffer.frameCount + nonContig;
2176        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2177                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2178        if (err != NO_ERROR) {
2179            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2180                    (isOffloaded() && (err == DEAD_OBJECT))) {
2181                // FIXME bug 25195759
2182                return 1000000;
2183            }
2184            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
2185            return NS_NEVER;
2186        }
2187
2188        if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2189            mRetryOnPartialBuffer = false;
2190            if (avail < mRemainingFrames) {
2191                if (ns > 0) { // account for obtain time
2192                    const nsecs_t timeNow = systemTime();
2193                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2194                }
2195                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2196                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2197                    ns = myns;
2198                }
2199                return ns;
2200            }
2201        }
2202
2203        size_t reqSize = audioBuffer.size;
2204        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
2205        size_t writtenSize = audioBuffer.size;
2206
2207        // Sanity check on returned size
2208        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2209            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2210                    reqSize, ssize_t(writtenSize));
2211            return NS_NEVER;
2212        }
2213
2214        if (writtenSize == 0) {
2215            // The callback is done filling buffers
2216            // Keep this thread going to handle timed events and
2217            // still try to get more data in intervals of WAIT_PERIOD_MS
2218            // but don't just loop and block the CPU, so wait
2219
2220            // mCbf(EVENT_MORE_DATA, ...) might either
2221            // (1) Block until it can fill the buffer, returning 0 size on EOS.
2222            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2223            // (3) Return 0 size when no data is available, does not wait for more data.
2224            //
2225            // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2226            // We try to compute the wait time to avoid a tight sleep-wait cycle,
2227            // especially for case (3).
2228            //
2229            // The decision to support (1) and (2) affects the sizing of mRemainingFrames
2230            // and this loop; whereas for case (3) we could simply check once with the full
2231            // buffer size and skip the loop entirely.
2232
2233            nsecs_t myns;
2234            if (audio_has_proportional_frames(mFormat)) {
2235                // time to wait based on buffer occupancy
2236                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2237                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2238                // audio flinger thread buffer size (TODO: adjust for fast tracks)
2239                // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2240                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2241                // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2242                myns = datans + (afns / 2);
2243            } else {
2244                // FIXME: This could ping quite a bit if the buffer isn't full.
2245                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2246                myns = kWaitPeriodNs;
2247            }
2248            if (ns > 0) { // account for obtain and callback time
2249                const nsecs_t timeNow = systemTime();
2250                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2251            }
2252            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2253                ns = myns;
2254            }
2255            return ns;
2256        }
2257
2258        size_t releasedFrames = writtenSize / mFrameSize;
2259        audioBuffer.frameCount = releasedFrames;
2260        mRemainingFrames -= releasedFrames;
2261        if (misalignment >= releasedFrames) {
2262            misalignment -= releasedFrames;
2263        } else {
2264            misalignment = 0;
2265        }
2266
2267        releaseBuffer(&audioBuffer);
2268        writtenFrames += releasedFrames;
2269
2270        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2271        // if callback doesn't like to accept the full chunk
2272        if (writtenSize < reqSize) {
2273            continue;
2274        }
2275
2276        // There could be enough non-contiguous frames available to satisfy the remaining request
2277        if (mRemainingFrames <= nonContig) {
2278            continue;
2279        }
2280
2281#if 0
2282        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2283        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
2284        // that total to a sum == notificationFrames.
2285        if (0 < misalignment && misalignment <= mRemainingFrames) {
2286            mRemainingFrames = misalignment;
2287            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2288        }
2289#endif
2290
2291    }
2292    if (writtenFrames > 0) {
2293        AutoMutex lock(mLock);
2294        mFramesWritten += writtenFrames;
2295    }
2296    mRemainingFrames = notificationFrames;
2297    mRetryOnPartialBuffer = true;
2298
2299    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2300    return 0;
2301}
2302
2303status_t AudioTrack::restoreTrack_l(const char *from)
2304{
2305    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2306          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2307    ++mSequence;
2308
2309    // refresh the audio configuration cache in this process to make sure we get new
2310    // output parameters and new IAudioFlinger in createTrack_l()
2311    AudioSystem::clearAudioConfigCache();
2312
2313    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2314        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2315        // reconsider enabling for linear PCM encodings when position can be preserved.
2316        return DEAD_OBJECT;
2317    }
2318
2319    // Save so we can return count since creation.
2320    mUnderrunCountOffset = getUnderrunCount_l();
2321
2322    // save the old static buffer position
2323    uint32_t staticPosition = 0;
2324    size_t bufferPosition = 0;
2325    int loopCount = 0;
2326    if (mStaticProxy != 0) {
2327        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2328        staticPosition = mStaticProxy->getPosition().unsignedValue();
2329    }
2330
2331    mFlags = mOrigFlags;
2332
2333    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2334    // following member variables: mAudioTrack, mCblkMemory and mCblk.
2335    // It will also delete the strong references on previous IAudioTrack and IMemory.
2336    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2337    status_t result = createTrack_l();
2338
2339    if (result == NO_ERROR) {
2340        // Take the frames that will be lost by track recreation into account in the saved position.
2341        // For streaming tracks, this is the amount we obtained from the user/client
2342        // (not the number actually consumed at the server - those are already lost).
2343        if (mStaticProxy == 0) {
2344            mPosition = mReleased;
2345        }
2346        // Continue playback from last known position and restore loop.
2347        if (mStaticProxy != 0) {
2348            if (loopCount != 0) {
2349                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2350                        mLoopStart, mLoopEnd, loopCount);
2351            } else {
2352                mStaticProxy->setBufferPosition(bufferPosition);
2353                if (bufferPosition == mFrameCount) {
2354                    ALOGD("restoring track at end of static buffer");
2355                }
2356            }
2357        }
2358        // restore volume handler
2359        mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
2360            sp<VolumeShaper::Operation> operationToEnd =
2361                    new VolumeShaper::Operation(shaper.mOperation);
2362            // TODO: Ideally we would restore to the exact xOffset position
2363            // as returned by getVolumeShaperState(), but we don't have that
2364            // information when restoring at the client unless we periodically poll
2365            // the server or create shared memory state.
2366            //
2367            // For now, we simply advance to the end of the VolumeShaper effect
2368            // if it has been started.
2369            if (shaper.isStarted()) {
2370                operationToEnd->setNormalizedTime(1.f);
2371            }
2372            return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
2373        });
2374
2375        if (mState == STATE_ACTIVE) {
2376            result = mAudioTrack->start();
2377        }
2378        // server resets to zero so we offset
2379        mFramesWrittenServerOffset =
2380                mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
2381        mFramesWrittenAtRestore = mFramesWrittenServerOffset;
2382    }
2383    if (result != NO_ERROR) {
2384        ALOGW("restoreTrack_l() failed status %d", result);
2385        mState = STATE_STOPPED;
2386        mReleased = 0;
2387    }
2388
2389    return result;
2390}
2391
2392Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2393{
2394    // This is the sole place to read server consumed frames
2395    Modulo<uint32_t> newServer(mProxy->getPosition());
2396    const int32_t delta = (newServer - mServer).signedValue();
2397    // TODO There is controversy about whether there can be "negative jitter" in server position.
2398    //      This should be investigated further, and if possible, it should be addressed.
2399    //      A more definite failure mode is infrequent polling by client.
2400    //      One could call (void)getPosition_l() in releaseBuffer(),
2401    //      so mReleased and mPosition are always lock-step as best possible.
2402    //      That should ensure delta never goes negative for infrequent polling
2403    //      unless the server has more than 2^31 frames in its buffer,
2404    //      in which case the use of uint32_t for these counters has bigger issues.
2405    ALOGE_IF(delta < 0,
2406            "detected illegal retrograde motion by the server: mServer advanced by %d",
2407            delta);
2408    mServer = newServer;
2409    if (delta > 0) { // avoid retrograde
2410        mPosition += delta;
2411    }
2412    return mPosition;
2413}
2414
2415bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
2416{
2417    updateLatency_l();
2418    // applicable for mixing tracks only (not offloaded or direct)
2419    if (mStaticProxy != 0) {
2420        return true; // static tracks do not have issues with buffer sizing.
2421    }
2422    const size_t minFrameCount =
2423            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
2424                /*, 0 mNotificationsPerBufferReq*/);
2425    const bool allowed = mFrameCount >= minFrameCount;
2426    ALOGD_IF(!allowed,
2427            "isSampleRateSpeedAllowed_l denied "
2428            "mAfLatency:%u  mAfFrameCount:%zu  mAfSampleRate:%u  sampleRate:%u  speed:%f "
2429            "mFrameCount:%zu < minFrameCount:%zu",
2430            mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
2431            mFrameCount, minFrameCount);
2432    return allowed;
2433}
2434
2435status_t AudioTrack::setParameters(const String8& keyValuePairs)
2436{
2437    AutoMutex lock(mLock);
2438    return mAudioTrack->setParameters(keyValuePairs);
2439}
2440
2441VolumeShaper::Status AudioTrack::applyVolumeShaper(
2442        const sp<VolumeShaper::Configuration>& configuration,
2443        const sp<VolumeShaper::Operation>& operation)
2444{
2445    AutoMutex lock(mLock);
2446    mVolumeHandler->setIdIfNecessary(configuration);
2447    VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
2448
2449    if (status == DEAD_OBJECT) {
2450        if (restoreTrack_l("applyVolumeShaper") == OK) {
2451            status = mAudioTrack->applyVolumeShaper(configuration, operation);
2452        }
2453    }
2454    if (status >= 0) {
2455        // save VolumeShaper for restore
2456        mVolumeHandler->applyVolumeShaper(configuration, operation);
2457        if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
2458            mVolumeHandler->setStarted();
2459        }
2460    } else {
2461        // warn only if not an expected restore failure.
2462        ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
2463                "applyVolumeShaper failed: %d", status);
2464    }
2465    return status;
2466}
2467
2468sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2469{
2470    AutoMutex lock(mLock);
2471    sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2472    if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2473        if (restoreTrack_l("getVolumeShaperState") == OK) {
2474            state = mAudioTrack->getVolumeShaperState(id);
2475        }
2476    }
2477    return state;
2478}
2479
2480status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2481{
2482    if (timestamp == nullptr) {
2483        return BAD_VALUE;
2484    }
2485    AutoMutex lock(mLock);
2486    return getTimestamp_l(timestamp);
2487}
2488
2489status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2490{
2491    if (mCblk->mFlags & CBLK_INVALID) {
2492        const status_t status = restoreTrack_l("getTimestampExtended");
2493        if (status != OK) {
2494            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2495            // recommending that the track be recreated.
2496            return DEAD_OBJECT;
2497        }
2498    }
2499    // check for offloaded/direct here in case restoring somehow changed those flags.
2500    if (isOffloadedOrDirect_l()) {
2501        return INVALID_OPERATION; // not supported
2502    }
2503    status_t status = mProxy->getTimestamp(timestamp);
2504    LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
2505    bool found = false;
2506    timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2507    timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2508    // server side frame offset in case AudioTrack has been restored.
2509    for (int i = ExtendedTimestamp::LOCATION_SERVER;
2510            i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2511        if (timestamp->mTimeNs[i] >= 0) {
2512            // apply server offset (the flushed frame count is ignored,
2513            // so we don't report the jump when the flush occurs).
2514            timestamp->mPosition[i] += mFramesWrittenServerOffset;
2515            found = true;
2516        }
2517    }
2518    return found ? OK : WOULD_BLOCK;
2519}
2520
2521status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2522{
2523    AutoMutex lock(mLock);
2524    return getTimestamp_l(timestamp);
2525}
2526
2527status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
2528{
2529    bool previousTimestampValid = mPreviousTimestampValid;
2530    // Set false here to cover all the error return cases.
2531    mPreviousTimestampValid = false;
2532
2533    switch (mState) {
2534    case STATE_ACTIVE:
2535    case STATE_PAUSED:
2536        break; // handle below
2537    case STATE_FLUSHED:
2538    case STATE_STOPPED:
2539        return WOULD_BLOCK;
2540    case STATE_STOPPING:
2541    case STATE_PAUSED_STOPPING:
2542        if (!isOffloaded_l()) {
2543            return INVALID_OPERATION;
2544        }
2545        break; // offloaded tracks handled below
2546    default:
2547        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2548        break;
2549    }
2550
2551    if (mCblk->mFlags & CBLK_INVALID) {
2552        const status_t status = restoreTrack_l("getTimestamp");
2553        if (status != OK) {
2554            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2555            // recommending that the track be recreated.
2556            return DEAD_OBJECT;
2557        }
2558    }
2559
2560    // The presented frame count must always lag behind the consumed frame count.
2561    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
2562
2563    status_t status;
2564    if (isOffloadedOrDirect_l()) {
2565        // use Binder to get timestamp
2566        status = mAudioTrack->getTimestamp(timestamp);
2567    } else {
2568        // read timestamp from shared memory
2569        ExtendedTimestamp ets;
2570        status = mProxy->getTimestamp(&ets);
2571        if (status == OK) {
2572            ExtendedTimestamp::Location location;
2573            status = ets.getBestTimestamp(&timestamp, &location);
2574
2575            if (status == OK) {
2576                updateLatency_l();
2577                // It is possible that the best location has moved from the kernel to the server.
2578                // In this case we adjust the position from the previous computed latency.
2579                if (location == ExtendedTimestamp::LOCATION_SERVER) {
2580                    ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
2581                            "getTimestamp() location moved from kernel to server");
2582                    // check that the last kernel OK time info exists and the positions
2583                    // are valid (if they predate the current track, the positions may
2584                    // be zero or negative).
2585                    const int64_t frames =
2586                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2587                            ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
2588                            ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
2589                            ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
2590                            ?
2591                            int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
2592                                    / 1000)
2593                            :
2594                            (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2595                            - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
2596                    ALOGV("frame adjustment:%lld  timestamp:%s",
2597                            (long long)frames, ets.toString().c_str());
2598                    if (frames >= ets.mPosition[location]) {
2599                        timestamp.mPosition = 0;
2600                    } else {
2601                        timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
2602                    }
2603                } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
2604                    ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
2605                            "getTimestamp() location moved from server to kernel");
2606                }
2607
2608                // We update the timestamp time even when paused.
2609                if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
2610                    const int64_t now = systemTime();
2611                    const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
2612                    const int64_t lag =
2613                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2614                                ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
2615                            ? int64_t(mAfLatency * 1000000LL)
2616                            : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2617                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
2618                             * NANOS_PER_SECOND / mSampleRate;
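                    // Illustrative example (assumed numbers): a 480 frame gap between the last
                    // good server and kernel positions at 48 kHz gives a lag of 10 ms, so while
                    // paused the reported timestamp time is not allowed to trail the current
                    // time by more than that.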
2619                    const int64_t limit = now - lag; // no earlier than this limit
2620                    if (at < limit) {
2621                        ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
2622                                (long long)lag, (long long)at, (long long)limit);
2623                        timestamp.mTime = convertNsToTimespec(limit);
2624                    }
2625                }
2626                mPreviousLocation = location;
2627            } else {
2628                // right after AudioTrack is started, one may not find a timestamp
2629                ALOGV("getBestTimestamp did not find timestamp");
2630            }
2631        }
2632        if (status == INVALID_OPERATION) {
2633            // INVALID_OPERATION occurs when no timestamp has been issued by the server;
2634            // other failures are signaled by a negative time.
2635            // If we come out of FLUSHED or STOPPED where the position is known
2636            // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
2637            // "zero" for NuPlayer).  We don't convert for track restoration as position
2638            // does not reset.
2639            ALOGV("timestamp server offset:%lld restore frames:%lld",
2640                    (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
2641            if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
2642                status = WOULD_BLOCK;
2643            }
2644        }
2645    }
2646    if (status != NO_ERROR) {
2647        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2648        return status;
2649    }
2650    if (isOffloadedOrDirect_l()) {
2651        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2652            // use cached paused position in case another offloaded track is running.
2653            timestamp.mPosition = mPausedPosition;
2654            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2655            // TODO: adjust for delay
2656            return NO_ERROR;
2657        }
2658
2659        // Check whether a pending flush or stop has completed, as those commands may
2660        // be asynchronous or return near finish or exhibit glitchy behavior.
2661        //
2662        // Originally this showed up as the first timestamp being a continuation of
2663        // the previous song under gapless playback.
2664        // However, we sometimes see zero timestamps, then a glitch of
2665        // the previous song's position, and then correct timestamps afterwards.
2666        if (mStartFromZeroUs != 0 && mSampleRate != 0) {
2667            static const int kTimeJitterUs = 100000; // 100 ms
2668            static const int k1SecUs = 1000000;
2669
2670            const int64_t timeNow = getNowUs();
2671
2672            if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
2673                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2674                if (timestampTimeUs < mStartFromZeroUs) {
2675                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
2676                }
2677                const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
2678                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2679                        / ((double)mSampleRate * mPlaybackRate.mSpeed);
2680
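                // Worked example (illustrative): a reported position of 96000 frames at 48 kHz
                // and speed 1.0 gives deltaPositionByUs == 2000000; if only 500000 us have
                // elapsed since start, 2000000 > 500000 + kTimeJitterUs and the timestamp is
                // treated as a startup glitch below.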
2681                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2682                    // Verify that the counter can't count faster than the sample rate
2683                    // since the start time.  If greater, then that means we may have failed
2684                    // to completely flush or stop the previous playing track.
2685                    ALOGW_IF(!mTimestampStartupGlitchReported,
2686                            "getTimestamp startup glitch detected"
2687                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2688                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
2689                            timestamp.mPosition);
2690                    mTimestampStartupGlitchReported = true;
2691                    if (previousTimestampValid
2692                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2693                        timestamp = mPreviousTimestamp;
2694                        mPreviousTimestampValid = true;
2695                        return NO_ERROR;
2696                    }
2697                    return WOULD_BLOCK;
2698                }
2699                if (deltaPositionByUs != 0) {
2700                    mStartFromZeroUs = 0; // don't check again, we got valid nonzero position.
2701                }
2702            } else {
2703                mStartFromZeroUs = 0; // don't check again, start time expired.
2704            }
2705            mTimestampStartupGlitchReported = false;
2706        }
2707    } else {
2708        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2709        (void) updateAndGetPosition_l();
2710        // Server consumed (mServer) and presented both use the same server time base,
2711        // and server consumed is always >= presented.
2712        // The delta between these represents the number of frames in the buffer pipeline.
2713        // If this delta is greater than the client position, it means that the
2714        // presented position is still stuck at the starting line (figuratively speaking),
2715        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2716        // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2717        // mPosition exceeds 32 bits.
2718        // TODO Remove when timestamp is updated to contain pipeline status info.
2719        const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2720        if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
2721                && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2722            return INVALID_OPERATION;
2723        }
2724        // Convert timestamp position from server time base to client time base.
2725        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2726        // But if we change it to 64-bit then this could fail.
2727        // Use Modulo computation here.
2728        timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2729        // Immediately after a call to updateAndGetPosition_l(), mPosition and
2730        // mServer both represent the same frame position.  mPosition is
2731        // from the client's point of view, and mServer is from the server's
2732        // point of view.  So the difference between them is the "fudge factor"
2733        // between the client and server views due to stop() and/or a new
2734        // IAudioTrack.  And timestamp.mPosition is initially from the server's
2735        // point of view, so we need to apply the same fudge factor to it.
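            // Worked example (illustrative numbers only): with client mPosition = 1000,
            // server mServer = 800, and server-based timestamp.mPosition = 500, the
            // client-based position is 1000 - 800 + 500 = 700 frames; the Modulo arithmetic
            // keeps this correct even when the 32-bit counters wrap.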
2736    }
2737
2738    // Prevent retrograde motion in timestamp.
2739    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2740    if (status == NO_ERROR) {
2741        // previousTimestampValid is set to false when starting after a stop or flush.
2742        if (previousTimestampValid) {
2743            const int64_t previousTimeNanos =
2744                    audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
2745            int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);
2746
2747            // Fix stale time when checking timestamp right after start().
2748            //
2749            // For offload compatibility, use a default lag value here.
2750            // Any time discrepancy between this update and the pause timestamp is handled
2751            // by the retrograde check afterwards.
2752            const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
2753            const int64_t limitNs = mStartNs - lagNs;
2754            if (currentTimeNanos < limitNs) {
2755                ALOGD("correcting timestamp time for pause, "
2756                        "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
2757                        (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
2758                timestamp.mTime = convertNsToTimespec(limitNs);
2759                currentTimeNanos = limitNs;
2760            }
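                // For instance (illustrative value only), with mAfLatency = 20 ms, lagNs is
                // 20,000,000 ns and any timestamp time earlier than mStartNs - lagNs is
                // clamped to that limit before the retrograde check below runs.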
2761
2762            // retrograde check
2763            if (currentTimeNanos < previousTimeNanos) {
2764                ALOGW("retrograde timestamp time corrected, %lld < %lld",
2765                        (long long)currentTimeNanos, (long long)previousTimeNanos);
2766                timestamp.mTime = mPreviousTimestamp.mTime;
2767                // currentTimeNanos not used below.
2768            }
2769
2770            // Looking at signed delta will work even when the timestamps
2771            // are wrapping around.
2772            int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2773                    - mPreviousTimestamp.mPosition).signedValue();
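                // Hypothetical wraparound example: a previous position of 0xFFFFFF00 followed
                // by a current position of 0x00000100 yields a signed delta of +512, so a
                // legitimate advance across the 32-bit boundary is not treated as retrograde.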
2774            if (deltaPosition < 0) {
2775                // Only report once per position instead of spamming the log.
2776                if (!mRetrogradeMotionReported) {
2777                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2778                            deltaPosition,
2779                            timestamp.mPosition,
2780                            mPreviousTimestamp.mPosition);
2781                    mRetrogradeMotionReported = true;
2782                }
2783            } else {
2784                mRetrogradeMotionReported = false;
2785            }
2786            if (deltaPosition < 0) {
2787                timestamp.mPosition = mPreviousTimestamp.mPosition;
2788                deltaPosition = 0;
2789            }
2790#if 0
2791            // Change the surrounding #if 0 to #if 1 to verify the audio timestamp rate.
2792            const int64_t deltaTime =
2793                    audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
2794            if (deltaTime != 0) {
2795                const int64_t computedSampleRate =
2796                        deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
2797                ALOGD("computedSampleRate:%u  sampleRate:%u",
2798                        (unsigned)computedSampleRate, mSampleRate);
2799            }
2800#endif
2801        }
2802        mPreviousTimestamp = timestamp;
2803        mPreviousTimestampValid = true;
2804    }
2805
2806    return status;
2807}
2808
2809String8 AudioTrack::getParameters(const String8& keys)
2810{
2811    audio_io_handle_t output = getOutput();
2812    if (output != AUDIO_IO_HANDLE_NONE) {
2813        return AudioSystem::getParameters(output, keys);
2814    } else {
2815        return String8::empty();
2816    }
2817}
2818
2819bool AudioTrack::isOffloaded() const
2820{
2821    AutoMutex lock(mLock);
2822    return isOffloaded_l();
2823}
2824
2825bool AudioTrack::isDirect() const
2826{
2827    AutoMutex lock(mLock);
2828    return isDirect_l();
2829}
2830
2831bool AudioTrack::isOffloadedOrDirect() const
2832{
2833    AutoMutex lock(mLock);
2834    return isOffloadedOrDirect_l();
2835}
2836
2837
2838status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2839{
2840
2841    const size_t SIZE = 256;
2842    char buffer[SIZE];
2843    String8 result;
2844
2845    result.append(" AudioTrack::dump\n");
2846    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2847            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2848    result.append(buffer);
2849    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2850            mChannelCount, mFrameCount);
2851    result.append(buffer);
2852    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
2853            mSampleRate, mPlaybackRate.mSpeed, mStatus);
2854    result.append(buffer);
2855    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2856    result.append(buffer);
2857    ::write(fd, result.string(), result.size());
2858    return NO_ERROR;
2859}
2860
2861uint32_t AudioTrack::getUnderrunCount() const
2862{
2863    AutoMutex lock(mLock);
2864    return getUnderrunCount_l();
2865}
2866
2867uint32_t AudioTrack::getUnderrunCount_l() const
2868{
2869    return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2870}
2871
2872uint32_t AudioTrack::getUnderrunFrames() const
2873{
2874    AutoMutex lock(mLock);
2875    return mProxy->getUnderrunFrames();
2876}
2877
2878status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2879{
2880    if (callback == 0) {
2881        ALOGW("%s adding NULL callback!", __FUNCTION__);
2882        return BAD_VALUE;
2883    }
2884    AutoMutex lock(mLock);
2885    if (mDeviceCallback.unsafe_get() == callback.get()) {
2886        ALOGW("%s adding same callback!", __FUNCTION__);
2887        return INVALID_OPERATION;
2888    }
2889    status_t status = NO_ERROR;
2890    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2891        if (mDeviceCallback != 0) {
2892            ALOGW("%s callback already present!", __FUNCTION__);
2893            AudioSystem::removeAudioDeviceCallback(this, mOutput);
2894        }
2895        status = AudioSystem::addAudioDeviceCallback(this, mOutput);
2896    }
2897    mDeviceCallback = callback;
2898    return status;
2899}
2900
2901status_t AudioTrack::removeAudioDeviceCallback(
2902        const sp<AudioSystem::AudioDeviceCallback>& callback)
2903{
2904    if (callback == 0) {
2905        ALOGW("%s removing NULL callback!", __FUNCTION__);
2906        return BAD_VALUE;
2907    }
2908    AutoMutex lock(mLock);
2909    if (mDeviceCallback.unsafe_get() != callback.get()) {
2910        ALOGW("%s removing different callback!", __FUNCTION__);
2911        return INVALID_OPERATION;
2912    }
2913    mDeviceCallback.clear();
2914    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2915        AudioSystem::removeAudioDeviceCallback(this, mOutput);
2916    }
2917    return NO_ERROR;
2918}
2919
2920
2921void AudioTrack::onAudioDeviceUpdate(audio_io_handle_t audioIo,
2922                                 audio_port_handle_t deviceId)
2923{
2924    sp<AudioSystem::AudioDeviceCallback> callback;
2925    {
2926        AutoMutex lock(mLock);
2927        if (audioIo != mOutput) {
2928            return;
2929        }
2930        callback = mDeviceCallback.promote();
2931        // Only update the device if the track is active; route changes due to other use cases
2932        // are irrelevant for this client.
2933        if (mState == STATE_ACTIVE) {
2934            mRoutedDeviceId = deviceId;
2935        }
2936    }
2937    if (callback.get() != nullptr) {
2938        callback->onAudioDeviceUpdate(mOutput, mRoutedDeviceId);
2939    }
2940}
2941
2942status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
2943{
2944    if (msec == nullptr ||
2945            (location != ExtendedTimestamp::LOCATION_SERVER
2946                    && location != ExtendedTimestamp::LOCATION_KERNEL)) {
2947        return BAD_VALUE;
2948    }
2949    AutoMutex lock(mLock);
2950    // Inclusive of offloaded and direct tracks.
2951    //
2952    // It would be possible, though it is not enabled, to allow duration computation for
2953    // non-PCM audio_has_proportional_frames() formats, because they currently drain at
2954    // a rate equivalent to the PCM sample rate * frame size.
2955    if (!isPurePcmData_l()) {
2956        return INVALID_OPERATION;
2957    }
2958    ExtendedTimestamp ets;
2959    if (getTimestamp_l(&ets) == OK
2960            && ets.mTimeNs[location] > 0) {
2961        int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
2962                - ets.mPosition[location];
2963        if (diff < 0) {
2964            *msec = 0;
2965        } else {
2966            // ms is the pending playback time, in milliseconds, implied by the frame difference
2967            int64_t ms = (int64_t)((double)diff * 1000 /
2968                    ((double)mSampleRate * mPlaybackRate.mSpeed));
2969            // clockdiff is the timestamp age (negative)
2970            int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
2971                    ets.mTimeNs[location]
2972                    + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
2973                    - systemTime(SYSTEM_TIME_MONOTONIC);
2974
2975            //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
2976            static const int NANOS_PER_MILLIS = 1000000;
2977            *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
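                // Worked example (illustrative numbers only): diff = 4800 frames at 48000 Hz
                // and speed 1.0 gives ms = 100; if the timestamp is 20 ms old, clockdiff is
                // about -20,000,000 ns and the reported pending duration is roughly 80 ms.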
2978        }
2979        return NO_ERROR;
2980    }
2981    if (location != ExtendedTimestamp::LOCATION_SERVER) {
2982        return INVALID_OPERATION; // LOCATION_KERNEL is not available
2983    }
2984    // use server position directly (offloaded and direct arrive here)
2985    updateAndGetPosition_l();
2986    int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
2987    *msec = (diff <= 0) ? 0
2988            : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
2989    return NO_ERROR;
2990}
2991
2992bool AudioTrack::hasStarted()
2993{
2994    AutoMutex lock(mLock);
2995    switch (mState) {
2996    case STATE_STOPPED:
2997        if (isOffloadedOrDirect_l()) {
2998            // Return true if the track was started at some point in the past.
2999            return mStartFromZeroUs > 0;
3000        }
3001        // A normal audio track may still be draining, so
3002        // check if the stream has ended.  This covers fast track position
3003        // instability and start/stop without any data written.
3004        if (mProxy->getStreamEndDone()) {
3005            return true;
3006        }
3007        // fall through
3008    case STATE_ACTIVE:
3009    case STATE_STOPPING:
3010        break;
3011    case STATE_PAUSED:
3012    case STATE_PAUSED_STOPPING:
3013    case STATE_FLUSHED:
3014        return false;  // we're not active
3015    default:
3016        LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
3017        break;
3018    }
3019
3020    // wait indicates whether we need to wait for a timestamp.
3021    // This is determined conservatively: if we encounter an unexpected error,
3022    // we do not wait.
3023    bool wait = false;
3024    if (isOffloadedOrDirect_l()) {
3025        AudioTimestamp ts;
3026        status_t status = getTimestamp_l(ts);
3027        if (status == WOULD_BLOCK) {
3028            wait = true;
3029        } else if (status == OK) {
3030            wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
3031        }
3032        ALOGV("hasStarted wait:%d  ts:%u  start position:%lld",
3033                (int)wait,
3034                ts.mPosition,
3035                (long long)mStartTs.mPosition);
3036    } else {
3037        int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
3038        ExtendedTimestamp ets;
3039        status_t status = getTimestamp_l(&ets);
3040        if (status == WOULD_BLOCK) {  // no SERVER or KERNEL frame info in ets
3041            wait = true;
3042        } else if (status == OK) {
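                // Prefer the kernel position when it is valid, otherwise fall back to the
                // server position; wait if the chosen location has not advanced past the
                // position captured at start().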
3043            for (location = ExtendedTimestamp::LOCATION_KERNEL;
3044                    location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
3045                if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
3046                    continue;
3047                }
3048                wait = ets.mPosition[location] == 0
3049                        || ets.mPosition[location] == mStartEts.mPosition[location];
3050                break;
3051            }
3052        }
3053        ALOGV("hasStarted wait:%d  ets:%lld  start position:%lld",
3054                (int)wait,
3055                (long long)ets.mPosition[location],
3056                (long long)mStartEts.mPosition[location]);
3057    }
3058    return !wait;
3059}
3060
3061// =========================================================================
3062
3063void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
3064{
3065    sp<AudioTrack> audioTrack = mAudioTrack.promote();
3066    if (audioTrack != 0) {
3067        AutoMutex lock(audioTrack->mLock);
3068        audioTrack->mProxy->binderDied();
3069    }
3070}
3071
3072// =========================================================================
3073
3074AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
3075    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
3076      mIgnoreNextPausedInt(false)
3077{
3078}
3079
3080AudioTrack::AudioTrackThread::~AudioTrackThread()
3081{
3082}
3083
3084bool AudioTrack::AudioTrackThread::threadLoop()
3085{
3086    {
3087        AutoMutex _l(mMyLock);
3088        if (mPaused) {
3089            // TODO check return value and handle or log
3090            mMyCond.wait(mMyLock);
3091            // caller will check for exitPending()
3092            return true;
3093        }
3094        if (mIgnoreNextPausedInt) {
3095            mIgnoreNextPausedInt = false;
3096            mPausedInt = false;
3097        }
3098        if (mPausedInt) {
3099            // TODO use futex instead of condition, for event flag "or"
3100            if (mPausedNs > 0) {
3101                // TODO check return value and handle or log
3102                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
3103            } else {
3104                // TODO check return value and handle or log
3105                mMyCond.wait(mMyLock);
3106            }
3107            mPausedInt = false;
3108            return true;
3109        }
3110    }
3111    if (exitPending()) {
3112        return false;
3113    }
3114    nsecs_t ns = mReceiver.processAudioBuffer();
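        // The return value encodes the next wakeup: 0 re-runs the loop immediately,
        // NS_INACTIVE pauses until resume(), NS_NEVER exits the thread, NS_WHENEVER waits
        // indefinitely until wake(), and any other positive value sleeps for that many
        // nanoseconds before the next processAudioBuffer() call.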
3115    switch (ns) {
3116    case 0:
3117        return true;
3118    case NS_INACTIVE:
3119        pauseInternal();
3120        return true;
3121    case NS_NEVER:
3122        return false;
3123    case NS_WHENEVER:
3124        // Event driven: call wake() when callback notification conditions change.
3125        ns = INT64_MAX;
3126        // fall through
3127    default:
3128        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
3129        pauseInternal(ns);
3130        return true;
3131    }
3132}
3133
3134void AudioTrack::AudioTrackThread::requestExit()
3135{
3136    // must be in this order to avoid a race condition
3137    Thread::requestExit();
3138    resume();
3139}
3140
3141void AudioTrack::AudioTrackThread::pause()
3142{
3143    AutoMutex _l(mMyLock);
3144    mPaused = true;
3145}
3146
3147void AudioTrack::AudioTrackThread::resume()
3148{
3149    AutoMutex _l(mMyLock);
3150    mIgnoreNextPausedInt = true;
3151    if (mPaused || mPausedInt) {
3152        mPaused = false;
3153        mPausedInt = false;
3154        mMyCond.signal();
3155    }
3156}
3157
3158void AudioTrack::AudioTrackThread::wake()
3159{
3160    AutoMutex _l(mMyLock);
3161    if (!mPaused) {
3162        // wake() might be called while servicing a callback - ignore the next
3163        // pause time and call processAudioBuffer.
3164        mIgnoreNextPausedInt = true;
3165        if (mPausedInt && mPausedNs > 0) {
3166            // audio track is active and internally paused with timeout.
3167            mPausedInt = false;
3168            mMyCond.signal();
3169        }
3170    }
3171}
3172
3173void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3174{
3175    AutoMutex _l(mMyLock);
3176    mPausedInt = true;
3177    mPausedNs = ns;
3178}
3179
3180} // namespace android
3181