// AudioTrack.cpp revision ffa3695a012d22c6c81cf311232c5c84c06f9219
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/clock.h>
26#include <audio_utils/primitives.h>
27#include <binder/IPCThreadState.h>
28#include <media/AudioTrack.h>
29#include <utils/Log.h>
30#include <private/media/AudioTrackShared.h>
31#include <media/IAudioFlinger.h>
32#include <media/AudioPolicyHelper.h>
33#include <media/AudioResamplerPublic.h>
34
35#define WAIT_PERIOD_MS                  10
36#define WAIT_STREAM_END_TIMEOUT_SEC     120
37static const int kMaxLoopCountNotifications = 32;
38
39namespace android {
40// ---------------------------------------------------------------------------
41
42// TODO: Move to a separate .h
43
// Returns the lesser of two values by reference; on a tie the second
// argument is returned (matching the behavior of the ternary form).
template <typename T>
static inline const T &min(const T &a, const T &b) {
    if (a < b) {
        return a;
    }
    return b;
}
48
// Returns the greater of two values by reference; on a tie the second
// argument is returned (matching the behavior of the ternary form).
template <typename T>
static inline const T &max(const T &a, const T &b) {
    if (a > b) {
        return a;
    }
    return b;
}
53
54static const int32_t NANOS_PER_SECOND = 1000000000;
55
56static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
57{
58    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
59}
60
// Converts a timespec to a count of microseconds, truncating any
// sub-microsecond remainder in the nanosecond field.
static int64_t convertTimespecToUs(const struct timespec &tv)
{
    const int64_t wholeSecondsUs = tv.tv_sec * 1000000ll;
    const int64_t fractionUs = tv.tv_nsec / 1000;
    return wholeSecondsUs + fractionUs;
}
65
66// TODO move to audio_utils.
67static inline struct timespec convertNsToTimespec(int64_t ns) {
68    struct timespec tv;
69    tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
70    tv.tv_nsec = static_cast<long>(ns % NANOS_PER_SECOND);
71    return tv;
72}
73
74// current monotonic time in microseconds.
75static int64_t getNowUs()
76{
77    struct timespec tv;
78    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
79    return convertTimespecToUs(tv);
80}
81
82// FIXME: we don't use the pitch setting in the time stretcher (not working);
83// instead we emulate it using our sample rate converter.
84static const bool kFixPitch = true; // enable pitch fix
85static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
86{
87    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
88}
89
90static inline float adjustSpeed(float speed, float pitch)
91{
92    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
93}
94
95static inline float adjustPitch(float pitch)
96{
97    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
98}
99
// Must match similar computation in createTrack_l in Threads.cpp.
// TODO: Move to a common library
// Computes the minimum client frame count for a streaming track: enough
// buffers to cover the hardware output latency, scaled to source frames
// after resampling and timestretch.
static size_t calculateMinFrameCount(
        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
{
    // Ensure that buffer depth covers at least audio hardware latency
    // NOTE(review): if afSampleRate > 1000 * afFrameCount, the divisor
    // (1000 * afFrameCount) / afSampleRate is zero and this divides by zero.
    // Presumably real HAL configurations never produce that — confirm.
    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }
#if 0
    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
    // but keeping the code here to make it easier to add later.
    if (minBufCount < notificationsPerBufferReq) {
        minBufCount = notificationsPerBufferReq;
    }
#endif
    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
            /*, notificationsPerBufferReq*/);
    // Scale buffer count to the number of source frames needed per hardware
    // buffer at the client's sample rate and playback speed.
    return minBufCount * sourceFramesNeededWithTimestretch(
            sampleRate, afFrameCount, afSampleRate, speed);
}
125
// static
// Returns in *frameCount the minimum client buffer size, in frames, for a
// streaming track of the given stream type and sample rate, derived from the
// hardware output's latency, frame count, and sample rate.
// Returns BAD_VALUE for a NULL out-parameter or an unsatisfiable
// configuration, or propagates the AudioSystem query error.
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    // Query the hardware output parameters needed by calculateMinFrameCount().
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
            /*, 0 notificationsPerBufferReq*/);

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}
181
182// ---------------------------------------------------------------------------
183
// Default constructor: creates an uninitialized AudioTrack (mStatus stays
// NO_INIT).  Callers must invoke set() before using the track.
AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    // Default audio attributes: unknown usage/content type, no flags, empty tags.
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
199
200AudioTrack::AudioTrack(
201        audio_stream_type_t streamType,
202        uint32_t sampleRate,
203        audio_format_t format,
204        audio_channel_mask_t channelMask,
205        size_t frameCount,
206        audio_output_flags_t flags,
207        callback_t cbf,
208        void* user,
209        int32_t notificationFrames,
210        audio_session_t sessionId,
211        transfer_type transferType,
212        const audio_offload_info_t *offloadInfo,
213        uid_t uid,
214        pid_t pid,
215        const audio_attributes_t* pAttributes,
216        bool doNotReconnect,
217        float maxRequiredSpeed)
218    : mStatus(NO_INIT),
219      mState(STATE_STOPPED),
220      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
221      mPreviousSchedulingGroup(SP_DEFAULT),
222      mPausedPosition(0),
223      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
224      mPortId(AUDIO_PORT_HANDLE_NONE)
225{
226    mStatus = set(streamType, sampleRate, format, channelMask,
227            frameCount, flags, cbf, user, notificationFrames,
228            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
229            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
230}
231
232AudioTrack::AudioTrack(
233        audio_stream_type_t streamType,
234        uint32_t sampleRate,
235        audio_format_t format,
236        audio_channel_mask_t channelMask,
237        const sp<IMemory>& sharedBuffer,
238        audio_output_flags_t flags,
239        callback_t cbf,
240        void* user,
241        int32_t notificationFrames,
242        audio_session_t sessionId,
243        transfer_type transferType,
244        const audio_offload_info_t *offloadInfo,
245        uid_t uid,
246        pid_t pid,
247        const audio_attributes_t* pAttributes,
248        bool doNotReconnect,
249        float maxRequiredSpeed)
250    : mStatus(NO_INIT),
251      mState(STATE_STOPPED),
252      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
253      mPreviousSchedulingGroup(SP_DEFAULT),
254      mPausedPosition(0),
255      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
256      mPortId(AUDIO_PORT_HANDLE_NONE)
257{
258    mStatus = set(streamType, sampleRate, format, channelMask,
259            0 /*frameCount*/, flags, cbf, user, notificationFrames,
260            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
261            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
262}
263
// Destructor: if set() succeeded, stops playback, tears down the callback
// thread, detaches the server-side track and shared memory, and releases
// the audio session.
AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
        }
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        // Push any pending binder commands so the server-side release is not
        // delayed until the next outbound transaction.
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}
291
// Shared initialization invoked by every constructor.  Validates and
// defaults the requested configuration, resolves the transfer type, spawns
// the callback thread when a callback is supplied, and creates the
// server-side IAudioTrack via createTrack_l().
// On success sets mStatus = NO_ERROR and returns NO_ERROR; on failure the
// track remains unusable (mStatus stays NO_INIT).
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);

    mThreadCanCallJava = threadCanCallJava;

    // Resolve TRANSFER_DEFAULT to a concrete transfer type, and reject
    // transfer type / callback / shared buffer combinations that conflict.
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        // Certain attribute flags force corresponding output flags.
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
        }
        // check deep buffer after flags have been modified above
        if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        // NOTE(review): this ORs an audio_output_flags_t value into
        // mAttributes.flags (an attributes flag mask); it looks like it was
        // intended for the local output 'flags' variable — confirm.
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    // Frame size: bytes per frame for proportional (PCM) formats; a single
    // byte for opaque direct (e.g. compressed) streams.
    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    // notificationFrames >= 0 is a notification period in frames; a negative
    // value requests -notificationFrames notifications per buffer, which is
    // only permitted for fast tracks without an explicit frameCount.
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("notificationFrames=%d not permitted for non-fast track",
                    notificationFrames);
            return BAD_VALUE;
        }
        if (frameCount > 0) {
            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    notificationFrames, frameCount);
            return BAD_VALUE;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "notificationFrames=%d clamped to the range -%u to -%u",
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
    // Only trust the caller-supplied uid/pid when invoked from within the
    // same process; otherwise use the binder calling identity.
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status_t status = createTrack_l();

    if (status != NO_ERROR) {
        // Tear down the callback thread created above before reporting failure.
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    // Success: initialize the remaining client-side bookkeeping.
    mStatus = NO_ERROR;
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartNs = 0;
    mStartFromZeroUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
    mVolumeHandler = new VolumeHandler();
    return NO_ERROR;
}
568
569// -------------------------------------------------------------------------
570
// Starts (or resumes) playback.  Moves the state machine out of
// stopped/paused/flushed, records start timestamps for later timestamp
// adjustment, and asks the server-side track to start.  If the control
// block was invalidated (e.g. the server track died), attempts to restore
// the track.  Returns INVALID_OPERATION if already active, otherwise the
// server start/restore status.
status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    // A track paused while draining (PAUSED_STOPPING) resumes draining.
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();

    // save start timestamp
    if (isOffloadedOrDirect_l()) {
        if (getTimestamp_l(mStartTs) != OK) {
            mStartTs.mPosition = 0;
        }
    } else {
        if (getTimestamp_l(&mStartEts) != OK) {
            mStartEts.clear();
        }
    }
    mStartNs = systemTime(); // save this for timestamp adjustment after starting.
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mRetrogradeMotionReported = false;
        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;

        if (!isOffloadedOrDirect_l()
                && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
                    (long long)(mFramesWrittenServerOffset
                            + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)mStartEts.mFlushed,
                    (long long)mFramesWritten);
            mFramesWrittenServerOffset = -mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartFromZeroUs = mStartNs / 1000;

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    // Clear stream-end/disabled flags; the returned previous value tells us
    // whether the control block was invalidated while we were stopped.
    int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            // No callback thread: boost the calling thread's priority for
            // low-latency writes, remembering the previous priority and
            // scheduling group so stop() can restore them.
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }

        // Start our local VolumeHandler for restoration purposes.
        mVolumeHandler->setStarted();
    } else {
        // Start failed: roll back the state transition and the thread /
        // priority changes made above.
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}
680
// Stops playback.  Offloaded tracks enter STATE_STOPPING and continue
// draining queued data; all other tracks stop immediately.  No-op unless
// the track is active or paused.
void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        ALOGD_IF(mSharedBuffer == nullptr,
                "stop() called with %u frames delivered", mReleased.value());
        mReleased = 0;
    }

    mProxy->interrupt();
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        // No callback thread: restore the priority boosted in start().
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}
719
720bool AudioTrack::stopped() const
721{
722    AutoMutex lock(mLock);
723    return mState != STATE_ACTIVE;
724}
725
726void AudioTrack::flush()
727{
728    if (mSharedBuffer != 0) {
729        return;
730    }
731    AutoMutex lock(mLock);
732    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
733        return;
734    }
735    flush_l();
736}
737
// Flushes client- and server-side buffers.  Caller must hold mLock and the
// track must not be active.  Resets the playback marker, update period and
// released-frame count, then discards queued data on both sides.
void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        // Wake any blocked obtainBuffer() before flushing (offload only).
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}
756
// Pauses an active or draining track.  For offloaded tracks also caches the
// current render position, since a shared offload output could otherwise
// report another track's time while this one is paused.
void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        // Draining track: remember it was stopping so start() can resume the drain.
        mState = STATE_PAUSED_STOPPING;
    } else {
        // Not playing: nothing to pause.
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}
789
790status_t AudioTrack::setVolume(float left, float right)
791{
792    // This duplicates a test by AudioTrack JNI, but that is not the only caller
793    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
794            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
795        return BAD_VALUE;
796    }
797
798    AutoMutex lock(mLock);
799    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
800    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
801
802    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
803
804    if (isOffloaded_l()) {
805        mAudioTrack->signal();
806    }
807    return NO_ERROR;
808}
809
// Convenience overload: applies the same gain to both channels.
status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}
814
815status_t AudioTrack::setAuxEffectSendLevel(float level)
816{
817    // This duplicates a test by AudioTrack JNI, but that is not the only caller
818    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
819        return BAD_VALUE;
820    }
821
822    AutoMutex lock(mLock);
823    mSendLevel = level;
824    mProxy->setSendLevel(level);
825
826    return NO_ERROR;
827}
828
829void AudioTrack::getAuxEffectSendLevel(float* level) const
830{
831    if (level != NULL) {
832        *level = mSendLevel;
833    }
834}
835
// Sets a new nominal sample rate.  Rejected for offloaded/direct and fast
// tracks, and when the effective rate (with the emulated pitch folded in)
// would exceed the output's maximum downsampling ratio.
status_t AudioTrack::setSampleRate(uint32_t rate)
{
    AutoMutex lock(mLock);
    if (rate == mSampleRate) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
        return INVALID_OPERATION;
    }
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    // NOTE: it is theoretically possible, but highly unlikely, that a device change
    // could mean a previously allowed sampling rate is no longer allowed.
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }
    // TODO: Should we also check if the buffer size is compatible?

    mSampleRate = rate;
    mProxy->setSampleRate(effectiveSampleRate);

    return NO_ERROR;
}
866
// Returns the current playback sample rate.  For offloaded/direct tracks
// the rate may be changed by the decoder, so it is re-queried from the
// output and cached before returning.
// NOTE(review): assignment to mSampleRate inside a const method implies it
// is declared mutable in the header — confirm there.
uint32_t AudioTrack::getSampleRate() const
{
    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
            // On query failure the previously cached rate is returned.
        }
    }
    return mSampleRate;
}
885
// Returns the sample rate recorded at track creation (set to the output's
// rate in createTrack_l() when none was specified); unaffected by later
// setSampleRate() calls.
uint32_t AudioTrack::getOriginalSampleRate() const
{
    return mOriginalSampleRate;
}
890
// Sets the playback speed/pitch.  Pitch is emulated by adjusting the
// effective sample rate and speed sent to the server proxy, while the
// client-visible mPlaybackRate keeps the caller's requested values.
// Returns INVALID_OPERATION for offloaded/direct/fast tracks, BAD_VALUE
// when the effective parameters are out of bounds or incompatible with
// the buffer size, NO_ERROR otherwise.
status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
    AutoMutex lock(mLock);
    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION;
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    ALOGV("setPlaybackRate (input): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    const float effectivePitch = adjustPitch(playbackRate.mPitch);
    AudioPlaybackRate playbackRateTemp = playbackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;

    ALOGV("setPlaybackRate (effective): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            effectiveRate, effectiveSpeed, effectivePitch);

    // Validate the pitch-adjusted (effective) values, not the raw request.
    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
        ALOGW("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // Check if the buffer size is compatible.
    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
        ALOGW("setPlaybackRate(%f, %f) failed (buffer size)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    // Check resampler ratios are within bounds
    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
            (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
                        playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // All checks passed: record the requested values for the client,
    // push the effective values to the server.
    mPlaybackRate = playbackRate;
    //set effective rates
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    return NO_ERROR;
}
948
// Returns the playback rate as requested by the client (not the
// pitch-adjusted effective values sent to the server proxy).
// NOTE(review): the returned reference aliases a member that can be
// rewritten by a concurrent setPlaybackRate() once mLock is released;
// callers should copy the result promptly.
const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
{
    AutoMutex lock(mLock);
    return mPlaybackRate;
}
954
955ssize_t AudioTrack::getBufferSizeInFrames()
956{
957    AutoMutex lock(mLock);
958    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
959        return NO_INIT;
960    }
961    return (ssize_t) mProxy->getBufferSizeInFrames();
962}
963
964status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
965{
966    if (duration == nullptr) {
967        return BAD_VALUE;
968    }
969    AutoMutex lock(mLock);
970    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
971        return NO_INIT;
972    }
973    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
974    if (bufferSizeInFrames < 0) {
975        return (status_t)bufferSizeInFrames;
976    }
977    *duration = (int64_t)((double)bufferSizeInFrames * 1000000
978            / ((double)mSampleRate * mPlaybackRate.mSpeed));
979    return NO_ERROR;
980}
981
982ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
983{
984    AutoMutex lock(mLock);
985    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
986        return NO_INIT;
987    }
988    // Reject if timed track or compressed audio.
989    if (!audio_is_linear_pcm(mFormat)) {
990        return INVALID_OPERATION;
991    }
992    return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
993}
994
995status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
996{
997    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
998        return INVALID_OPERATION;
999    }
1000
1001    if (loopCount == 0) {
1002        ;
1003    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
1004            loopEnd - loopStart >= MIN_LOOP) {
1005        ;
1006    } else {
1007        return BAD_VALUE;
1008    }
1009
1010    AutoMutex lock(mLock);
1011    // See setPosition() regarding setting parameters such as loop points or position while active
1012    if (mState == STATE_ACTIVE) {
1013        return INVALID_OPERATION;
1014    }
1015    setLoop_l(loopStart, loopEnd, loopCount);
1016    return NO_ERROR;
1017}
1018
// Applies loop parameters to the static track proxy.  Caller must hold
// mLock and must have verified the track is not active (see setLoop()).
void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // We do not update the periodic notification point.
    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopCount = loopCount;
    mLoopEnd = loopEnd;
    mLoopStart = loopStart;
    // Reset the notification counter so loop-end callbacks restart from the
    // new count.
    mLoopCountNotified = loopCount;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
}
1031
1032status_t AudioTrack::setMarkerPosition(uint32_t marker)
1033{
1034    // The only purpose of setting marker position is to get a callback
1035    if (mCbf == NULL || isOffloadedOrDirect()) {
1036        return INVALID_OPERATION;
1037    }
1038
1039    AutoMutex lock(mLock);
1040    mMarkerPosition = marker;
1041    mMarkerReached = false;
1042
1043    sp<AudioTrackThread> t = mAudioTrackThread;
1044    if (t != 0) {
1045        t->wake();
1046    }
1047    return NO_ERROR;
1048}
1049
1050status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
1051{
1052    if (isOffloadedOrDirect()) {
1053        return INVALID_OPERATION;
1054    }
1055    if (marker == NULL) {
1056        return BAD_VALUE;
1057    }
1058
1059    AutoMutex lock(mLock);
1060    mMarkerPosition.getValue(marker);
1061
1062    return NO_ERROR;
1063}
1064
1065status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1066{
1067    // The only purpose of setting position update period is to get a callback
1068    if (mCbf == NULL || isOffloadedOrDirect()) {
1069        return INVALID_OPERATION;
1070    }
1071
1072    AutoMutex lock(mLock);
1073    mNewPosition = updateAndGetPosition_l() + updatePeriod;
1074    mUpdatePeriod = updatePeriod;
1075
1076    sp<AudioTrackThread> t = mAudioTrackThread;
1077    if (t != 0) {
1078        t->wake();
1079    }
1080    return NO_ERROR;
1081}
1082
1083status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
1084{
1085    if (isOffloadedOrDirect()) {
1086        return INVALID_OPERATION;
1087    }
1088    if (updatePeriod == NULL) {
1089        return BAD_VALUE;
1090    }
1091
1092    AutoMutex lock(mLock);
1093    *updatePeriod = mUpdatePeriod;
1094
1095    return NO_ERROR;
1096}
1097
// Moves the playback head of a static (shared-buffer) track.
// Returns INVALID_OPERATION for streaming/offloaded/direct tracks or when
// the track is active, BAD_VALUE for a position beyond the buffer.
status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // After setting the position, use full update period before notification.
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mStaticProxy->setBufferPosition(position);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
    return NO_ERROR;
}
1124
// Returns the playback head position in frames.
// Offloaded/direct non-PCM tracks report the DSP render position (or a
// cached position while paused); all other tracks report the server
// position maintained through the shared-memory proxy.
status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // FIXME: offloaded and direct tracks call into the HAL for render positions
    // for compressed/synced data; however, we use proxy position for pure linear pcm data
    // as we do not know the capability of the HAL for pcm position support and standby.
    // There may be some latency differences between the HAL position and the proxy position.
    if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
        uint32_t dspFrames = 0;

        // While paused, the DSP position keeps moving internally; report the
        // position captured at pause time instead.
        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames; // actually unused
            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        // If the server died or the track was invalidated, try to restore it
        // before reading the position.
        if (mCblk->mFlags & CBLK_INVALID) {
            (void) restoreTrack_l("getPosition");
            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
        }

        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l().value();
    }
    return NO_ERROR;
}
1166
1167status_t AudioTrack::getBufferPosition(uint32_t *position)
1168{
1169    if (mSharedBuffer == 0) {
1170        return INVALID_OPERATION;
1171    }
1172    if (position == NULL) {
1173        return BAD_VALUE;
1174    }
1175
1176    AutoMutex lock(mLock);
1177    *position = mStaticProxy->getBufferPosition();
1178    return NO_ERROR;
1179}
1180
// Rewinds a static (shared-buffer) track to the start of its buffer so it
// can be played again.  Returns INVALID_OPERATION for streaming/
// offloaded/direct tracks or when the track is active.
status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // Re-arm the periodic notification and zero the client-side position,
    // after folding any outstanding server progress into the bookkeeping.
    mNewPosition = mUpdatePeriod;
    (void) updateAndGetPosition_l();
    mPosition = 0;
    mPreviousTimestampValid = false;
#if 0
    // The documentation is not clear on the behavior of reload() and the restoration
    // of loop count. Historically we have not restored loop count, start, end,
    // but it makes sense if one desires to repeat playing a particular sound.
    if (mLoopCount != 0) {
        mLoopCountNotified = mLoopCount;
        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
    }
#endif
    mStaticProxy->setBufferPosition(0);
    return NO_ERROR;
}
1208
// Returns the I/O handle of the output this track is attached to
// (AUDIO_IO_HANDLE_NONE if the track has no output).
audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}
1214
1215status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1216    AutoMutex lock(mLock);
1217    if (mSelectedDeviceId != deviceId) {
1218        mSelectedDeviceId = deviceId;
1219        if (mStatus == NO_ERROR) {
1220            android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1221        }
1222    }
1223    return NO_ERROR;
1224}
1225
// Returns the device explicitly selected via setOutputDevice(); this is the
// requested device, not necessarily the one currently routed to.
audio_port_handle_t AudioTrack::getOutputDevice() {
    AutoMutex lock(mLock);
    return mSelectedDeviceId;
}
1230
1231audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1232    AutoMutex lock(mLock);
1233    if (mOutput == AUDIO_IO_HANDLE_NONE) {
1234        return AUDIO_PORT_HANDLE_NONE;
1235    }
1236    // if the output stream does not have an active audio patch, use either the device initially
1237    // selected by audio policy manager or the last routed device
1238    audio_port_handle_t deviceId = AudioSystem::getDeviceIdForIo(mOutput);
1239    if (deviceId == AUDIO_PORT_HANDLE_NONE) {
1240        deviceId = mRoutedDeviceId;
1241    }
1242    mRoutedDeviceId = deviceId;
1243    return deviceId;
1244}
1245
1246status_t AudioTrack::attachAuxEffect(int effectId)
1247{
1248    AutoMutex lock(mLock);
1249    status_t status = mAudioTrack->attachAuxEffect(effectId);
1250    if (status == NO_ERROR) {
1251        mAuxEffectId = effectId;
1252    }
1253    return status;
1254}
1255
1256audio_stream_type_t AudioTrack::streamType() const
1257{
1258    if (mStreamType == AUDIO_STREAM_DEFAULT) {
1259        return audio_attributes_to_stream_type(&mAttributes);
1260    }
1261    return mStreamType;
1262}
1263
// Returns the estimated track latency in milliseconds, refreshing the
// cached value from the output first (see updateLatency_l()).
uint32_t AudioTrack::latency()
{
    AutoMutex lock(mLock);
    updateLatency_l();
    return mLatency;
}
1270
1271// -------------------------------------------------------------------------
1272
// must be called with mLock held
// Refreshes mAfLatency from the output and recomputes mLatency as the
// output latency plus the duration of the client buffer.  On query failure
// the previously cached values are kept.
void AudioTrack::updateLatency_l()
{
    status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGW("getLatency(%d) failed status %d", mOutput, status);
    } else {
        // FIXME don't believe this lie
        // Buffer duration in ms is (1000 * frames) / rate.
        mLatency = mAfLatency + (1000 * mFrameCount) / mSampleRate;
    }
}
1284
1285// TODO Move this macro to a common header file for enum to string conversion in audio framework.
1286#define MEDIA_CASE_ENUM(name) case name: return #name
1287const char * AudioTrack::convertTransferToText(transfer_type transferType) {
1288    switch (transferType) {
1289        MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
1290        MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
1291        MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
1292        MEDIA_CASE_ENUM(TRANSFER_SYNC);
1293        MEDIA_CASE_ENUM(TRANSFER_SHARED);
1294        default:
1295            return "UNRECOGNIZED";
1296    }
1297}
1298
// (Re-)creates the server-side IAudioTrack and the shared-memory control
// block and proxy for this client: acquires an output from audio policy,
// validates fast-track eligibility, computes frame counts and notification
// sizes, creates the track in AudioFlinger, then pushes the current
// volume/sample-rate/playback-rate/send-level state to the new proxy.
// NOTE(review): the "_l" suffix indicates the caller is expected to hold
// mLock — confirm against the call sites.
status_t AudioTrack::createTrack_l()
{
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    // Drop the device callback registered on the previous output (if any);
    // it is re-registered on the new output at the end on success.
    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
    }
    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;

    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.

    // Ask audio policy for an output matching the track's configuration.
    status_t status;
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = mSampleRate;
    config.channel_mask = mChannelMask;
    config.format = mFormat;
    config.offload_info = mOffloadInfoCopy;
    mRoutedDeviceId = mSelectedDeviceId;
    status = AudioSystem::getOutputForAttr(attr, &output,
                                           mSessionId, &streamType, mClientUid,
                                           &config,
                                           mFlags, &mRoutedDeviceId, &mPortId);

    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u,"
              " format %#x, channel mask %#x, flags %#x",
              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask,
              mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all
    status = AudioSystem::getLatency(output, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }
    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);

    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    // TODO consider making this a member variable if there are other uses for it later
    size_t afFrameCountHAL;
    status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
    if (status != NO_ERROR) {
        ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
        goto release;
    }
    ALOG_ASSERT(afFrameCountHAL > 0);

    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    // An unspecified client rate defaults to the output's rate.
    if (mSampleRate == 0) {
        mSampleRate = mAfSampleRate;
        mOriginalSampleRate = mAfSampleRate;
    }

    // Client can only express a preference for FAST.  Server will perform additional tests.
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // either of these use cases:
        // use case 1: shared buffer
        bool sharedBuffer = mSharedBuffer != 0;
        bool transferAllowed =
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);

        bool useCaseAllowed = sharedBuffer || transferAllowed;
        if (!useCaseAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, not shared buffer and transfer = %s",
                  convertTransferToText(mTransfer));
        }

        // sample rates must also match
        bool sampleRateAllowed = mSampleRate == mAfSampleRate;
        if (!sampleRateAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
                  mSampleRate, mAfSampleRate);
        }

        bool fastAllowed = useCaseAllowed && sampleRateAllowed;
        if (!fastAllowed) {
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    mNotificationFramesAct = mNotificationFramesReq;

    // Determine the frame count to request from the server.
    size_t frameCount = mReqFrameCount;
    if (!audio_has_proportional_frames(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = mAfFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(mFormat);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSize;
    } else {
        size_t minFrameCount = 0;
        // For fast tracks the frame count calculations and checks are mostly done by server,
        // but we try to respect the application's request for notifications per buffer.
        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
            if (mNotificationsPerBufferReq > 0) {
                // Avoid possible arithmetic overflow during multiplication.
                // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
                            mNotificationsPerBufferReq, afFrameCountHAL);
                } else {
                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
                }
            }
        } else {
            // for normal tracks precompute the frame count based on speed.
            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
            minFrameCount = calculateMinFrameCount(
                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
                    speed /*, 0 mNotificationsPerBufferReq*/);
        }
        if (frameCount < minFrameCount) {
            frameCount = minFrameCount;
        }
    }

    audio_output_flags_t flags = mFlags;

    // Pass the callback thread's tid so the server can grant it elevated
    // scheduling for fast tracks.
    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
        // application-level code follows all non-blocking design rules, the language runtime
        // doesn't also follow those rules, so the thread will not benefit overall.
        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
            tid = mAudioTrackThread->getTid();
        }
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    audio_session_t originalSessionId = mSessionId;
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &flags,
                                                      mSharedBuffer,
                                                      output,
                                                      mClientPid,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status,
                                                      mPortId);
    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
            "session ID changed from %d to %d", originalSessionId, mSessionId);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    // FIXME compare to AudioRecord
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        // Stop watching the old binder before replacing it.
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (flags & AUDIO_OUTPUT_FLAG_FAST) {
            ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
            if (!mThreadCanCallJava) {
                mAwaitBoost = true;
            }
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount,
                    temp);
        }
    }
    mFlags = flags;

    // Make sure that application is notified with sufficient margin before underrun.
    // The client can divide the AudioTrack buffer into sub-buffers,
    // and expresses its desire to server as the notification frame count.
    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
        size_t maxNotificationFrames;
        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
            // notify every HAL buffer, regardless of the size of the track buffer
            maxNotificationFrames = afFrameCountHAL;
        } else {
            // For normal tracks, use at least double-buffering if no sample rate conversion,
            // or at least triple-buffering if there is sample rate conversion
            const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
            maxNotificationFrames = frameCount / nBuffering;
        }
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
            if (mNotificationFramesAct == 0) {
                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
                    maxNotificationFrames, frameCount);
            } else {
                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
                    mNotificationFramesAct, maxNotificationFrames, frameCount);
            }
            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = cblk + 1;
    } else {
        buffers = mSharedBuffer->pointer();
        if (buffers == NULL) {
            ALOGE("Could not get buffer pointer");
            return NO_INIT;
        }
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    mFrameCount = frameCount;
    updateLatency_l();  // this refetches mAfLatency and sets mLatency

    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // reset server position to 0 as we have new cblk.
    mServer = 0;

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    // Re-apply client-side state (volume, send level, effective rate and
    // playback rate) to the freshly created proxy.
    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    if (mDeviceCallback != 0) {
        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
    }

    return NO_ERROR;
    }

// Error path used before the I/O handle was handed off to AudioFlinger:
// release the output ourselves and never report NO_ERROR.
release:
    AudioSystem::releaseOutput(output, streamType, mSessionId);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}
1658
1659status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1660{
1661    if (audioBuffer == NULL) {
1662        if (nonContig != NULL) {
1663            *nonContig = 0;
1664        }
1665        return BAD_VALUE;
1666    }
1667    if (mTransfer != TRANSFER_OBTAIN) {
1668        audioBuffer->frameCount = 0;
1669        audioBuffer->size = 0;
1670        audioBuffer->raw = NULL;
1671        if (nonContig != NULL) {
1672            *nonContig = 0;
1673        }
1674        return INVALID_OPERATION;
1675    }
1676
1677    const struct timespec *requested;
1678    struct timespec timeout;
1679    if (waitCount == -1) {
1680        requested = &ClientProxy::kForever;
1681    } else if (waitCount == 0) {
1682        requested = &ClientProxy::kNonBlocking;
1683    } else if (waitCount > 0) {
1684        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1685        timeout.tv_sec = ms / 1000;
1686        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1687        requested = &timeout;
1688    } else {
1689        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1690        requested = NULL;
1691    }
1692    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1693}
1694
// Internal obtainBuffer() used by all transfer modes.
//   requested: how long to wait for frames (kForever, kNonBlocking, or a timeout);
//              forced to kNonBlocking while the track is not in STATE_ACTIVE.
//   elapsed:   optional out-parameter passed through to the proxy.
//   nonContig: optionally receives the count of extra non-contiguous frames
//              available beyond the returned contiguous region.
// On DEAD_OBJECT (server death or invalidation) the track is re-created via
// restoreTrack_l() and the wait retried; on NOT_ENOUGH_DATA a disabled track is
// restarted.  At most kMaxTries such recoveries are attempted.
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        // restore failed: report an empty buffer to the caller
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            if (status == NOT_ENOUGH_DATA) {
                restartIfDisabled();
            }

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);
    } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));

    // Copy the result out even on failure (frameCount is 0 on the error paths).
    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSize;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}
1769
1770void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1771{
1772    // FIXME add error checking on mode, by adding an internal version
1773    if (mTransfer == TRANSFER_SHARED) {
1774        return;
1775    }
1776
1777    size_t stepCount = audioBuffer->size / mFrameSize;
1778    if (stepCount == 0) {
1779        return;
1780    }
1781
1782    Proxy::Buffer buffer;
1783    buffer.mFrameCount = stepCount;
1784    buffer.mRaw = audioBuffer->raw;
1785
1786    AutoMutex lock(mLock);
1787    mReleased += stepCount;
1788    mInUnderrun = false;
1789    mProxy->releaseBuffer(&buffer);
1790
1791    // restart track if it was disabled by audioflinger due to previous underrun
1792    restartIfDisabled();
1793}
1794
1795void AudioTrack::restartIfDisabled()
1796{
1797    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1798    if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1799        ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1800        // FIXME ignoring status
1801        mAudioTrack->start();
1802    }
1803}
1804
1805// -------------------------------------------------------------------------
1806
1807ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1808{
1809    if (mTransfer != TRANSFER_SYNC) {
1810        return INVALID_OPERATION;
1811    }
1812
1813    if (isDirect()) {
1814        AutoMutex lock(mLock);
1815        int32_t flags = android_atomic_and(
1816                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1817                            &mCblk->mFlags);
1818        if (flags & CBLK_INVALID) {
1819            return DEAD_OBJECT;
1820        }
1821    }
1822
1823    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1824        // Sanity-check: user is most-likely passing an error code, and it would
1825        // make the return value ambiguous (actualSize vs error).
1826        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
1827        return BAD_VALUE;
1828    }
1829
1830    size_t written = 0;
1831    Buffer audioBuffer;
1832
1833    while (userSize >= mFrameSize) {
1834        audioBuffer.frameCount = userSize / mFrameSize;
1835
1836        status_t err = obtainBuffer(&audioBuffer,
1837                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1838        if (err < 0) {
1839            if (written > 0) {
1840                break;
1841            }
1842            if (err == TIMED_OUT || err == -EINTR) {
1843                err = WOULD_BLOCK;
1844            }
1845            return ssize_t(err);
1846        }
1847
1848        size_t toWrite = audioBuffer.size;
1849        memcpy(audioBuffer.i8, buffer, toWrite);
1850        buffer = ((const char *) buffer) + toWrite;
1851        userSize -= toWrite;
1852        written += toWrite;
1853
1854        releaseBuffer(&audioBuffer);
1855    }
1856
1857    if (written > 0) {
1858        mFramesWritten += written / mFrameSize;
1859    }
1860    return written;
1861}
1862
1863// -------------------------------------------------------------------------
1864
// Body of the AudioTrackThread loop: performs client-side bookkeeping and
// delivers all callback events (underrun, loop end, buffer end, marker,
// new position, new IAudioTrack, stream end, and EVENT_MORE_DATA for
// TRANSFER_CALLBACK tracks).
// Returns the time in nanoseconds until this function should run again:
//   0           - run again immediately
//   NS_INACTIVE - do not run again until the track is re-started
//   NS_NEVER    - do not run again (unrecoverable error)
//   NS_WHENEVER - no timed event pending
//   other > 0   - sleep approximately that long before running again
nsecs_t AudioTrack::processAudioBuffer()
{
    // Currently the AudioTrack thread is not created if there are no callbacks.
    // Would it ever make sense to run the thread, even without callbacks?
    // If so, then replace this by checks at each use for mCbf != NULL.
    LOG_ALWAYS_FATAL_IF(mCblk == NULL);

    mLock.lock();
    if (mAwaitBoost) {
        // Poll (with exponential backoff) until this thread has been promoted
        // to SCHED_FIFO/SCHED_RR scheduling, up to kMaxTries attempts.
        mAwaitBoost = false;
        mLock.unlock();
        static const int32_t kMaxTries = 5;
        int32_t tryCounter = kMaxTries;
        uint32_t pollUs = 10000;
        do {
            int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
            if (policy == SCHED_FIFO || policy == SCHED_RR) {
                break;
            }
            usleep(pollUs);
            pollUs <<= 1;
        } while (tryCounter-- > 0);
        if (tryCounter < 0) {
            ALOGE("did not receive expected priority boost on time");
        }
        // Run again immediately
        return 0;
    }

    // Can only reference mCblk while locked
    int32_t flags = android_atomic_and(
        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);

    // Check for track invalidation
    if (flags & CBLK_INVALID) {
        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
        // AudioSystem cache. We should not exit here but after calling the callback so
        // that the upper layers can recreate the track
        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
            status_t status __unused = restoreTrack_l("processAudioBuffer");
            // FIXME unused status
            // after restoration, continue below to make sure that the loop and buffer events
            // are notified because they have been cleared from mCblk->mFlags above.
        }
    }

    bool waitStreamEnd = mState == STATE_STOPPING;
    bool active = mState == STATE_ACTIVE;

    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
    bool newUnderrun = false;
    if (flags & CBLK_UNDERRUN) {
#if 0
        // Currently in shared buffer mode, when the server reaches the end of buffer,
        // the track stays active in continuous underrun state.  It's up to the application
        // to pause or stop the track, or set the position to a new offset within buffer.
        // This was some experimental code to auto-pause on underrun.   Keeping it here
        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
        if (mTransfer == TRANSFER_SHARED) {
            mState = STATE_PAUSED;
            active = false;
        }
#endif
        if (!mInUnderrun) {
            mInUnderrun = true;
            newUnderrun = true;
        }
    }

    // Get current position of server
    Modulo<uint32_t> position(updateAndGetPosition_l());

    // Manage marker callback
    bool markerReached = false;
    Modulo<uint32_t> markerPosition(mMarkerPosition);
    // uses 32 bit wraparound for comparison with position.
    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
        mMarkerReached = markerReached = true;
    }

    // Determine number of new position callback(s) that will be needed, while locked
    size_t newPosCount = 0;
    Modulo<uint32_t> newPosition(mNewPosition);
    uint32_t updatePeriod = mUpdatePeriod;
    // FIXME fails for wraparound, need 64 bits
    if (updatePeriod > 0 && position >= newPosition) {
        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
        mNewPosition += updatePeriod * newPosCount;
    }

    // Cache other fields that will be needed soon
    uint32_t sampleRate = mSampleRate;
    float speed = mPlaybackRate.mSpeed;
    const uint32_t notificationFrames = mNotificationFramesAct;
    if (mRefreshRemaining) {
        mRefreshRemaining = false;
        mRemainingFrames = notificationFrames;
        mRetryOnPartialBuffer = false;
    }
    size_t misalignment = mProxy->getMisalignment();
    uint32_t sequence = mSequence;
    sp<AudioTrackClientProxy> proxy = mProxy;

    // Determine the number of new loop callback(s) that will be needed, while locked.
    int loopCountNotifications = 0;
    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END

    if (mLoopCount > 0) {
        int loopCount;
        size_t bufferPosition;
        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
        // Cap the number of notifications delivered this pass; excess loops are dropped.
        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
        mLoopCountNotified = loopCount; // discard any excess notifications
    } else if (mLoopCount < 0) {
        // FIXME: We're not accurate with notification count and position with infinite looping
        // since loopCount from server side will always return -1 (we could decrement it).
        size_t bufferPosition = mStaticProxy->getBufferPosition();
        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
        loopPeriod = mLoopEnd - bufferPosition;
    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
        size_t bufferPosition = mStaticProxy->getBufferPosition();
        loopPeriod = mFrameCount - bufferPosition;
    }

    // These fields don't need to be cached, because they are assigned only by set():
    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
    // mFlags is also assigned by createTrack_l(), but not the bit we care about.

    mLock.unlock();

    // get anchor time to account for callbacks.
    const nsecs_t timeBeforeCallbacks = systemTime();

    if (waitStreamEnd) {
        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
        // (and make sure we don't callback for more data while we're stopping).
        // This helps with position, marker notifications, and track invalidation.
        struct timespec timeout;
        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
        timeout.tv_nsec = 0;

        status_t status = proxy->waitStreamEndDone(&timeout);
        switch (status) {
        case NO_ERROR:
        case DEAD_OBJECT:
        case TIMED_OUT:
            if (status != DEAD_OBJECT) {
                // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
                // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
                mCbf(EVENT_STREAM_END, mUserData, NULL);
            }
            {
                AutoMutex lock(mLock);
                // The previously assigned value of waitStreamEnd is no longer valid,
                // since the mutex has been unlocked and either the callback handler
                // or another thread could have re-started the AudioTrack during that time.
                waitStreamEnd = mState == STATE_STOPPING;
                if (waitStreamEnd) {
                    mState = STATE_STOPPED;
                    mReleased = 0;
                }
            }
            if (waitStreamEnd && status != DEAD_OBJECT) {
               return NS_INACTIVE;
            }
            break;
        }
        return 0;
    }

    // perform callbacks while unlocked
    if (newUnderrun) {
        mCbf(EVENT_UNDERRUN, mUserData, NULL);
    }
    while (loopCountNotifications > 0) {
        mCbf(EVENT_LOOP_END, mUserData, NULL);
        --loopCountNotifications;
    }
    if (flags & CBLK_BUFFER_END) {
        mCbf(EVENT_BUFFER_END, mUserData, NULL);
    }
    if (markerReached) {
        mCbf(EVENT_MARKER, mUserData, &markerPosition);
    }
    while (newPosCount > 0) {
        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
        mCbf(EVENT_NEW_POS, mUserData, &temp);
        newPosition += updatePeriod;
        newPosCount--;
    }

    if (mObservedSequence != sequence) {
        mObservedSequence = sequence;
        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
        // for offloaded tracks, just wait for the upper layers to recreate the track
        if (isOffloadedOrDirect()) {
            return NS_INACTIVE;
        }
    }

    // if inactive, then don't run me again until re-started
    if (!active) {
        return NS_INACTIVE;
    }

    // Compute the estimated time until the next timed event (position, markers, loops)
    // FIXME only for non-compressed audio
    uint32_t minFrames = ~0;
    if (!markerReached && position < markerPosition) {
        minFrames = (markerPosition - position).value();
    }
    if (loopPeriod > 0 && loopPeriod < minFrames) {
        // loopPeriod is already adjusted for actual position.
        minFrames = loopPeriod;
    }
    if (updatePeriod > 0) {
        minFrames = min(minFrames, (newPosition - position).value());
    }

    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
    static const uint32_t kPoll = 0;
    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
        minFrames = kPoll * notificationFrames;
    }

    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
    const nsecs_t timeAfterCallbacks = systemTime();

    // Convert frame units to time units
    nsecs_t ns = NS_WHENEVER;
    if (minFrames != (uint32_t) ~0) {
        ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
        // TODO: Should we warn if the callback time is too long?
        if (ns < 0) ns = 0;
    }

    // If not supplying data by EVENT_MORE_DATA, then we're done
    if (mTransfer != TRANSFER_CALLBACK) {
        return ns;
    }

    // EVENT_MORE_DATA callback handling.
    // Timing for linear pcm audio data formats can be derived directly from the
    // buffer fill level.
    // Timing for compressed data is not directly available from the buffer fill level,
    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
    // to return a certain fill level.

    struct timespec timeout;
    const struct timespec *requested = &ClientProxy::kForever;
    if (ns != NS_WHENEVER) {
        timeout.tv_sec = ns / 1000000000LL;
        timeout.tv_nsec = ns % 1000000000LL;
        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
        requested = &timeout;
    }

    // Pull data from the app via EVENT_MORE_DATA until mRemainingFrames have been
    // delivered, or until no more data is immediately available.
    size_t writtenFrames = 0;
    while (mRemainingFrames > 0) {

        Buffer audioBuffer;
        audioBuffer.frameCount = mRemainingFrames;
        size_t nonContig;
        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
        // Only the first obtain may block; subsequent iterations are non-blocking.
        requested = &ClientProxy::kNonBlocking;
        size_t avail = audioBuffer.frameCount + nonContig;
        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
        if (err != NO_ERROR) {
            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
                    (isOffloaded() && (err == DEAD_OBJECT))) {
                // FIXME bug 25195759
                return 1000000;
            }
            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
            return NS_NEVER;
        }

        if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
            mRetryOnPartialBuffer = false;
            if (avail < mRemainingFrames) {
                // Not enough frames yet; come back when enough should be available.
                if (ns > 0) { // account for obtain time
                    const nsecs_t timeNow = systemTime();
                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
                }
                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
                    ns = myns;
                }
                return ns;
            }
        }

        size_t reqSize = audioBuffer.size;
        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
        // The callback communicates how many bytes it filled by rewriting audioBuffer.size.
        size_t writtenSize = audioBuffer.size;

        // Sanity check on returned size
        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
                    reqSize, ssize_t(writtenSize));
            return NS_NEVER;
        }

        if (writtenSize == 0) {
            // The callback is done filling buffers
            // Keep this thread going to handle timed events and
            // still try to get more data in intervals of WAIT_PERIOD_MS
            // but don't just loop and block the CPU, so wait

            // mCbf(EVENT_MORE_DATA, ...) might either
            // (1) Block until it can fill the buffer, returning 0 size on EOS.
            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
            // (3) Return 0 size when no data is available, does not wait for more data.
            //
            // (1) and (2) occurs with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
            // We try to compute the wait time to avoid a tight sleep-wait cycle,
            // especially for case (3).
            //
            // The decision to support (1) and (2) affect the sizing of mRemainingFrames
            // and this loop; whereas for case (3) we could simply check once with the full
            // buffer size and skip the loop entirely.

            nsecs_t myns;
            if (audio_has_proportional_frames(mFormat)) {
                // time to wait based on buffer occupancy
                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
                // audio flinger thread buffer size (TODO: adjust for fast tracks)
                // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
                // add a half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
                myns = datans + (afns / 2);
            } else {
                // FIXME: This could ping quite a bit if the buffer isn't full.
                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
                myns = kWaitPeriodNs;
            }
            if (ns > 0) { // account for obtain and callback time
                const nsecs_t timeNow = systemTime();
                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
            }
            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
                ns = myns;
            }
            return ns;
        }

        size_t releasedFrames = writtenSize / mFrameSize;
        audioBuffer.frameCount = releasedFrames;
        mRemainingFrames -= releasedFrames;
        if (misalignment >= releasedFrames) {
            misalignment -= releasedFrames;
        } else {
            misalignment = 0;
        }

        releaseBuffer(&audioBuffer);
        writtenFrames += releasedFrames;

        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
        // if callback doesn't like to accept the full chunk
        if (writtenSize < reqSize) {
            continue;
        }

        // There could be enough non-contiguous frames available to satisfy the remaining request
        if (mRemainingFrames <= nonContig) {
            continue;
        }

#if 0
        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
        // that total to a sum == notificationFrames.
        if (0 < misalignment && misalignment <= mRemainingFrames) {
            mRemainingFrames = misalignment;
            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
        }
#endif

    }
    if (writtenFrames > 0) {
        AutoMutex lock(mLock);
        mFramesWritten += writtenFrames;
    }
    mRemainingFrames = notificationFrames;
    mRetryOnPartialBuffer = true;

    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
    return 0;
}
2263
// Re-creates the server-side IAudioTrack after the old one died or was
// invalidated, then restores as much client state as possible: static buffer
// position and loop, VolumeShaper state, play state, and the frames-written
// offset.  Offloaded/direct tracks (and tracks with mDoNotReconnect) are not
// re-created here; they return DEAD_OBJECT so the upper layers can rebuild.
// Must be called with mLock held (per the _l suffix convention).
// 'from' names the caller, for logging only.
status_t AudioTrack::restoreTrack_l(const char *from)
{
    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
    // Bump the sequence so concurrent obtainBuffer() calls can detect the
    // re-creation and avoid restoring twice.
    ++mSequence;

    // refresh the audio configuration cache in this process to make sure we get new
    // output parameters and new IAudioFlinger in createTrack_l()
    AudioSystem::clearAudioConfigCache();

    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
        // reconsider enabling for linear PCM encodings when position can be preserved.
        return DEAD_OBJECT;
    }

    // Save so we can return count since creation.
    mUnderrunCountOffset = getUnderrunCount_l();

    // save the old static buffer position
    uint32_t staticPosition = 0;
    size_t bufferPosition = 0;
    int loopCount = 0;
    if (mStaticProxy != 0) {
        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
        staticPosition = mStaticProxy->getPosition().unsignedValue();
    }

    // Restore the originally requested flags before re-creating the track.
    mFlags = mOrigFlags;

    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
    // following member variables: mAudioTrack, mCblkMemory and mCblk.
    // It will also delete the strong references on previous IAudioTrack and IMemory.
    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
    status_t result = createTrack_l();

    if (result == NO_ERROR) {
        // take the frames that will be lost by track recreation into account in saved position
        // For streaming tracks, this is the amount we obtained from the user/client
        // (not the number actually consumed at the server - those are already lost).
        if (mStaticProxy == 0) {
            mPosition = mReleased;
        }
        // Continue playback from last known position and restore loop.
        if (mStaticProxy != 0) {
            if (loopCount != 0) {
                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
                        mLoopStart, mLoopEnd, loopCount);
            } else {
                mStaticProxy->setBufferPosition(bufferPosition);
                if (bufferPosition == mFrameCount) {
                    ALOGD("restoring track at end of static buffer");
                }
            }
        }
        // restore volume handler
        mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
            sp<VolumeShaper::Operation> operationToEnd =
                    new VolumeShaper::Operation(shaper.mOperation);
            // TODO: Ideally we would restore to the exact xOffset position
            // as returned by getVolumeShaperState(), but we don't have that
            // information when restoring at the client unless we periodically poll
            // the server or create shared memory state.
            //
            // For now, we simply advance to the end of the VolumeShaper effect
            // if it has been started.
            if (shaper.isStarted()) {
                operationToEnd->setNormalizedTime(1.f);
            }
            return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
        });

        if (mState == STATE_ACTIVE) {
            result = mAudioTrack->start();
        }
        // server resets to zero so we offset
        mFramesWrittenServerOffset =
                mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
        mFramesWrittenAtRestore = mFramesWrittenServerOffset;
    }
    if (result != NO_ERROR) {
        // Restoration failed: leave the track stopped with nothing released.
        ALOGW("restoreTrack_l() failed status %d", result);
        mState = STATE_STOPPED;
        mReleased = 0;
    }

    return result;
}
2352
2353Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2354{
2355    // This is the sole place to read server consumed frames
2356    Modulo<uint32_t> newServer(mProxy->getPosition());
2357    const int32_t delta = (newServer - mServer).signedValue();
2358    // TODO There is controversy about whether there can be "negative jitter" in server position.
2359    //      This should be investigated further, and if possible, it should be addressed.
2360    //      A more definite failure mode is infrequent polling by client.
2361    //      One could call (void)getPosition_l() in releaseBuffer(),
2362    //      so mReleased and mPosition are always lock-step as best possible.
2363    //      That should ensure delta never goes negative for infrequent polling
2364    //      unless the server has more than 2^31 frames in its buffer,
2365    //      in which case the use of uint32_t for these counters has bigger issues.
2366    ALOGE_IF(delta < 0,
2367            "detected illegal retrograde motion by the server: mServer advanced by %d",
2368            delta);
2369    mServer = newServer;
2370    if (delta > 0) { // avoid retrograde
2371        mPosition += delta;
2372    }
2373    return mPosition;
2374}
2375
// Checks whether the current client buffer is large enough to sustain playback
// at the requested sample rate and speed without underrunning.
// Must be called with mLock held.  Returns true if the combination is allowed.
bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
{
    updateLatency_l();
    // applicable for mixing tracks only (not offloaded or direct)
    if (mStaticProxy != 0) {
        return true; // static tracks do not have issues with buffer sizing.
    }
    // Minimum frame count needed for the output thread parameters at this rate/speed.
    const size_t minFrameCount =
            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
                /*, 0 mNotificationsPerBufferReq*/);
    const bool allowed = mFrameCount >= minFrameCount;
    ALOGD_IF(!allowed,
            "isSampleRateSpeedAllowed_l denied "
            "mAfLatency:%u  mAfFrameCount:%zu  mAfSampleRate:%u  sampleRate:%u  speed:%f "
            "mFrameCount:%zu < minFrameCount:%zu",
            mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
            mFrameCount, minFrameCount);
    return allowed;
}
2395
// Forwards an AudioParameter-style key/value string to the server-side track.
status_t AudioTrack::setParameters(const String8& keyValuePairs)
{
    AutoMutex lock(mLock);
    return mAudioTrack->setParameters(keyValuePairs);
}
2401
// Applies a VolumeShaper configuration/operation to the server-side track.
// On DEAD_OBJECT the track is restored once and the shaper reapplied; on
// success the shaper is also cached locally so it can be replayed after a
// future restore.
VolumeShaper::Status AudioTrack::applyVolumeShaper(
        const sp<VolumeShaper::Configuration>& configuration,
        const sp<VolumeShaper::Operation>& operation)
{
    AutoMutex lock(mLock);
    mVolumeHandler->setIdIfNecessary(configuration);
    VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);

    if (status == DEAD_OBJECT) {
        if (restoreTrack_l("applyVolumeShaper") == OK) {
            status = mAudioTrack->applyVolumeShaper(configuration, operation);
        }
    }
    if (status >= 0) {
        // save VolumeShaper for restore
        mVolumeHandler->applyVolumeShaper(configuration, operation);
        if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
            mVolumeHandler->setStarted();
        }
    } else {
        // warn only if not an expected restore failure.
        ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
                "applyVolumeShaper failed: %d", status);
    }
    return status;
}
2428
2429sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2430{
2431    AutoMutex lock(mLock);
2432    sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2433    if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2434        if (restoreTrack_l("getVolumeShaperState") == OK) {
2435            state = mAudioTrack->getVolumeShaperState(id);
2436        }
2437    }
2438    return state;
2439}
2440
2441status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2442{
2443    if (timestamp == nullptr) {
2444        return BAD_VALUE;
2445    }
2446    AutoMutex lock(mLock);
2447    return getTimestamp_l(timestamp);
2448}
2449
// Fills in an ExtendedTimestamp with per-location frame positions and times.
// Must be called with mLock held.  Not supported for offloaded/direct tracks
// (returns INVALID_OPERATION).  Returns DEAD_OBJECT if the track was
// invalidated and could not be restored, WOULD_BLOCK if no server/kernel
// location has a valid time yet, OK otherwise.
status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
{
    if (mCblk->mFlags & CBLK_INVALID) {
        const status_t status = restoreTrack_l("getTimestampExtended");
        if (status != OK) {
            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
            // recommending that the track be recreated.
            return DEAD_OBJECT;
        }
    }
    // check for offloaded/direct here in case restoring somehow changed those flags.
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION; // not supported
    }
    status_t status = mProxy->getTimestamp(timestamp);
    LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
    bool found = false;
    // The client location is tracked locally, not by the proxy.
    timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
    timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
    // server side frame offset in case AudioTrack has been restored.
    for (int i = ExtendedTimestamp::LOCATION_SERVER;
            i < ExtendedTimestamp::LOCATION_MAX; ++i) {
        if (timestamp->mTimeNs[i] >= 0) {
            // apply server offset (frames flushed is ignored
            // so we don't report the jump when the flush occurs).
            timestamp->mPosition[i] += mFramesWrittenServerOffset;
            found = true;
        }
    }
    return found ? OK : WOULD_BLOCK;
}
2481
// Public entry point for the legacy AudioTimestamp query; locks and delegates.
status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
{
    AutoMutex lock(mLock);
    return getTimestamp_l(timestamp);
}
2487
// Produces a (position, time) pair describing the playback head, with a series
// of corrections: server/kernel location adjustment, pause-time clamping,
// startup-glitch suppression for offloaded/direct tracks, server-to-client
// position rebasing for mixed tracks, and retrograde-motion prevention.
// Must be called with mLock held.  Returns WOULD_BLOCK while no valid
// timestamp is available, DEAD_OBJECT if restore fails, OK on success.
status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
{
    bool previousTimestampValid = mPreviousTimestampValid;
    // Set false here to cover all the error return cases.
    mPreviousTimestampValid = false;

    // Only certain states can produce a timestamp; offloaded tracks may
    // still report while stopping (draining).
    switch (mState) {
    case STATE_ACTIVE:
    case STATE_PAUSED:
        break; // handle below
    case STATE_FLUSHED:
    case STATE_STOPPED:
        return WOULD_BLOCK;
    case STATE_STOPPING:
    case STATE_PAUSED_STOPPING:
        if (!isOffloaded_l()) {
            return INVALID_OPERATION;
        }
        break; // offloaded tracks handled below
    default:
        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
        break;
    }

    if (mCblk->mFlags & CBLK_INVALID) {
        const status_t status = restoreTrack_l("getTimestamp");
        if (status != OK) {
            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
            // recommending that the track be recreated.
            return DEAD_OBJECT;
        }
    }

    // The presented frame count must always lag behind the consumed frame count.
    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.

    status_t status;
    if (isOffloadedOrDirect_l()) {
        // use Binder to get timestamp
        status = mAudioTrack->getTimestamp(timestamp);
    } else {
        // read timestamp from shared memory
        ExtendedTimestamp ets;
        status = mProxy->getTimestamp(&ets);
        if (status == OK) {
            ExtendedTimestamp::Location location;
            status = ets.getBestTimestamp(&timestamp, &location);

            if (status == OK) {
                updateLatency_l();
                // It is possible that the best location has moved from the kernel to the server.
                // In this case we adjust the position from the previous computed latency.
                if (location == ExtendedTimestamp::LOCATION_SERVER) {
                    ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
                            "getTimestamp() location moved from kernel to server");
                    // check that the last kernel OK time info exists and the positions
                    // are valid (if they predate the current track, the positions may
                    // be zero or negative).
                    const int64_t frames =
                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
                            ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
                            ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
                            ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
                            ?
                            int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
                                    / 1000)
                            :
                            (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
                            - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
                    ALOGV("frame adjustment:%lld  timestamp:%s",
                            (long long)frames, ets.toString().c_str());
                    if (frames >= ets.mPosition[location]) {
                        timestamp.mPosition = 0;
                    } else {
                        timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
                    }
                } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
                    ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
                            "getTimestamp() location moved from server to kernel");
                }

                // We update the timestamp time even when paused.
                if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
                    const int64_t now = systemTime();
                    const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
                    // Estimate how stale the reported time may be, preferring
                    // the measured server/kernel pipeline depth over mAfLatency.
                    const int64_t lag =
                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
                                ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
                            ? int64_t(mAfLatency * 1000000LL)
                            : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
                             * NANOS_PER_SECOND / mSampleRate;
                    const int64_t limit = now - lag; // no earlier than this limit
                    if (at < limit) {
                        ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
                                (long long)lag, (long long)at, (long long)limit);
                        timestamp.mTime = convertNsToTimespec(limit);
                    }
                }
                mPreviousLocation = location;
            } else {
                // right after AudioTrack is started, one may not find a timestamp
                ALOGV("getBestTimestamp did not find timestamp");
            }
        }
        if (status == INVALID_OPERATION) {
            // INVALID_OPERATION occurs when no timestamp has been issued by the server;
            // other failures are signaled by a negative time.
            // If we come out of FLUSHED or STOPPED where the position is known
            // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
            // "zero" for NuPlayer).  We don't convert for track restoration as position
            // does not reset.
            ALOGV("timestamp server offset:%lld restore frames:%lld",
                    (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
            if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
                status = WOULD_BLOCK;
            }
        }
    }
    if (status != NO_ERROR) {
        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
        return status;
    }
    if (isOffloadedOrDirect_l()) {
        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
            // use cached paused position in case another offloaded track is running.
            timestamp.mPosition = mPausedPosition;
            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
            // TODO: adjust for delay
            return NO_ERROR;
        }

        // Check whether a pending flush or stop has completed, as those commands may
        // be asynchronous or return near finish or exhibit glitchy behavior.
        //
        // Originally this showed up as the first timestamp being a continuation of
        // the previous song under gapless playback.
        // However, we sometimes see zero timestamps, then a glitch of
        // the previous song's position, and then correct timestamps afterwards.
        if (mStartFromZeroUs != 0 && mSampleRate != 0) {
            static const int kTimeJitterUs = 100000; // 100 ms
            static const int k1SecUs = 1000000;

            const int64_t timeNow = getNowUs();

            if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
                if (timestampTimeUs < mStartFromZeroUs) {
                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
                }
                const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
                        / ((double)mSampleRate * mPlaybackRate.mSpeed);

                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
                    // Verify that the counter can't count faster than the sample rate
                    // since the start time.  If greater, then that means we may have failed
                    // to completely flush or stop the previous playing track.
                    ALOGW_IF(!mTimestampStartupGlitchReported,
                            "getTimestamp startup glitch detected"
                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
                            timestamp.mPosition);
                    mTimestampStartupGlitchReported = true;
                    if (previousTimestampValid
                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
                        timestamp = mPreviousTimestamp;
                        mPreviousTimestampValid = true;
                        return NO_ERROR;
                    }
                    return WOULD_BLOCK;
                }
                if (deltaPositionByUs != 0) {
                    mStartFromZeroUs = 0; // don't check again, we got valid nonzero position.
                }
            } else {
                mStartFromZeroUs = 0; // don't check again, start time expired.
            }
            mTimestampStartupGlitchReported = false;
        }
    } else {
        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
        (void) updateAndGetPosition_l();
        // Server consumed (mServer) and presented both use the same server time base,
        // and server consumed is always >= presented.
        // The delta between these represents the number of frames in the buffer pipeline.
        // If this delta between these is greater than the client position, it means that
        // actually presented is still stuck at the starting line (figuratively speaking),
        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
        // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
        // mPosition exceeds 32 bits.
        // TODO Remove when timestamp is updated to contain pipeline status info.
        const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
        if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
                && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
            return INVALID_OPERATION;
        }
        // Convert timestamp position from server time base to client time base.
        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
        // But if we change it to 64-bit then this could fail.
        // Use Modulo computation here.
        timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
        // Immediately after a call to getPosition_l(), mPosition and
        // mServer both represent the same frame position.  mPosition is
        // in client's point of view, and mServer is in server's point of
        // view.  So the difference between them is the "fudge factor"
        // between client and server views due to stop() and/or new
        // IAudioTrack.  And timestamp.mPosition is initially in server's
        // point of view, so we need to apply the same fudge factor to it.
    }

    // Prevent retrograde motion in timestamp.
    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
    if (status == NO_ERROR) {
        // previousTimestampValid is set to false when starting after a stop or flush.
        if (previousTimestampValid) {
            const int64_t previousTimeNanos =
                    audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
            int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);

            // Fix stale time when checking timestamp right after start().
            //
            // For offload compatibility, use a default lag value here.
            // Any time discrepancy between this update and the pause timestamp is handled
            // by the retrograde check afterwards.
            const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
            const int64_t limitNs = mStartNs - lagNs;
            if (currentTimeNanos < limitNs) {
                ALOGD("correcting timestamp time for pause, "
                        "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
                        (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
                timestamp.mTime = convertNsToTimespec(limitNs);
                currentTimeNanos = limitNs;
            }

            // retrograde check
            if (currentTimeNanos < previousTimeNanos) {
                ALOGW("retrograde timestamp time corrected, %lld < %lld",
                        (long long)currentTimeNanos, (long long)previousTimeNanos);
                timestamp.mTime = mPreviousTimestamp.mTime;
                // currentTimeNanos not used below.
            }

            // Looking at signed delta will work even when the timestamps
            // are wrapping around.
            int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
                    - mPreviousTimestamp.mPosition).signedValue();
            if (deltaPosition < 0) {
                // Only report once per position instead of spamming the log.
                if (!mRetrogradeMotionReported) {
                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
                            deltaPosition,
                            timestamp.mPosition,
                            mPreviousTimestamp.mPosition);
                    mRetrogradeMotionReported = true;
                }
            } else {
                mRetrogradeMotionReported = false;
            }
            if (deltaPosition < 0) {
                timestamp.mPosition = mPreviousTimestamp.mPosition;
                deltaPosition = 0;
            }
#if 0
            // Uncomment this to verify audio timestamp rate.
            const int64_t deltaTime =
                    audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
            if (deltaTime != 0) {
                const int64_t computedSampleRate =
                        deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
                ALOGD("computedSampleRate:%u  sampleRate:%u",
                        (unsigned)computedSampleRate, mSampleRate);
            }
#endif
        }
        // Remember this timestamp for the next retrograde comparison.
        mPreviousTimestamp = timestamp;
        mPreviousTimestampValid = true;
    }

    return status;
}
2769
2770String8 AudioTrack::getParameters(const String8& keys)
2771{
2772    audio_io_handle_t output = getOutput();
2773    if (output != AUDIO_IO_HANDLE_NONE) {
2774        return AudioSystem::getParameters(output, keys);
2775    } else {
2776        return String8::empty();
2777    }
2778}
2779
// Thread-safe wrapper for isOffloaded_l().
bool AudioTrack::isOffloaded() const
{
    AutoMutex lock(mLock);
    return isOffloaded_l();
}
2785
// Thread-safe wrapper for isDirect_l().
bool AudioTrack::isDirect() const
{
    AutoMutex lock(mLock);
    return isDirect_l();
}
2791
// Thread-safe wrapper for isOffloadedOrDirect_l().
bool AudioTrack::isOffloadedOrDirect() const
{
    AutoMutex lock(mLock);
    return isOffloadedOrDirect_l();
}
2797
2798
2799status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2800{
2801
2802    const size_t SIZE = 256;
2803    char buffer[SIZE];
2804    String8 result;
2805
2806    result.append(" AudioTrack::dump\n");
2807    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2808            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2809    result.append(buffer);
2810    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2811            mChannelCount, mFrameCount);
2812    result.append(buffer);
2813    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
2814            mSampleRate, mPlaybackRate.mSpeed, mStatus);
2815    result.append(buffer);
2816    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2817    result.append(buffer);
2818    ::write(fd, result.string(), result.size());
2819    return NO_ERROR;
2820}
2821
// Thread-safe wrapper for getUnderrunCount_l().
uint32_t AudioTrack::getUnderrunCount() const
{
    AutoMutex lock(mLock);
    return getUnderrunCount_l();
}
2827
// Returns the cumulative underrun count: the proxy's current count plus the
// offset accumulated across track restores.  Must be called with mLock held.
uint32_t AudioTrack::getUnderrunCount_l() const
{
    return mProxy->getUnderrunCount() + mUnderrunCountOffset;
}
2832
// Returns the number of frames the proxy reports as lost to underruns.
uint32_t AudioTrack::getUnderrunFrames() const
{
    AutoMutex lock(mLock);
    return mProxy->getUnderrunFrames();
}
2838
// Registers a callback to be notified of audio device changes on this
// track's output.  Only one callback is kept: any previously registered
// callback is removed first.  Returns BAD_VALUE for a null callback and
// INVALID_OPERATION if the same callback is already registered.
status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
{
    if (callback == 0) {
        ALOGW("%s adding NULL callback!", __FUNCTION__);
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    if (mDeviceCallback == callback) {
        ALOGW("%s adding same callback!", __FUNCTION__);
        return INVALID_OPERATION;
    }
    status_t status = NO_ERROR;
    // Only register with AudioSystem when we actually have an output; the
    // callback is remembered either way so it can be attached later.
    if (mOutput != AUDIO_IO_HANDLE_NONE) {
        if (mDeviceCallback != 0) {
            ALOGW("%s callback already present!", __FUNCTION__);
            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
        }
        status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
    }
    mDeviceCallback = callback;
    return status;
}
2861
// Unregisters the device-change callback previously installed with
// addAudioDeviceCallback().  Returns BAD_VALUE for a null callback and
// INVALID_OPERATION if the given callback is not the registered one.
status_t AudioTrack::removeAudioDeviceCallback(
        const sp<AudioSystem::AudioDeviceCallback>& callback)
{
    if (callback == 0) {
        ALOGW("%s removing NULL callback!", __FUNCTION__);
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    if (mDeviceCallback != callback) {
        ALOGW("%s removing different callback!", __FUNCTION__);
        return INVALID_OPERATION;
    }
    // Only deregister from AudioSystem if it was actually attached to an output.
    if (mOutput != AUDIO_IO_HANDLE_NONE) {
        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
    }
    mDeviceCallback = 0;
    return NO_ERROR;
}
2880
// Estimates, in milliseconds, how much written-but-not-yet-played audio
// remains at the given pipeline location (SERVER or KERNEL only).
// Requires proportional-frame (pure PCM) data; otherwise INVALID_OPERATION.
// Falls back to the server position for offloaded/direct tracks or when no
// extended timestamp is available.
status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
{
    if (msec == nullptr ||
            (location != ExtendedTimestamp::LOCATION_SERVER
                    && location != ExtendedTimestamp::LOCATION_KERNEL)) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    // inclusive of offloaded and direct tracks.
    //
    // It is possible, but not enabled, to allow duration computation for non-pcm
    // audio_has_proportional_frames() formats because currently they have
    // the drain rate equivalent to the pcm sample rate * framesize.
    if (!isPurePcmData_l()) {
        return INVALID_OPERATION;
    }
    ExtendedTimestamp ets;
    if (getTimestamp_l(&ets) == OK
            && ets.mTimeNs[location] > 0) {
        // Frames written by the client but not yet consumed at 'location'.
        int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
                - ets.mPosition[location];
        if (diff < 0) {
            *msec = 0;
        } else {
            // ms is the playback time by frames
            int64_t ms = (int64_t)((double)diff * 1000 /
                    ((double)mSampleRate * mPlaybackRate.mSpeed));
            // clockdiff is the timestamp age (negative)
            int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
                    ets.mTimeNs[location]
                    + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
                    - systemTime(SYSTEM_TIME_MONOTONIC);

            //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
            static const int NANOS_PER_MILLIS = 1000000;
            *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
        }
        return NO_ERROR;
    }
    if (location != ExtendedTimestamp::LOCATION_SERVER) {
        return INVALID_OPERATION; // LOCATION_KERNEL is not available
    }
    // use server position directly (offloaded and direct arrive here)
    updateAndGetPosition_l();
    int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
    *msec = (diff <= 0) ? 0
            : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
    return NO_ERROR;
}
2930
// Returns true once the track has demonstrably started rendering: either a
// timestamp shows the position has advanced past the start position, or a
// terminal condition (stream end, prior offloaded start) implies it ran.
bool AudioTrack::hasStarted()
{
    AutoMutex lock(mLock);
    switch (mState) {
    case STATE_STOPPED:
        if (isOffloadedOrDirect_l()) {
            // check if we have started in the past to return true.
            return mStartFromZeroUs > 0;
        }
        // A normal audio track may still be draining, so
        // check if stream has ended.  This covers fasttrack position
        // instability and start/stop without any data written.
        if (mProxy->getStreamEndDone()) {
            return true;
        }
        // fall through
    case STATE_ACTIVE:
    case STATE_STOPPING:
        break;
    case STATE_PAUSED:
    case STATE_PAUSED_STOPPING:
    case STATE_FLUSHED:
        return false;  // we're not active
    default:
        LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
        break;
    }

    // wait indicates whether we need to wait for a timestamp.
    // This is conservatively figured - if we encounter an unexpected error
    // then we will not wait.
    bool wait = false;
    if (isOffloadedOrDirect_l()) {
        AudioTimestamp ts;
        status_t status = getTimestamp_l(ts);
        if (status == WOULD_BLOCK) {
            wait = true;
        } else if (status == OK) {
            // still at the starting position means playback hasn't begun.
            wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
        }
        ALOGV("hasStarted wait:%d  ts:%u  start position:%lld",
                (int)wait,
                ts.mPosition,
                (long long)mStartTs.mPosition);
    } else {
        int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
        ExtendedTimestamp ets;
        status_t status = getTimestamp_l(&ets);
        if (status == WOULD_BLOCK) {  // no SERVER or KERNEL frame info in ets
            wait = true;
        } else if (status == OK) {
            // Prefer the kernel location (closest to output) if it has valid
            // time info; otherwise fall back to the server location.
            for (location = ExtendedTimestamp::LOCATION_KERNEL;
                    location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
                if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
                    continue;
                }
                wait = ets.mPosition[location] == 0
                        || ets.mPosition[location] == mStartEts.mPosition[location];
                break;
            }
        }
        ALOGV("hasStarted wait:%d  ets:%lld  start position:%lld",
                (int)wait,
                (long long)ets.mPosition[location],
                (long long)mStartEts.mPosition[location]);
    }
    return !wait;
}
2999
3000// =========================================================================
3001
// Called when the audioserver-side binder object dies; notifies the proxy so
// blocked clients wake up and the track can be marked invalid.
void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
{
    // Promote the weak reference: the AudioTrack may already be destroyed.
    sp<AudioTrack> audioTrack = mAudioTrack.promote();
    if (audioTrack != 0) {
        AutoMutex lock(audioTrack->mLock);
        audioTrack->mProxy->binderDied();
    }
}
3010
3011// =========================================================================
3012
// Callback thread constructor: starts in the externally-paused state so the
// loop blocks until resume() is called.
AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
      mIgnoreNextPausedInt(false)
{
}
3018
// Nothing to clean up; thread shutdown is driven by requestExit().
AudioTrack::AudioTrackThread::~AudioTrackThread()
{
}
3022
// One iteration of the callback thread: honor external pause and internal
// (timed) pause, then run processAudioBuffer() and schedule the next wakeup
// from its return value.  Returns true to loop again, false to exit.
bool AudioTrack::AudioTrackThread::threadLoop()
{
    {
        AutoMutex _l(mMyLock);
        if (mPaused) {
            // TODO check return value and handle or log
            mMyCond.wait(mMyLock);
            // caller will check for exitPending()
            return true;
        }
        if (mIgnoreNextPausedInt) {
            mIgnoreNextPausedInt = false;
            mPausedInt = false;
        }
        if (mPausedInt) {
            // TODO use futex instead of condition, for event flag "or"
            if (mPausedNs > 0) {
                // TODO check return value and handle or log
                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
            } else {
                // TODO check return value and handle or log
                mMyCond.wait(mMyLock);
            }
            mPausedInt = false;
            return true;
        }
    }
    if (exitPending()) {
        return false;
    }
    // ns is the requested delay before the next processAudioBuffer() call,
    // or one of the special NS_* sentinel values handled below.
    nsecs_t ns = mReceiver.processAudioBuffer();
    switch (ns) {
    case 0:
        return true;
    case NS_INACTIVE:
        pauseInternal();
        return true;
    case NS_NEVER:
        return false;
    case NS_WHENEVER:
        // Event driven: call wake() when callback notifications conditions change.
        ns = INT64_MAX;
        // fall through
    default:
        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
        pauseInternal(ns);
        return true;
    }
}
3072
// Asks the thread to exit and wakes it from any pause so it can observe
// exitPending().
void AudioTrack::AudioTrackThread::requestExit()
{
    // must be in this order to avoid a race condition
    Thread::requestExit();
    resume();
}
3079
// Externally pauses the callback thread; it blocks at the top of threadLoop()
// until resume() is called.
void AudioTrack::AudioTrackThread::pause()
{
    AutoMutex _l(mMyLock);
    mPaused = true;
}
3085
// Resumes the callback thread from either an external pause or an internal
// (timed) pause, and cancels any internal pause requested before the signal
// is observed.
void AudioTrack::AudioTrackThread::resume()
{
    AutoMutex _l(mMyLock);
    mIgnoreNextPausedInt = true;
    if (mPaused || mPausedInt) {
        mPaused = false;
        mPausedInt = false;
        mMyCond.signal();
    }
}
3096
// Wakes the thread early from a timed internal pause so processAudioBuffer()
// runs promptly; has no effect while externally paused.
void AudioTrack::AudioTrackThread::wake()
{
    AutoMutex _l(mMyLock);
    if (!mPaused) {
        // wake() might be called while servicing a callback - ignore the next
        // pause time and call processAudioBuffer.
        mIgnoreNextPausedInt = true;
        if (mPausedInt && mPausedNs > 0) {
            // audio track is active and internally paused with timeout.
            mPausedInt = false;
            mMyCond.signal();
        }
    }
}
3111
// Internally pauses the thread for up to ns nanoseconds (0 means indefinitely,
// until resume()/wake()); used to schedule the next processAudioBuffer() call.
void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
{
    AutoMutex _l(mMyLock);
    mPausedInt = true;
    mPausedNs = ns;
}
3118
3119} // namespace android
3120