AudioTrack.cpp revision 13969262f704ca27c82d60e3c7cf4b271e3b5918
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <audio_utils/clock.h>
#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120
static const int kMaxLoopCountNotifications = 32;

namespace android {
// ---------------------------------------------------------------------------

// TODO: Move to a separate .h

template <typename T>
static inline const T &min(const T &x, const T &y) {
    return x < y ? x : y;
}

template <typename T>
static inline const T &max(const T &x, const T &y) {
    return x > y ? x : y;
}

static const int32_t NANOS_PER_SECOND = 1000000000;

static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
{
    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
}

static int64_t convertTimespecToUs(const struct timespec &tv)
{
    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
}

// TODO move to audio_utils.
static inline struct timespec convertNsToTimespec(int64_t ns) {
    struct timespec tv;
    tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
    tv.tv_nsec = static_cast<long>(ns % NANOS_PER_SECOND);
    return tv;
}

// current monotonic time in microseconds.
static int64_t getNowUs()
{
    struct timespec tv;
    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
    return convertTimespecToUs(tv);
}

// FIXME: we don't use the pitch setting in the time stretcher (not working);
// instead we emulate it using our sample rate converter.
static const bool kFixPitch = true; // enable pitch fix
static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
{
    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
}

static inline float adjustSpeed(float speed, float pitch)
{
    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
}

static inline float adjustPitch(float pitch)
{
    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
}

// Must match similar computation in createTrack_l in Threads.cpp.
// TODO: Move to a common library
static size_t calculateMinFrameCount(
        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
        uint32_t sampleRate, float speed /*, uint32_t notificationsPerBufferReq*/)
{
    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }
#if 0
    // The notificationsPerBufferReq parameter is not yet used for non-fast tracks,
    // but keeping the code here to make it easier to add later.
    if (minBufCount < notificationsPerBufferReq) {
        minBufCount = notificationsPerBufferReq;
    }
#endif
    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
            "sampleRate %u  speed %f  minBufCount: %u" /*"  notificationsPerBufferReq %u"*/,
            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount
            /*, notificationsPerBufferReq*/);
    return minBufCount * sourceFramesNeededWithTimestretch(
            sampleRate, afFrameCount, afSampleRate, speed);
}

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // When called from createTrack, speed is 1.0f (normal speed).
    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f
            /*, 0 notificationsPerBufferReq*/);

    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, afSampleRate, afLatency);
    return NO_ERROR;
}

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mPortId(AUDIO_PORT_HANDLE_NONE)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
}

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
        }
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);

    mThreadCanCallJava = threadCanCallJava;

    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
        }
        // check deep buffer after flags have been modified above
        if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("notificationFrames=%d not permitted for non-fast track",
                    notificationFrames);
            return BAD_VALUE;
        }
        if (frameCount > 0) {
            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    notificationFrames, frameCount);
            return BAD_VALUE;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "notificationFrames=%d clamped to the range -%u to -%u",
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
    } else {
        mSessionId = sessionId;
    }
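    // Note: if no valid uid was supplied, or if the call arrives from another process over
    // binder, record the binder caller's uid/pid rather than the values passed in.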
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == AUDIO_UID_INVALID || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
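    // mOrigFlags keeps the flags as requested; mFlags may be downgraded later
    // (e.g. if the FAST request is denied by the server in createTrack_l()).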
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status_t status = createTrack_l();

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartNs = 0;
    mStartFromZeroUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
    mVolumeHandler = new VolumeHandler();
    return NO_ERROR;
}

// -------------------------------------------------------------------------

status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();

    // save start timestamp
    if (isOffloadedOrDirect_l()) {
        if (getTimestamp_l(mStartTs) != OK) {
            mStartTs.mPosition = 0;
        }
    } else {
        if (getTimestamp_l(&mStartEts) != OK) {
            mStartEts.clear();
        }
    }
    mStartNs = systemTime(); // save this for timestamp adjustment after starting.
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mRetrogradeMotionReported = false;
        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;

        if (!isOffloadedOrDirect_l()
                && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
                    (long long)(mFramesWrittenServerOffset
                            + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)mStartEts.mFlushed,
                    (long long)mFramesWritten);
            // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
            mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartFromZeroUs = mStartNs / 1000;

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
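    // Atomically clear the stream-end-done and disabled flags in the shared control block;
    // the previous flag state returned here is checked below for CBLK_INVALID.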
    int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }

        // Start our local VolumeHandler for restoration purposes.
        mVolumeHandler->setStarted();
    } else {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        ALOGD_IF(mSharedBuffer == nullptr,
                "stop() called with %u frames delivered", mReleased.value());
        mReleased = 0;
    }

    mProxy->interrupt();
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    AutoMutex lock(mLock);
    if (rate == mSampleRate) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
        return INVALID_OPERATION;
    }
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    // NOTE: it is theoretically possible, but highly unlikely, that a device change
    // could mean a previously allowed sampling rate is no longer allowed.
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }
    // TODO: Should we also check if the buffer size is compatible?

    mSampleRate = rate;
    mProxy->setSampleRate(effectiveSampleRate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

uint32_t AudioTrack::getOriginalSampleRate() const
{
    return mOriginalSampleRate;
}

status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
    AutoMutex lock(mLock);
    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION;
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    ALOGV("setPlaybackRate (input): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    const float effectivePitch = adjustPitch(playbackRate.mPitch);
    AudioPlaybackRate playbackRateTemp = playbackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;

    ALOGV("setPlaybackRate (effective): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            effectiveRate, effectiveSpeed, effectivePitch);

    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
        ALOGW("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // Check if the buffer size is compatible.
    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
        ALOGW("setPlaybackRate(%f, %f) failed (buffer size)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    // Check resampler ratios are within bounds
    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
            (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
                        playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    mPlaybackRate = playbackRate;
    //set effective rates
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    return NO_ERROR;
}

const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
{
    AutoMutex lock(mLock);
    return mPlaybackRate;
}

ssize_t AudioTrack::getBufferSizeInFrames()
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    return (ssize_t) mProxy->getBufferSizeInFrames();
}

status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
{
    if (duration == nullptr) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
    if (bufferSizeInFrames < 0) {
        return (status_t)bufferSizeInFrames;
    }
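    // Convert frames to microseconds at the effective output rate (sample rate scaled by speed).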
    *duration = (int64_t)((double)bufferSizeInFrames * 1000000
            / ((double)mSampleRate * mPlaybackRate.mSpeed));
    return NO_ERROR;
}

ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
{
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
        return NO_INIT;
    }
    // Reject if timed track or compressed audio.
    if (!audio_is_linear_pcm(mFormat)) {
        return INVALID_OPERATION;
    }
    return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // We do not update the periodic notification point.
    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopCount = loopCount;
    mLoopEnd = loopEnd;
    mLoopStart = loopStart;
    mLoopCountNotified = loopCount;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mMarkerPosition.getValue(marker);

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = updateAndGetPosition_l() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // After setting the position, use full update period before notification.
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mStaticProxy->setBufferPosition(position);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // FIXME: offloaded and direct tracks call into the HAL for render positions
    // for compressed/synced data; however, we use proxy position for pure linear pcm data
    // as we do not know the capability of the HAL for pcm position support and standby.
    // There may be some latency differences between the HAL position and the proxy position.
    if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
        uint32_t dspFrames = 0;

        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames; // actually unused
            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        if (mCblk->mFlags & CBLK_INVALID) {
            (void) restoreTrack_l("getPosition");
            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
        }

        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l().value();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    (void) updateAndGetPosition_l();
    mPosition = 0;
    mPreviousTimestampValid = false;
#if 0
    // The documentation is not clear on the behavior of reload() and the restoration
    // of loop count. Historically we have not restored loop count, start, end,
    // but it makes sense if one desires to repeat playing a particular sound.
    if (mLoopCount != 0) {
        mLoopCountNotified = mLoopCount;
        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
    }
#endif
    mStaticProxy->setBufferPosition(0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
    AutoMutex lock(mLock);
    if (mSelectedDeviceId != deviceId) {
        mSelectedDeviceId = deviceId;
        if (mStatus == NO_ERROR) {
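            // Mark the control block invalid so the track is re-created via restoreTrack_l(),
            // picking up the newly selected output device.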
            android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
        }
    }
    return NO_ERROR;
}

audio_port_handle_t AudioTrack::getOutputDevice() {
    AutoMutex lock(mLock);
    return mSelectedDeviceId;
}

audio_port_handle_t AudioTrack::getRoutedDeviceId() {
    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return AUDIO_PORT_HANDLE_NONE;
    }
    // if the output stream does not have an active audio patch, use either the device initially
    // selected by audio policy manager or the last routed device
    audio_port_handle_t deviceId = AudioSystem::getDeviceIdForIo(mOutput);
    if (deviceId == AUDIO_PORT_HANDLE_NONE) {
        deviceId = mRoutedDeviceId;
    }
    mRoutedDeviceId = deviceId;
    return deviceId;
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

audio_stream_type_t AudioTrack::streamType() const
{
    if (mStreamType == AUDIO_STREAM_DEFAULT) {
        return audio_attributes_to_stream_type(&mAttributes);
    }
    return mStreamType;
}

uint32_t AudioTrack::latency()
{
    AutoMutex lock(mLock);
    updateLatency_l();
    return mLatency;
}

// -------------------------------------------------------------------------

// must be called with mLock held
void AudioTrack::updateLatency_l()
{
    status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGW("getLatency(%d) failed status %d", mOutput, status);
    } else {
        // FIXME don't believe this lie
        mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
    }
}

// TODO Move this macro to a common header file for enum to string conversion in audio framework.
#define MEDIA_CASE_ENUM(name) case name: return #name
const char * AudioTrack::convertTransferToText(transfer_type transferType) {
    switch (transferType) {
        MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
        MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
        MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
        MEDIA_CASE_ENUM(TRANSFER_SYNC);
        MEDIA_CASE_ENUM(TRANSFER_SHARED);
        default:
            return "UNRECOGNIZED";
    }
}

status_t AudioTrack::createTrack_l()
{
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
    }
    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;

    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.

    status_t status;
    audio_config_t config = AUDIO_CONFIG_INITIALIZER;
    config.sample_rate = mSampleRate;
    config.channel_mask = mChannelMask;
    config.format = mFormat;
    config.offload_info = mOffloadInfoCopy;
    mRoutedDeviceId = mSelectedDeviceId;
    status = AudioSystem::getOutputForAttr(attr, &output,
                                           mSessionId, &streamType, mClientUid,
                                           &config,
                                           mFlags, &mRoutedDeviceId, &mPortId);

    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u,"
              " format %#x, channel mask %#x, flags %#x",
              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask,
              mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all
    status = AudioSystem::getLatency(output, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }
    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);

    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    // TODO consider making this a member variable if there are other uses for it later
    size_t afFrameCountHAL;
    status = AudioSystem::getFrameCountHAL(output, &afFrameCountHAL);
    if (status != NO_ERROR) {
        ALOGE("getFrameCountHAL(output=%d) status %d", output, status);
        goto release;
    }
    ALOG_ASSERT(afFrameCountHAL > 0);

    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    if (mSampleRate == 0) {
        mSampleRate = mAfSampleRate;
        mOriginalSampleRate = mAfSampleRate;
    }

    // Client can only express a preference for FAST.  Server will perform additional tests.
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // either of these use cases:
        // use case 1: shared buffer
        bool sharedBuffer = mSharedBuffer != 0;
        bool transferAllowed =
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);

        bool useCaseAllowed = sharedBuffer || transferAllowed;
        if (!useCaseAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, not shared buffer and transfer = %s",
                  convertTransferToText(mTransfer));
        }

        // sample rates must also match
        bool sampleRateAllowed = mSampleRate == mAfSampleRate;
        if (!sampleRateAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied, rates do not match %u Hz, require %u Hz",
                  mSampleRate, mAfSampleRate);
        }

        bool fastAllowed = useCaseAllowed && sampleRateAllowed;
        if (!fastAllowed) {
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }
1405
1406    mNotificationFramesAct = mNotificationFramesReq;
1407
1408    size_t frameCount = mReqFrameCount;
1409    if (!audio_has_proportional_frames(mFormat)) {
1410
1411        if (mSharedBuffer != 0) {
1412            // Same comment as below about ignoring frameCount parameter for set()
1413            frameCount = mSharedBuffer->size();
1414        } else if (frameCount == 0) {
1415            frameCount = mAfFrameCount;
1416        }
1417        if (mNotificationFramesAct != frameCount) {
1418            mNotificationFramesAct = frameCount;
1419        }
1420    } else if (mSharedBuffer != 0) {
1421        // FIXME: Ensure client side memory buffers need
1422        // not have additional alignment beyond sample
1423        // (e.g. 16 bit stereo accessed as 32 bit frame).
1424        size_t alignment = audio_bytes_per_sample(mFormat);
1425        if (alignment & 1) {
1426            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
1427            alignment = 1;
1428        }
1429        if (mChannelCount > 1) {
1430            // More than 2 channels does not require stronger alignment than stereo
1431            alignment <<= 1;
1432        }
1433        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1434            ALOGE("Invalid buffer alignment: address %p, channel count %u",
1435                    mSharedBuffer->pointer(), mChannelCount);
1436            status = BAD_VALUE;
1437            goto release;
1438        }
1439
1440        // When initializing a shared buffer AudioTrack via constructors,
1441        // there's no frameCount parameter.
1442        // But when initializing a shared buffer AudioTrack via set(),
1443        // there _is_ a frameCount parameter.  We silently ignore it.
1444        frameCount = mSharedBuffer->size() / mFrameSize;
1445    } else {
1446        size_t minFrameCount = 0;
1447        // For fast tracks the frame count calculations and checks are mostly done by server,
1448        // but we try to respect the application's request for notifications per buffer.
1449        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1450            if (mNotificationsPerBufferReq > 0) {
1451                // Avoid possible arithmetic overflow during multiplication.
1452                // mNotificationsPerBuffer is clamped to a small integer earlier, so it is unlikely.
1453                if (mNotificationsPerBufferReq > SIZE_MAX / afFrameCountHAL) {
1454                    ALOGE("Requested notificationPerBuffer=%u ignored for HAL frameCount=%zu",
1455                            mNotificationsPerBufferReq, afFrameCountHAL);
1456                } else {
1457                    minFrameCount = afFrameCountHAL * mNotificationsPerBufferReq;
1458                }
1459            }
1460        } else {
1461            // for normal tracks precompute the frame count based on speed.
1462            const float speed = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
1463                            max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
1464            minFrameCount = calculateMinFrameCount(
1465                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
1466                    speed /*, 0 mNotificationsPerBufferReq*/);
1467        }
1468        if (frameCount < minFrameCount) {
1469            frameCount = minFrameCount;
1470        }
1471    }
1472
1473    audio_output_flags_t flags = mFlags;
1474
1475    pid_t tid = -1;
1476    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1477        // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
1478        // application-level code follows all non-blocking design rules, the language runtime
1479        // doesn't also follow those rules, so the thread will not benefit overall.
1480        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1481            tid = mAudioTrackThread->getTid();
1482        }
1483    }
1484
1485    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1486                                // but we will still need the original value also
1487    audio_session_t originalSessionId = mSessionId;
1488    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1489                                                      mSampleRate,
1490                                                      mFormat,
1491                                                      mChannelMask,
1492                                                      &temp,
1493                                                      &flags,
1494                                                      mSharedBuffer,
1495                                                      output,
1496                                                      mClientPid,
1497                                                      tid,
1498                                                      &mSessionId,
1499                                                      mClientUid,
1500                                                      &status,
1501                                                      mPortId);
1502    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1503            "session ID changed from %d to %d", originalSessionId, mSessionId);
1504
1505    if (status != NO_ERROR) {
1506        ALOGE("AudioFlinger could not create track, status: %d", status);
1507        goto release;
1508    }
1509    ALOG_ASSERT(track != 0);
1510
1511    // AudioFlinger now owns the reference to the I/O handle,
1512    // so we are no longer responsible for releasing it.
1513
1514    // FIXME compare to AudioRecord
1515    sp<IMemory> iMem = track->getCblk();
1516    if (iMem == 0) {
1517        ALOGE("Could not get control block");
1518        return NO_INIT;
1519    }
1520    void *iMemPointer = iMem->pointer();
1521    if (iMemPointer == NULL) {
1522        ALOGE("Could not get control block pointer");
1523        return NO_INIT;
1524    }
1525    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1526    if (mAudioTrack != 0) {
1527        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1528        mDeathNotifier.clear();
1529    }
1530    mAudioTrack = track;
1531    mCblkMemory = iMem;
1532    IPCThreadState::self()->flushCommands();
1533
1534    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1535    mCblk = cblk;
1536    // note that temp is the (possibly revised) value of frameCount
1537    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1538        // In the current design, the AudioTrack client checks and ensures frame count validity
1539        // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
1540        // except for fast tracks, which use a special method of assigning the frame count.
1541        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1542    }
1543    frameCount = temp;
1544
1545    mAwaitBoost = false;
1546    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1547        if (flags & AUDIO_OUTPUT_FLAG_FAST) {
1548            ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu", frameCount, temp);
1549            if (!mThreadCanCallJava) {
1550                mAwaitBoost = true;
1551            }
1552        } else {
1553            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", frameCount,
1554                    temp);
1555        }
1556    }
1557    mFlags = flags;
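    // mFlags now reflects the flags actually granted by the server (e.g. FAST may have been
    // denied above); mOrigFlags keeps the originally requested flags for restoreTrack_l().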
1558
1559    // Make sure that the application is notified with sufficient margin before an underrun.
1560    // The client can divide the AudioTrack buffer into sub-buffers,
1561    // and expresses this preference to the server as the notification frame count.
1562    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1563        size_t maxNotificationFrames;
1564        if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1565            // notify every HAL buffer, regardless of the size of the track buffer
1566            maxNotificationFrames = afFrameCountHAL;
1567        } else {
1568            // For normal tracks, use at least double-buffering if no sample rate conversion,
1569            // or at least triple-buffering if there is sample rate conversion
1570            const int nBuffering = mOriginalSampleRate == mAfSampleRate ? 2 : 3;
1571            maxNotificationFrames = frameCount / nBuffering;
1572        }
1573        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
1574            if (mNotificationFramesAct == 0) {
1575                ALOGD("Client defaulted notificationFrames to %zu for frameCount %zu",
1576                    maxNotificationFrames, frameCount);
1577            } else {
1578                ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
1579                    mNotificationFramesAct, maxNotificationFrames, frameCount);
1580            }
1581            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
1582        }
1583    }
1584
1585    // We retain a copy of the I/O handle, but don't own the reference
1586    mOutput = output;
1587    mRefreshRemaining = true;
1588
1589    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1590    // is the value of pointer() for the shared buffer, otherwise buffers points
1591    // immediately after the control block.  This address is for the mapping within client
1592    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1593    void* buffers;
1594    if (mSharedBuffer == 0) {
1595        buffers = cblk + 1;
1596    } else {
1597        buffers = mSharedBuffer->pointer();
1598        if (buffers == NULL) {
1599            ALOGE("Could not get buffer pointer");
1600            return NO_INIT;
1601        }
1602    }
1603
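    // (Re)attach the auxiliary effect, if any, recorded in mAuxEffectId.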
1604    mAudioTrack->attachAuxEffect(mAuxEffectId);
1605    mFrameCount = frameCount;
1606    updateLatency_l();  // this refetches mAfLatency and sets mLatency
1607
1608    // If IAudioTrack is re-created, don't let the requested frameCount
1609    // decrease.  This can confuse clients that cache frameCount().
1610    if (frameCount > mReqFrameCount) {
1611        mReqFrameCount = frameCount;
1612    }
1613
1614    // reset server position to 0 as we have new cblk.
1615    mServer = 0;
1616
1617    // update proxy
1618    if (mSharedBuffer == 0) {
1619        mStaticProxy.clear();
1620        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1621    } else {
1622        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1623        mProxy = mStaticProxy;
1624    }
1625
1626    mProxy->setVolumeLR(gain_minifloat_pack(
1627            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1628            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1629
1630    mProxy->setSendLevel(mSendLevel);
1631    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1632    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1633    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1634    mProxy->setSampleRate(effectiveSampleRate);
1635
1636    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1637    playbackRateTemp.mSpeed = effectiveSpeed;
1638    playbackRateTemp.mPitch = effectivePitch;
1639    mProxy->setPlaybackRate(playbackRateTemp);
1640    mProxy->setMinimum(mNotificationFramesAct);
1641
1642    mDeathNotifier = new DeathNotifier(this);
1643    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1644
1645    if (mDeviceCallback != 0) {
1646        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
1647    }
1648
1649    return NO_ERROR;
1650    }
1651
1652release:
1653    AudioSystem::releaseOutput(output, streamType, mSessionId);
1654    if (status == NO_ERROR) {
1655        status = NO_INIT;
1656    }
1657    return status;
1658}
1659
1660status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1661{
1662    if (audioBuffer == NULL) {
1663        if (nonContig != NULL) {
1664            *nonContig = 0;
1665        }
1666        return BAD_VALUE;
1667    }
1668    if (mTransfer != TRANSFER_OBTAIN) {
1669        audioBuffer->frameCount = 0;
1670        audioBuffer->size = 0;
1671        audioBuffer->raw = NULL;
1672        if (nonContig != NULL) {
1673            *nonContig = 0;
1674        }
1675        return INVALID_OPERATION;
1676    }
1677
1678    const struct timespec *requested;
1679    struct timespec timeout;
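    // Translate the legacy waitCount convention into a timespec: -1 blocks indefinitely,
    // 0 is non-blocking, and a positive count waits up to waitCount * WAIT_PERIOD_MS in total.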
1680    if (waitCount == -1) {
1681        requested = &ClientProxy::kForever;
1682    } else if (waitCount == 0) {
1683        requested = &ClientProxy::kNonBlocking;
1684    } else if (waitCount > 0) {
1685        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1686        timeout.tv_sec = ms / 1000;
1687        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1688        requested = &timeout;
1689    } else {
1690        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1691        requested = NULL;
1692    }
1693    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1694}
1695
1696status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1697        struct timespec *elapsed, size_t *nonContig)
1698{
1699    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1700    uint32_t oldSequence = 0;
1701    uint32_t newSequence;
1702
1703    Proxy::Buffer buffer;
1704    status_t status = NO_ERROR;
1705
1706    static const int32_t kMaxTries = 5;
1707    int32_t tryCounter = kMaxTries;
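    // Retry on DEAD_OBJECT (after restoring the track) or NOT_ENOUGH_DATA (after restarting
    // a track disabled by underrun), up to kMaxTries attempts.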
1708
1709    do {
1710        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1711        // keep them from going away if another thread re-creates the track during obtainBuffer()
1712        sp<AudioTrackClientProxy> proxy;
1713        sp<IMemory> iMem;
1714
1715        {   // start of lock scope
1716            AutoMutex lock(mLock);
1717
1718            newSequence = mSequence;
1719            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1720            if (status == DEAD_OBJECT) {
1721                // re-create track, unless someone else has already done so
1722                if (newSequence == oldSequence) {
1723                    status = restoreTrack_l("obtainBuffer");
1724                    if (status != NO_ERROR) {
1725                        buffer.mFrameCount = 0;
1726                        buffer.mRaw = NULL;
1727                        buffer.mNonContig = 0;
1728                        break;
1729                    }
1730                }
1731            }
1732            oldSequence = newSequence;
1733
1734            if (status == NOT_ENOUGH_DATA) {
1735                restartIfDisabled();
1736            }
1737
1738            // Keep the extra references
1739            proxy = mProxy;
1740            iMem = mCblkMemory;
1741
1742            if (mState == STATE_STOPPING) {
1743                status = -EINTR;
1744                buffer.mFrameCount = 0;
1745                buffer.mRaw = NULL;
1746                buffer.mNonContig = 0;
1747                break;
1748            }
1749
1750            // Non-blocking if track is stopped or paused
1751            if (mState != STATE_ACTIVE) {
1752                requested = &ClientProxy::kNonBlocking;
1753            }
1754
1755        }   // end of lock scope
1756
1757        buffer.mFrameCount = audioBuffer->frameCount;
1758        // FIXME starts the requested timeout and elapsed over from scratch
1759        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1760    } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1761
1762    audioBuffer->frameCount = buffer.mFrameCount;
1763    audioBuffer->size = buffer.mFrameCount * mFrameSize;
1764    audioBuffer->raw = buffer.mRaw;
1765    if (nonContig != NULL) {
1766        *nonContig = buffer.mNonContig;
1767    }
1768    return status;
1769}
1770
1771void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1772{
1773    // FIXME add error checking on mode, by adding an internal version
1774    if (mTransfer == TRANSFER_SHARED) {
1775        return;
1776    }
1777
1778    size_t stepCount = audioBuffer->size / mFrameSize;
1779    if (stepCount == 0) {
1780        return;
1781    }
1782
1783    Proxy::Buffer buffer;
1784    buffer.mFrameCount = stepCount;
1785    buffer.mRaw = audioBuffer->raw;
1786
1787    AutoMutex lock(mLock);
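    // mReleased counts the frames released back to the server; restoreTrack_l() uses it to
    // restore the playback position of streaming tracks after the IAudioTrack is re-created.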
1788    mReleased += stepCount;
1789    mInUnderrun = false;
1790    mProxy->releaseBuffer(&buffer);
1791
1792    // restart track if it was disabled by audioflinger due to previous underrun
1793    restartIfDisabled();
1794}
1795
1796void AudioTrack::restartIfDisabled()
1797{
1798    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1799    if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1800        ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1801        // FIXME ignoring status
1802        mAudioTrack->start();
1803    }
1804}
1805
1806// -------------------------------------------------------------------------
1807
1808ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1809{
1810    if (mTransfer != TRANSFER_SYNC) {
1811        return INVALID_OPERATION;
1812    }
1813
1814    if (isDirect()) {
1815        AutoMutex lock(mLock);
1816        int32_t flags = android_atomic_and(
1817                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1818                            &mCblk->mFlags);
1819        if (flags & CBLK_INVALID) {
1820            return DEAD_OBJECT;
1821        }
1822    }
1823
1824    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1825        // Sanity check: the user is most likely passing an error code, and it would
1826        // make the return value ambiguous (actualSize vs error).
1827        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1828        return BAD_VALUE;
1829    }
1830
1831    size_t written = 0;
1832    Buffer audioBuffer;
1833
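    // Copy the caller's data into the track buffer one obtainBuffer()/releaseBuffer() chunk
    // at a time.  If an error occurs after a partial copy, return the bytes already written.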
1834    while (userSize >= mFrameSize) {
1835        audioBuffer.frameCount = userSize / mFrameSize;
1836
1837        status_t err = obtainBuffer(&audioBuffer,
1838                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1839        if (err < 0) {
1840            if (written > 0) {
1841                break;
1842            }
1843            if (err == TIMED_OUT || err == -EINTR) {
1844                err = WOULD_BLOCK;
1845            }
1846            return ssize_t(err);
1847        }
1848
1849        size_t toWrite = audioBuffer.size;
1850        memcpy(audioBuffer.i8, buffer, toWrite);
1851        buffer = ((const char *) buffer) + toWrite;
1852        userSize -= toWrite;
1853        written += toWrite;
1854
1855        releaseBuffer(&audioBuffer);
1856    }
1857
1858    if (written > 0) {
1859        mFramesWritten += written / mFrameSize;
1860    }
1861    return written;
1862}
1863
1864// -------------------------------------------------------------------------
1865
1866nsecs_t AudioTrack::processAudioBuffer()
1867{
1868    // Currently the AudioTrack thread is not created if there are no callbacks.
1869    // Would it ever make sense to run the thread, even without callbacks?
1870    // If so, then replace this by checks at each use for mCbf != NULL.
1871    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1872
1873    mLock.lock();
1874    if (mAwaitBoost) {
1875        mAwaitBoost = false;
1876        mLock.unlock();
1877        static const int32_t kMaxTries = 5;
1878        int32_t tryCounter = kMaxTries;
1879        uint32_t pollUs = 10000;
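        // Poll the thread's scheduling policy with exponential backoff, waiting for the
        // SCHED_FIFO/SCHED_RR boost expected for a FAST track to take effect.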
1880        do {
1881            int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
1882            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1883                break;
1884            }
1885            usleep(pollUs);
1886            pollUs <<= 1;
1887        } while (tryCounter-- > 0);
1888        if (tryCounter < 0) {
1889            ALOGE("did not receive expected priority boost on time");
1890        }
1891        // Run again immediately
1892        return 0;
1893    }
1894
1895    // Can only reference mCblk while locked
1896    int32_t flags = android_atomic_and(
1897        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1898
1899    // Check for track invalidation
1900    if (flags & CBLK_INVALID) {
1901        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1902        // AudioSystem cache. We should not exit here but after calling the callback so
1903        // that the upper layers can recreate the track
1904        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1905            status_t status __unused = restoreTrack_l("processAudioBuffer");
1906            // FIXME unused status
1907            // after restoration, continue below to make sure that the loop and buffer events
1908            // are notified because they have been cleared from mCblk->mFlags above.
1909        }
1910    }
1911
1912    bool waitStreamEnd = mState == STATE_STOPPING;
1913    bool active = mState == STATE_ACTIVE;
1914
1915    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1916    bool newUnderrun = false;
1917    if (flags & CBLK_UNDERRUN) {
1918#if 0
1919        // Currently in shared buffer mode, when the server reaches the end of the buffer,
1920        // the track stays active in a continuous underrun state.  It's up to the application
1921        // to pause or stop the track, or set the position to a new offset within the buffer.
1922        // This was some experimental code to auto-pause on underrun.  Keeping it here
1923        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1924        if (mTransfer == TRANSFER_SHARED) {
1925            mState = STATE_PAUSED;
1926            active = false;
1927        }
1928#endif
1929        if (!mInUnderrun) {
1930            mInUnderrun = true;
1931            newUnderrun = true;
1932        }
1933    }
1934
1935    // Get current position of server
1936    Modulo<uint32_t> position(updateAndGetPosition_l());
1937
1938    // Manage marker callback
1939    bool markerReached = false;
1940    Modulo<uint32_t> markerPosition(mMarkerPosition);
1941    // uses 32 bit wraparound for comparison with position.
1942    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1943        mMarkerReached = markerReached = true;
1944    }
1945
1946    // Determine number of new position callback(s) that will be needed, while locked
1947    size_t newPosCount = 0;
1948    Modulo<uint32_t> newPosition(mNewPosition);
1949    uint32_t updatePeriod = mUpdatePeriod;
1950    // FIXME fails for wraparound, need 64 bits
1951    if (updatePeriod > 0 && position >= newPosition) {
1952        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1953        mNewPosition += updatePeriod * newPosCount;
1954    }
1955
1956    // Cache other fields that will be needed soon
1957    uint32_t sampleRate = mSampleRate;
1958    float speed = mPlaybackRate.mSpeed;
1959    const uint32_t notificationFrames = mNotificationFramesAct;
1960    if (mRefreshRemaining) {
1961        mRefreshRemaining = false;
1962        mRemainingFrames = notificationFrames;
1963        mRetryOnPartialBuffer = false;
1964    }
1965    size_t misalignment = mProxy->getMisalignment();
1966    uint32_t sequence = mSequence;
1967    sp<AudioTrackClientProxy> proxy = mProxy;
1968
1969    // Determine the number of new loop callback(s) that will be needed, while locked.
1970    int loopCountNotifications = 0;
1971    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1972
1973    if (mLoopCount > 0) {
1974        int loopCount;
1975        size_t bufferPosition;
1976        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1977        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1978        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1979        mLoopCountNotified = loopCount; // discard any excess notifications
1980    } else if (mLoopCount < 0) {
1981        // FIXME: We're not accurate with notification count and position with infinite looping
1982        // since loopCount from server side will always return -1 (we could decrement it).
1983        size_t bufferPosition = mStaticProxy->getBufferPosition();
1984        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1985        loopPeriod = mLoopEnd - bufferPosition;
1986    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1987        size_t bufferPosition = mStaticProxy->getBufferPosition();
1988        loopPeriod = mFrameCount - bufferPosition;
1989    }
1990
1991    // These fields don't need to be cached, because they are assigned only by set():
1992    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1993    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1994
1995    mLock.unlock();
1996
1997    // get anchor time to account for callbacks.
1998    const nsecs_t timeBeforeCallbacks = systemTime();
1999
2000    if (waitStreamEnd) {
2001        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
2002        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
2003        // (and make sure we don't callback for more data while we're stopping).
2004        // This helps with position, marker notifications, and track invalidation.
2005        struct timespec timeout;
2006        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
2007        timeout.tv_nsec = 0;
2008
2009        status_t status = proxy->waitStreamEndDone(&timeout);
2010        switch (status) {
2011        case NO_ERROR:
2012        case DEAD_OBJECT:
2013        case TIMED_OUT:
2014            if (status != DEAD_OBJECT) {
2015                // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
2016                // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
2017                mCbf(EVENT_STREAM_END, mUserData, NULL);
2018            }
2019            {
2020                AutoMutex lock(mLock);
2021                // The previously assigned value of waitStreamEnd is no longer valid,
2022                // since the mutex has been unlocked and either the callback handler
2023                // or another thread could have re-started the AudioTrack during that time.
2024                waitStreamEnd = mState == STATE_STOPPING;
2025                if (waitStreamEnd) {
2026                    mState = STATE_STOPPED;
2027                    mReleased = 0;
2028                }
2029            }
2030            if (waitStreamEnd && status != DEAD_OBJECT) {
2031               return NS_INACTIVE;
2032            }
2033            break;
2034        }
2035        return 0;
2036    }
2037
2038    // perform callbacks while unlocked
2039    if (newUnderrun) {
2040        mCbf(EVENT_UNDERRUN, mUserData, NULL);
2041    }
2042    while (loopCountNotifications > 0) {
2043        mCbf(EVENT_LOOP_END, mUserData, NULL);
2044        --loopCountNotifications;
2045    }
2046    if (flags & CBLK_BUFFER_END) {
2047        mCbf(EVENT_BUFFER_END, mUserData, NULL);
2048    }
2049    if (markerReached) {
2050        mCbf(EVENT_MARKER, mUserData, &markerPosition);
2051    }
2052    while (newPosCount > 0) {
2053        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
2054        mCbf(EVENT_NEW_POS, mUserData, &temp);
2055        newPosition += updatePeriod;
2056        newPosCount--;
2057    }
2058
2059    if (mObservedSequence != sequence) {
2060        mObservedSequence = sequence;
2061        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
2062        // for offloaded tracks, just wait for the upper layers to recreate the track
2063        if (isOffloadedOrDirect()) {
2064            return NS_INACTIVE;
2065        }
2066    }
2067
2068    // if inactive, then don't run me again until re-started
2069    if (!active) {
2070        return NS_INACTIVE;
2071    }
2072
2073    // Compute the estimated time until the next timed event (position, markers, loops)
2074    // FIXME only for non-compressed audio
2075    uint32_t minFrames = ~0;
2076    if (!markerReached && position < markerPosition) {
2077        minFrames = (markerPosition - position).value();
2078    }
2079    if (loopPeriod > 0 && loopPeriod < minFrames) {
2080        // loopPeriod is already adjusted for actual position.
2081        minFrames = loopPeriod;
2082    }
2083    if (updatePeriod > 0) {
2084        minFrames = min(minFrames, (newPosition - position).value());
2085    }
2086
2087    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
2088    static const uint32_t kPoll = 0;
2089    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
2090        minFrames = kPoll * notificationFrames;
2091    }
2092
2093    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
2094    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
2095    const nsecs_t timeAfterCallbacks = systemTime();
2096
2097    // Convert frame units to time units
2098    nsecs_t ns = NS_WHENEVER;
2099    if (minFrames != (uint32_t) ~0) {
2100        // AudioFlinger consumption of client data may be irregular when coming out of device
2101        // standby since the kernel buffers require filling. This is throttled to no more than 2x
2102        // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
2103        // half (but no more than half a second) to improve callback accuracy during these temporary
2104        // data surges.
2105        const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
2106        constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
2107        ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
2108        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
2109        // TODO: Should we warn if the callback time is too long?
2110        if (ns < 0) ns = 0;
2111    }
2112
2113    // If not supplying data by EVENT_MORE_DATA, then we're done
2114    if (mTransfer != TRANSFER_CALLBACK) {
2115        return ns;
2116    }
2117
2118    // EVENT_MORE_DATA callback handling.
2119    // Timing for linear pcm audio data formats can be derived directly from the
2120    // buffer fill level.
2121    // Timing for compressed data is not directly available from the buffer fill level,
2122    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
2123    // to return a certain fill level.
2124
2125    struct timespec timeout;
2126    const struct timespec *requested = &ClientProxy::kForever;
2127    if (ns != NS_WHENEVER) {
2128        timeout.tv_sec = ns / 1000000000LL;
2129        timeout.tv_nsec = ns % 1000000000LL;
2130        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
2131        requested = &timeout;
2132    }
2133
2134    size_t writtenFrames = 0;
2135    while (mRemainingFrames > 0) {
2136
2137        Buffer audioBuffer;
2138        audioBuffer.frameCount = mRemainingFrames;
2139        size_t nonContig;
2140        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
2141        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
2142                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
2143        requested = &ClientProxy::kNonBlocking;
2144        size_t avail = audioBuffer.frameCount + nonContig;
2145        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
2146                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
2147        if (err != NO_ERROR) {
2148            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
2149                    (isOffloaded() && (err == DEAD_OBJECT))) {
2150                // FIXME bug 25195759
2151                return 1000000;
2152            }
2153            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
2154            return NS_NEVER;
2155        }
2156
2157        if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
2158            mRetryOnPartialBuffer = false;
2159            if (avail < mRemainingFrames) {
2160                if (ns > 0) { // account for obtain time
2161                    const nsecs_t timeNow = systemTime();
2162                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2163                }
2164                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2165                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2166                    ns = myns;
2167                }
2168                return ns;
2169            }
2170        }
2171
2172        size_t reqSize = audioBuffer.size;
2173        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
2174        size_t writtenSize = audioBuffer.size;
2175
2176        // Sanity check on returned size
2177        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
2178            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
2179                    reqSize, ssize_t(writtenSize));
2180            return NS_NEVER;
2181        }
2182
2183        if (writtenSize == 0) {
2184            // The callback is done filling buffers
2185            // Keep this thread going to handle timed events and
2186            // still try to get more data in intervals of WAIT_PERIOD_MS
2187            // but don't just loop and block the CPU, so wait
2188
2189            // mCbf(EVENT_MORE_DATA, ...) might either
2190            // (1) Block until it can fill the buffer, returning 0 size on EOS.
2191            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2192            // (3) Return 0 size when no data is available, does not wait for more data.
2193            //
2194            // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2195            // We try to compute the wait time to avoid a tight sleep-wait cycle,
2196            // especially for case (3).
2197            //
2198            // The decision to support (1) and (2) affects the sizing of mRemainingFrames
2199            // and this loop; whereas for case (3) we could simply check once with the full
2200            // buffer size and skip the loop entirely.
2201
2202            nsecs_t myns;
2203            if (audio_has_proportional_frames(mFormat)) {
2204                // time to wait based on buffer occupancy
2205                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2206                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2207                // audio flinger thread buffer size (TODO: adjust for fast tracks)
2208                // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
2209                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2210                // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2211                myns = datans + (afns / 2);
2212            } else {
2213                // FIXME: This could ping quite a bit if the buffer isn't full.
2214                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2215                myns = kWaitPeriodNs;
2216            }
2217            if (ns > 0) { // account for obtain and callback time
2218                const nsecs_t timeNow = systemTime();
2219                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2220            }
2221            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2222                ns = myns;
2223            }
2224            return ns;
2225        }
2226
2227        size_t releasedFrames = writtenSize / mFrameSize;
2228        audioBuffer.frameCount = releasedFrames;
2229        mRemainingFrames -= releasedFrames;
2230        if (misalignment >= releasedFrames) {
2231            misalignment -= releasedFrames;
2232        } else {
2233            misalignment = 0;
2234        }
2235
2236        releaseBuffer(&audioBuffer);
2237        writtenFrames += releasedFrames;
2238
2239        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2240        // if callback doesn't like to accept the full chunk
2241        if (writtenSize < reqSize) {
2242            continue;
2243        }
2244
2245        // There could be enough non-contiguous frames available to satisfy the remaining request
2246        if (mRemainingFrames <= nonContig) {
2247            continue;
2248        }
2249
2250#if 0
2251        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2252        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
2253        // that total to a sum == notificationFrames.
2254        if (0 < misalignment && misalignment <= mRemainingFrames) {
2255            mRemainingFrames = misalignment;
2256            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2257        }
2258#endif
2259
2260    }
2261    if (writtenFrames > 0) {
2262        AutoMutex lock(mLock);
2263        mFramesWritten += writtenFrames;
2264    }
2265    mRemainingFrames = notificationFrames;
2266    mRetryOnPartialBuffer = true;
2267
2268    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2269    return 0;
2270}
2271
2272status_t AudioTrack::restoreTrack_l(const char *from)
2273{
2274    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2275          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2276    ++mSequence;
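    // Bumping mSequence lets obtainBuffer() and processAudioBuffer() detect that the
    // IAudioTrack has been re-created (see mObservedSequence).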
2277
2278    // refresh the audio configuration cache in this process to make sure we get new
2279    // output parameters and new IAudioFlinger in createTrack_l()
2280    AudioSystem::clearAudioConfigCache();
2281
2282    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2283        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2284        // reconsider enabling for linear PCM encodings when position can be preserved.
2285        return DEAD_OBJECT;
2286    }
2287
2288    // Save so we can return count since creation.
2289    mUnderrunCountOffset = getUnderrunCount_l();
2290
2291    // save the old static buffer position
2292    uint32_t staticPosition = 0;
2293    size_t bufferPosition = 0;
2294    int loopCount = 0;
2295    if (mStaticProxy != 0) {
2296        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2297        staticPosition = mStaticProxy->getPosition().unsignedValue();
2298    }
2299
2300    mFlags = mOrigFlags;
2301
2302    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2303    // following member variables: mAudioTrack, mCblkMemory and mCblk.
2304    // It will also delete the strong references on previous IAudioTrack and IMemory.
2305    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2306    status_t result = createTrack_l();
2307
2308    if (result == NO_ERROR) {
2309        // take the frames that will be lost by track recreation into account in saved position
2310        // For streaming tracks, this is the amount we obtained from the user/client
2311        // (not the number actually consumed at the server - those are already lost).
2312        if (mStaticProxy == 0) {
2313            mPosition = mReleased;
2314        }
2315        // Continue playback from last known position and restore loop.
2316        if (mStaticProxy != 0) {
2317            if (loopCount != 0) {
2318                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2319                        mLoopStart, mLoopEnd, loopCount);
2320            } else {
2321                mStaticProxy->setBufferPosition(bufferPosition);
2322                if (bufferPosition == mFrameCount) {
2323                    ALOGD("restoring track at end of static buffer");
2324                }
2325            }
2326        }
2327        // restore volume handler
2328        mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
2329            sp<VolumeShaper::Operation> operationToEnd =
2330                    new VolumeShaper::Operation(shaper.mOperation);
2331            // TODO: Ideally we would restore to the exact xOffset position
2332            // as returned by getVolumeShaperState(), but we don't have that
2333            // information when restoring at the client unless we periodically poll
2334            // the server or create shared memory state.
2335            //
2336            // For now, we simply advance to the end of the VolumeShaper effect
2337            // if it has been started.
2338            if (shaper.isStarted()) {
2339                operationToEnd->setNormalizedTime(1.f);
2340            }
2341            return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
2342        });
2343
2344        if (mState == STATE_ACTIVE) {
2345            result = mAudioTrack->start();
2346        }
2347        // server resets to zero so we offset
2348        mFramesWrittenServerOffset =
2349                mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
2350        mFramesWrittenAtRestore = mFramesWrittenServerOffset;
2351    }
2352    if (result != NO_ERROR) {
2353        ALOGW("restoreTrack_l() failed status %d", result);
2354        mState = STATE_STOPPED;
2355        mReleased = 0;
2356    }
2357
2358    return result;
2359}
2360
2361Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2362{
2363    // This is the sole place to read server consumed frames
2364    Modulo<uint32_t> newServer(mProxy->getPosition());
2365    const int32_t delta = (newServer - mServer).signedValue();
2366    // TODO There is controversy about whether there can be "negative jitter" in server position.
2367    //      This should be investigated further, and if possible, it should be addressed.
2368    //      A more definite failure mode is infrequent polling by client.
2369    //      One could call (void)getPosition_l() in releaseBuffer(),
2370    //      so mReleased and mPosition are always lock-step as best possible.
2371    //      That should ensure delta never goes negative for infrequent polling
2372    //      unless the server has more than 2^31 frames in its buffer,
2373    //      in which case the use of uint32_t for these counters has bigger issues.
2374    ALOGE_IF(delta < 0,
2375            "detected illegal retrograde motion by the server: mServer advanced by %d",
2376            delta);
2377    mServer = newServer;
2378    if (delta > 0) { // avoid retrograde
2379        mPosition += delta;
2380    }
2381    return mPosition;
2382}
2383
2384bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
2385{
2386    updateLatency_l();
2387    // applicable for mixing tracks only (not offloaded or direct)
2388    if (mStaticProxy != 0) {
2389        return true; // static tracks do not have issues with buffer sizing.
2390    }
2391    const size_t minFrameCount =
2392            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed
2393                /*, 0 mNotificationsPerBufferReq*/);
2394    const bool allowed = mFrameCount >= minFrameCount;
2395    ALOGD_IF(!allowed,
2396            "isSampleRateSpeedAllowed_l denied "
2397            "mAfLatency:%u  mAfFrameCount:%zu  mAfSampleRate:%u  sampleRate:%u  speed:%f "
2398            "mFrameCount:%zu < minFrameCount:%zu",
2399            mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
2400            mFrameCount, minFrameCount);
2401    return allowed;
2402}
2403
2404status_t AudioTrack::setParameters(const String8& keyValuePairs)
2405{
2406    AutoMutex lock(mLock);
2407    return mAudioTrack->setParameters(keyValuePairs);
2408}
2409
2410VolumeShaper::Status AudioTrack::applyVolumeShaper(
2411        const sp<VolumeShaper::Configuration>& configuration,
2412        const sp<VolumeShaper::Operation>& operation)
2413{
2414    AutoMutex lock(mLock);
2415    mVolumeHandler->setIdIfNecessary(configuration);
2416    VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
2417
2418    if (status == DEAD_OBJECT) {
2419        if (restoreTrack_l("applyVolumeShaper") == OK) {
2420            status = mAudioTrack->applyVolumeShaper(configuration, operation);
2421        }
2422    }
2423    if (status >= 0) {
2424        // save VolumeShaper for restore
2425        mVolumeHandler->applyVolumeShaper(configuration, operation);
2426        if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
2427            mVolumeHandler->setStarted();
2428        }
2429    } else {
2430        // warn only if not an expected restore failure.
2431        ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
2432                "applyVolumeShaper failed: %d", status);
2433    }
2434    return status;
2435}
2436
2437sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2438{
2439    AutoMutex lock(mLock);
2440    sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2441    if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2442        if (restoreTrack_l("getVolumeShaperState") == OK) {
2443            state = mAudioTrack->getVolumeShaperState(id);
2444        }
2445    }
2446    return state;
2447}
2448
2449status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2450{
2451    if (timestamp == nullptr) {
2452        return BAD_VALUE;
2453    }
2454    AutoMutex lock(mLock);
2455    return getTimestamp_l(timestamp);
2456}
2457
2458status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2459{
2460    if (mCblk->mFlags & CBLK_INVALID) {
2461        const status_t status = restoreTrack_l("getTimestampExtended");
2462        if (status != OK) {
2463            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2464            // recommending that the track be recreated.
2465            return DEAD_OBJECT;
2466        }
2467    }
2468    // check for offloaded/direct here in case restoring somehow changed those flags.
2469    if (isOffloadedOrDirect_l()) {
2470        return INVALID_OPERATION; // not supported
2471    }
2472    status_t status = mProxy->getTimestamp(timestamp);
2473    LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
2474    bool found = false;
2475    timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2476    timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2477    // server side frame offset in case AudioTrack has been restored.
2478    for (int i = ExtendedTimestamp::LOCATION_SERVER;
2479            i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2480        if (timestamp->mTimeNs[i] >= 0) {
2481            // apply server offset (the flushed frame count is ignored
2482            // so we don't report the jump when the flush occurs).
2483            timestamp->mPosition[i] += mFramesWrittenServerOffset;
2484            found = true;
2485        }
2486    }
2487    return found ? OK : WOULD_BLOCK;
2488}
2489
2490status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2491{
2492    AutoMutex lock(mLock);
2493    return getTimestamp_l(timestamp);
2494}
2495
2496status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
2497{
2498    bool previousTimestampValid = mPreviousTimestampValid;
2499    // Set false here to cover all the error return cases.
2500    mPreviousTimestampValid = false;
2501
2502    switch (mState) {
2503    case STATE_ACTIVE:
2504    case STATE_PAUSED:
2505        break; // handle below
2506    case STATE_FLUSHED:
2507    case STATE_STOPPED:
2508        return WOULD_BLOCK;
2509    case STATE_STOPPING:
2510    case STATE_PAUSED_STOPPING:
2511        if (!isOffloaded_l()) {
2512            return INVALID_OPERATION;
2513        }
2514        break; // offloaded tracks handled below
2515    default:
2516        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2517        break;
2518    }
2519
2520    if (mCblk->mFlags & CBLK_INVALID) {
2521        const status_t status = restoreTrack_l("getTimestamp");
2522        if (status != OK) {
2523            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2524            // recommending that the track be recreated.
2525            return DEAD_OBJECT;
2526        }
2527    }
2528
2529    // The presented frame count must always lag behind the consumed frame count.
2530    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
2531
2532    status_t status;
2533    if (isOffloadedOrDirect_l()) {
2534        // use Binder to get timestamp
2535        status = mAudioTrack->getTimestamp(timestamp);
2536    } else {
2537        // read timestamp from shared memory
2538        ExtendedTimestamp ets;
2539        status = mProxy->getTimestamp(&ets);
2540        if (status == OK) {
2541            ExtendedTimestamp::Location location;
2542            status = ets.getBestTimestamp(&timestamp, &location);
2543
2544            if (status == OK) {
2545                updateLatency_l();
2546                // It is possible that the best location has moved from the kernel to the server.
2547                // In this case we adjust the position from the previous computed latency.
2548                if (location == ExtendedTimestamp::LOCATION_SERVER) {
2549                    ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
2550                            "getTimestamp() location moved from kernel to server");
2551                    // check that the last kernel OK time info exists and the positions
2552                    // are valid (if they predate the current track, the positions may
2553                    // be zero or negative).
2554                    const int64_t frames =
2555                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2556                            ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
2557                            ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
2558                            ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
2559                            ?
2560                            int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
2561                                    / 1000)
2562                            :
2563                            (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2564                            - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
2565                    ALOGV("frame adjustment:%lld  timestamp:%s",
2566                            (long long)frames, ets.toString().c_str());
2567                    if (frames >= ets.mPosition[location]) {
2568                        timestamp.mPosition = 0;
2569                    } else {
2570                        timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
2571                    }
2572                } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
2573                    ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
2574                            "getTimestamp() location moved from server to kernel");
2575                }
2576
2577                // We update the timestamp time even when paused.
2578                if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
2579                    const int64_t now = systemTime();
2580                    const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
2581                    const int64_t lag =
2582                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
2583                                ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
2584                            ? int64_t(mAfLatency * 1000000LL)
2585                            : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
2586                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
2587                             * NANOS_PER_SECOND / mSampleRate;
2588                    const int64_t limit = now - lag; // no earlier than this limit
2589                    if (at < limit) {
2590                        ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
2591                                (long long)lag, (long long)at, (long long)limit);
2592                        timestamp.mTime = convertNsToTimespec(limit);
2593                    }
2594                }
2595                mPreviousLocation = location;
2596            } else {
2597                // right after AudioTrack is started, one may not find a timestamp
2598                ALOGV("getBestTimestamp did not find timestamp");
2599            }
2600        }
2601        if (status == INVALID_OPERATION) {
2602            // INVALID_OPERATION occurs when no timestamp has been issued by the server;
2603            // other failures are signaled by a negative time.
2604            // If we come out of FLUSHED or STOPPED where the position is known
2605            // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
2606            // "zero" for NuPlayer).  We don't convert for track restoration as position
2607            // does not reset.
2608            ALOGV("timestamp server offset:%lld restore frames:%lld",
2609                    (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
2610            if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
2611                status = WOULD_BLOCK;
2612            }
2613        }
2614    }
2615    if (status != NO_ERROR) {
2616        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2617        return status;
2618    }
2619    if (isOffloadedOrDirect_l()) {
2620        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2621            // use cached paused position in case another offloaded track is running.
2622            timestamp.mPosition = mPausedPosition;
2623            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2624            // TODO: adjust for delay
2625            return NO_ERROR;
2626        }
2627
2628        // Check whether a pending flush or stop has completed, as those commands may
2629        // be asynchronous, return before fully completing, or exhibit glitchy behavior.
2630        //
2631        // Originally this showed up as the first timestamp being a continuation of
2632        // the previous song under gapless playback.
2633        // However, we sometimes see zero timestamps, then a glitch of
2634        // the previous song's position, and then correct timestamps afterwards.
2635        if (mStartFromZeroUs != 0 && mSampleRate != 0) {
2636            static const int kTimeJitterUs = 100000; // 100 ms
2637            static const int k1SecUs = 1000000;
2638
2639            const int64_t timeNow = getNowUs();
2640
2641            if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
2642                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2643                if (timestampTimeUs < mStartFromZeroUs) {
2644                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
2645                }
2646                const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
2647                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2648                        / ((double)mSampleRate * mPlaybackRate.mSpeed);
2649
2650                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2651                    // Verify that the counter can't count faster than the sample rate
2652                    // since the start time.  If greater, then that means we may have failed
2653                    // to completely flush or stop the previous playing track.
2654                    ALOGW_IF(!mTimestampStartupGlitchReported,
2655                            "getTimestamp startup glitch detected"
2656                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2657                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
2658                            timestamp.mPosition);
2659                    mTimestampStartupGlitchReported = true;
2660                    if (previousTimestampValid
2661                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2662                        timestamp = mPreviousTimestamp;
2663                        mPreviousTimestampValid = true;
2664                        return NO_ERROR;
2665                    }
2666                    return WOULD_BLOCK;
2667                }
2668                if (deltaPositionByUs != 0) {
2669                    mStartFromZeroUs = 0; // don't check again, we got valid nonzero position.
2670                }
2671            } else {
2672                mStartFromZeroUs = 0; // don't check again, start time expired.
2673            }
2674            mTimestampStartupGlitchReported = false;
2675        }
2676    } else {
2677        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2678        (void) updateAndGetPosition_l();
2679        // Server consumed (mServer) and presented both use the same server time base,
2680        // and server consumed is always >= presented.
2681        // The delta between these represents the number of frames in the buffer pipeline.
2682        // If this delta between these is greater than the client position, it means that
2683        // actually presented is still stuck at the starting line (figuratively speaking),
2684        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2685        // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2686        // mPosition exceeds 32 bits.
2687        // TODO Remove when timestamp is updated to contain pipeline status info.
2688        const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2689        if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
2690                && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2691            return INVALID_OPERATION;
2692        }
2693        // Convert timestamp position from server time base to client time base.
2694        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2695        // But if we change it to 64-bit then this could fail.
2696        // Use Modulo computation here.
2697        timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2698        // Immediately after a call to getPosition_l(), mPosition and
2699        // mServer both represent the same frame position.  mPosition is
2700        // in client's point of view, and mServer is in server's point of
2701        // view.  So the difference between them is the "fudge factor"
2702        // between client and server views due to stop() and/or new
2703        // IAudioTrack.  And timestamp.mPosition is initially in server's
2704        // point of view, so we need to apply the same fudge factor to it.
2705    }
2706
2707    // Prevent retrograde motion in timestamp.
2708    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2709    if (status == NO_ERROR) {
2710        // previousTimestampValid is set to false when starting after a stop or flush.
2711        if (previousTimestampValid) {
2712            const int64_t previousTimeNanos =
2713                    audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
2714            int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);
2715
2716            // Fix stale time when checking timestamp right after start().
2717            //
2718            // For offload compatibility, use a default lag value here.
2719            // Any time discrepancy between this update and the pause timestamp is handled
2720            // by the retrograde check afterwards.
2721            const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
2722            const int64_t limitNs = mStartNs - lagNs;
2723            if (currentTimeNanos < limitNs) {
2724                ALOGD("correcting timestamp time for pause, "
2725                        "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
2726                        (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
2727                timestamp.mTime = convertNsToTimespec(limitNs);
2728                currentTimeNanos = limitNs;
2729            }
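            // Illustrative example (hypothetical values): with mAfLatency = 20 ms the lag is
            // 20,000,000 ns; if mStartNs = 100,000,000 then limitNs = 80,000,000, and any
            // reported time earlier than that is clamped to limitNs above.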
2730
2731            // retrograde check
2732            if (currentTimeNanos < previousTimeNanos) {
2733                ALOGW("retrograde timestamp time corrected, %lld < %lld",
2734                        (long long)currentTimeNanos, (long long)previousTimeNanos);
2735                timestamp.mTime = mPreviousTimestamp.mTime;
2736                // currentTimeNanos not used below.
2737            }
2738
2739            // Looking at the signed delta will work even when the timestamp
2740            // positions are wrapping around.
2741            int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2742                    - mPreviousTimestamp.mPosition).signedValue();
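            // Illustrative wraparound example (hypothetical values): if timestamp.mPosition is
            // 0x00000010 and mPreviousTimestamp.mPosition is 0xFFFFFFF0, the Modulo difference
            // is 0x20, so signedValue() reports +32 frames instead of a huge negative delta.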
2743            if (deltaPosition < 0) {
2744                // Only report once per position instead of spamming the log.
2745                if (!mRetrogradeMotionReported) {
2746                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2747                            deltaPosition,
2748                            timestamp.mPosition,
2749                            mPreviousTimestamp.mPosition);
2750                    mRetrogradeMotionReported = true;
2751                }
2752            } else {
2753                mRetrogradeMotionReported = false;
2754            }
2755            if (deltaPosition < 0) {
2756                timestamp.mPosition = mPreviousTimestamp.mPosition;
2757                deltaPosition = 0;
2758            }
2759#if 0
2760            // Uncomment this to verify audio timestamp rate.
2761            const int64_t deltaTime =
2762                    audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
2763            if (deltaTime != 0) {
2764                const int64_t computedSampleRate =
2765                        deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
2766                ALOGD("computedSampleRate:%u  sampleRate:%u",
2767                        (unsigned)computedSampleRate, mSampleRate);
2768            }
2769#endif
2770        }
2771        mPreviousTimestamp = timestamp;
2772        mPreviousTimestampValid = true;
2773    }
2774
2775    return status;
2776}
2777
2778String8 AudioTrack::getParameters(const String8& keys)
2779{
2780    audio_io_handle_t output = getOutput();
2781    if (output != AUDIO_IO_HANDLE_NONE) {
2782        return AudioSystem::getParameters(output, keys);
2783    } else {
2784        return String8::empty();
2785    }
2786}
2787
2788bool AudioTrack::isOffloaded() const
2789{
2790    AutoMutex lock(mLock);
2791    return isOffloaded_l();
2792}
2793
2794bool AudioTrack::isDirect() const
2795{
2796    AutoMutex lock(mLock);
2797    return isDirect_l();
2798}
2799
2800bool AudioTrack::isOffloadedOrDirect() const
2801{
2802    AutoMutex lock(mLock);
2803    return isOffloadedOrDirect_l();
2804}
2805
2806
2807status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2808{
2809
2810    const size_t SIZE = 256;
2811    char buffer[SIZE];
2812    String8 result;
2813
2814    result.append(" AudioTrack::dump\n");
2815    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2816            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2817    result.append(buffer);
2818    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2819            mChannelCount, mFrameCount);
2820    result.append(buffer);
2821    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
2822            mSampleRate, mPlaybackRate.mSpeed, mStatus);
2823    result.append(buffer);
2824    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2825    result.append(buffer);
2826    ::write(fd, result.string(), result.size());
2827    return NO_ERROR;
2828}
2829
2830uint32_t AudioTrack::getUnderrunCount() const
2831{
2832    AutoMutex lock(mLock);
2833    return getUnderrunCount_l();
2834}
2835
2836uint32_t AudioTrack::getUnderrunCount_l() const
2837{
2838    return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2839}
2840
2841uint32_t AudioTrack::getUnderrunFrames() const
2842{
2843    AutoMutex lock(mLock);
2844    return mProxy->getUnderrunFrames();
2845}
2846
2847status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2848{
2849    if (callback == 0) {
2850        ALOGW("%s adding NULL callback!", __FUNCTION__);
2851        return BAD_VALUE;
2852    }
2853    AutoMutex lock(mLock);
2854    if (mDeviceCallback == callback) {
2855        ALOGW("%s adding same callback!", __FUNCTION__);
2856        return INVALID_OPERATION;
2857    }
2858    status_t status = NO_ERROR;
2859    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2860        if (mDeviceCallback != 0) {
2861            ALOGW("%s callback already present!", __FUNCTION__);
2862            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2863        }
2864        status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
2865    }
2866    mDeviceCallback = callback;
2867    return status;
2868}
2869
2870status_t AudioTrack::removeAudioDeviceCallback(
2871        const sp<AudioSystem::AudioDeviceCallback>& callback)
2872{
2873    if (callback == 0) {
2874        ALOGW("%s removing NULL callback!", __FUNCTION__);
2875        return BAD_VALUE;
2876    }
2877    AutoMutex lock(mLock);
2878    if (mDeviceCallback != callback) {
2879        ALOGW("%s removing different callback!", __FUNCTION__);
2880        return INVALID_OPERATION;
2881    }
2882    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2883        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2884    }
2885    mDeviceCallback = 0;
2886    return NO_ERROR;
2887}
2888
2889status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
2890{
2891    if (msec == nullptr ||
2892            (location != ExtendedTimestamp::LOCATION_SERVER
2893                    && location != ExtendedTimestamp::LOCATION_KERNEL)) {
2894        return BAD_VALUE;
2895    }
2896    AutoMutex lock(mLock);
2897    // inclusive of offloaded and direct tracks.
2898    //
2899    // It is possible, but not enabled, to allow duration computation for non-pcm
2900    // audio_has_proportional_frames() formats because they currently drain at a rate
2901    // equivalent to the pcm sample rate * frame size.
2902    if (!isPurePcmData_l()) {
2903        return INVALID_OPERATION;
2904    }
2905    ExtendedTimestamp ets;
2906    if (getTimestamp_l(&ets) == OK
2907            && ets.mTimeNs[location] > 0) {
2908        int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
2909                - ets.mPosition[location];
2910        if (diff < 0) {
2911            *msec = 0;
2912        } else {
2913            // ms is the remaining playback time, computed from the frame difference
2914            int64_t ms = (int64_t)((double)diff * 1000 /
2915                    ((double)mSampleRate * mPlaybackRate.mSpeed));
2916            // clockdiff is the timestamp age (negative)
2917            int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
2918                    ets.mTimeNs[location]
2919                    + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
2920                    - systemTime(SYSTEM_TIME_MONOTONIC);
2921
2922            //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
2923            static const int NANOS_PER_MILLIS = 1000000;
2924            *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
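            // Illustrative example (hypothetical values): diff = 48000 frames at
            // mSampleRate = 48000 and speed 1.0 gives ms = 1000; a timestamp that is 5 ms old
            // contributes clockdiff = -5,000,000 ns, so *msec = 995.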
2925        }
2926        return NO_ERROR;
2927    }
2928    if (location != ExtendedTimestamp::LOCATION_SERVER) {
2929        return INVALID_OPERATION; // LOCATION_KERNEL is not available
2930    }
2931    // use server position directly (offloaded and direct arrive here)
2932    updateAndGetPosition_l();
2933    int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
2934    *msec = (diff <= 0) ? 0
2935            : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
2936    return NO_ERROR;
2937}
2938
2939bool AudioTrack::hasStarted()
2940{
2941    AutoMutex lock(mLock);
2942    switch (mState) {
2943    case STATE_STOPPED:
2944        if (isOffloadedOrDirect_l()) {
2945            // Report true if we have started in the past.
2946            return mStartFromZeroUs > 0;
2947        }
2948        // A normal audio track may still be draining, so
2949        // check if stream has ended.  This covers fasttrack position
2950        // instability and start/stop without any data written.
2951        if (mProxy->getStreamEndDone()) {
2952            return true;
2953        }
2954        // fall through
2955    case STATE_ACTIVE:
2956    case STATE_STOPPING:
2957        break;
2958    case STATE_PAUSED:
2959    case STATE_PAUSED_STOPPING:
2960    case STATE_FLUSHED:
2961        return false;  // we're not active
2962    default:
2963        LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
2964        break;
2965    }
2966
2967    // wait indicates whether we need to wait for a timestamp.
2968    // This is conservatively figured - if we encounter an unexpected error
2969    // then we will not wait.
2970    bool wait = false;
2971    if (isOffloadedOrDirect_l()) {
2972        AudioTimestamp ts;
2973        status_t status = getTimestamp_l(ts);
2974        if (status == WOULD_BLOCK) {
2975            wait = true;
2976        } else if (status == OK) {
2977            wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
2978        }
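        // For example, if the reported ts.mPosition still equals mStartTs.mPosition (or is 0),
        // no new frames have been presented since start(), so we keep waiting.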
2979        ALOGV("hasStarted wait:%d  ts:%u  start position:%lld",
2980                (int)wait,
2981                ts.mPosition,
2982                (long long)mStartTs.mPosition);
2983    } else {
2984        int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
2985        ExtendedTimestamp ets;
2986        status_t status = getTimestamp_l(&ets);
2987        if (status == WOULD_BLOCK) {  // no SERVER or KERNEL frame info in ets
2988            wait = true;
2989        } else if (status == OK) {
2990            for (location = ExtendedTimestamp::LOCATION_KERNEL;
2991                    location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
2992                if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
2993                    continue;
2994                }
2995                wait = ets.mPosition[location] == 0
2996                        || ets.mPosition[location] == mStartEts.mPosition[location];
2997                break;
2998            }
2999        }
3000        ALOGV("hasStarted wait:%d  ets:%lld  start position:%lld",
3001                (int)wait,
3002                (long long)ets.mPosition[location],
3003                (long long)mStartEts.mPosition[location]);
3004    }
3005    return !wait;
3006}
3007
3008// =========================================================================
3009
3010void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
3011{
3012    sp<AudioTrack> audioTrack = mAudioTrack.promote();
3013    if (audioTrack != 0) {
3014        AutoMutex lock(audioTrack->mLock);
3015        audioTrack->mProxy->binderDied();
3016    }
3017}
3018
3019// =========================================================================
3020
3021AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
3022    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
3023      mIgnoreNextPausedInt(false)
3024{
3025}
3026
3027AudioTrack::AudioTrackThread::~AudioTrackThread()
3028{
3029}
3030
3031bool AudioTrack::AudioTrackThread::threadLoop()
3032{
3033    {
3034        AutoMutex _l(mMyLock);
3035        if (mPaused) {
3036            // TODO check return value and handle or log
3037            mMyCond.wait(mMyLock);
3038            // caller will check for exitPending()
3039            return true;
3040        }
3041        if (mIgnoreNextPausedInt) {
3042            mIgnoreNextPausedInt = false;
3043            mPausedInt = false;
3044        }
3045        if (mPausedInt) {
3046            // TODO use futex instead of condition, for event flag "or"
3047            if (mPausedNs > 0) {
3048                // TODO check return value and handle or log
3049                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
3050            } else {
3051                // TODO check return value and handle or log
3052                mMyCond.wait(mMyLock);
3053            }
3054            mPausedInt = false;
3055            return true;
3056        }
3057    }
3058    if (exitPending()) {
3059        return false;
3060    }
3061    nsecs_t ns = mReceiver.processAudioBuffer();
3062    switch (ns) {
3063    case 0:
3064        return true;
3065    case NS_INACTIVE:
3066        pauseInternal();
3067        return true;
3068    case NS_NEVER:
3069        return false;
3070    case NS_WHENEVER:
3071        // Event driven: call wake() when callback notifications conditions change.
3072        ns = INT64_MAX;
3073        // fall through
3074    default:
3075        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
3076        pauseInternal(ns);
3077        return true;
3078    }
3079}
3080
3081void AudioTrack::AudioTrackThread::requestExit()
3082{
3083    // must be in this order to avoid a race condition
3084    Thread::requestExit();
3085    resume();
3086}
3087
3088void AudioTrack::AudioTrackThread::pause()
3089{
3090    AutoMutex _l(mMyLock);
3091    mPaused = true;
3092}
3093
3094void AudioTrack::AudioTrackThread::resume()
3095{
3096    AutoMutex _l(mMyLock);
3097    mIgnoreNextPausedInt = true;
3098    if (mPaused || mPausedInt) {
3099        mPaused = false;
3100        mPausedInt = false;
3101        mMyCond.signal();
3102    }
3103}
3104
3105void AudioTrack::AudioTrackThread::wake()
3106{
3107    AutoMutex _l(mMyLock);
3108    if (!mPaused) {
3109        // wake() might be called while servicing a callback - ignore the next
3110        // pause time and call processAudioBuffer.
3111        mIgnoreNextPausedInt = true;
3112        if (mPausedInt && mPausedNs > 0) {
3113            // audio track is active and internally paused with timeout.
3114            mPausedInt = false;
3115            mMyCond.signal();
3116        }
3117    }
3118}
3119
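// Descriptive note (derived from threadLoop() above): if ns > 0 the internal pause times out
// after roughly ns nanoseconds; otherwise the thread waits until resume() or wake() signals it.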
3120void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3121{
3122    AutoMutex _l(mMyLock);
3123    mPausedInt = true;
3124    mPausedNs = ns;
3125}
3126
3127} // namespace android
3128