// AudioTrack.cpp, revision ea2b9c07b34079f0dbd8610a511e006e69a15adc
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/primitives.h>
26#include <binder/IPCThreadState.h>
27#include <media/AudioTrack.h>
28#include <utils/Log.h>
29#include <private/media/AudioTrackShared.h>
30#include <media/IAudioFlinger.h>
31#include <media/AudioPolicyHelper.h>
32#include <media/AudioResamplerPublic.h>
33
34#define WAIT_PERIOD_MS                  10
35#define WAIT_STREAM_END_TIMEOUT_SEC     120
36static const int kMaxLoopCountNotifications = 32;
37
38namespace android {
39// ---------------------------------------------------------------------------
40
41// TODO: Move to a separate .h
42
43template <typename T>
44static inline const T &min(const T &x, const T &y) {
45    return x < y ? x : y;
46}
47
48template <typename T>
49static inline const T &max(const T &x, const T &y) {
50    return x > y ? x : y;
51}
52
53static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
54{
55    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
56}
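// Worked example (illustrative only): 480 frames at 48000 Hz and speed 1.0f correspond to
// 480 * 1e9 / 48000 = 10,000,000 ns, i.e. a 10 ms buffer period.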
57
58static int64_t convertTimespecToUs(const struct timespec &tv)
59{
60    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
61}
62
63// current monotonic time in microseconds.
64static int64_t getNowUs()
65{
66    struct timespec tv;
67    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
68    return convertTimespecToUs(tv);
69}
70
71// FIXME: we don't use the pitch setting in the time stretcher (not working);
72// instead we emulate it using our sample rate converter.
73static const bool kFixPitch = true; // enable pitch fix
74static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
75{
76    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
77}
78
79static inline float adjustSpeed(float speed, float pitch)
80{
81    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
82}
83
84static inline float adjustPitch(float pitch)
85{
86    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
87}
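// Worked example of the pitch emulation above (illustrative values, not executed here):
// for a requested {speed = 1.0f, pitch = 2.0f} on a 48000 Hz track,
//   adjustSampleRate(48000, 2.0f) -> 96000   // content is resampled one octave up
//   adjustSpeed(1.0f, 2.0f)       -> 0.5f    // stretcher halves the speed to keep duration
//   adjustPitch(2.0f)             -> 1.0f    // stretcher itself runs at normal pitch
// so the rendered duration is unchanged while the perceived pitch doubles.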
88
89// Must match similar computation in createTrack_l in Threads.cpp.
90// TODO: Move to a common library
91static size_t calculateMinFrameCount(
92        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
93        uint32_t sampleRate, float speed)
94{
95    // Ensure that buffer depth covers at least audio hardware latency
96    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
97    if (minBufCount < 2) {
98        minBufCount = 2;
99    }
100    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
101            "sampleRate %u  speed %f  minBufCount: %u",
102            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount);
103    return minBufCount * sourceFramesNeededWithTimestretch(
104            sampleRate, afFrameCount, afSampleRate, speed);
105}
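// Worked example (assumed hardware values, for illustration only):
//   afLatencyMs = 40, afFrameCount = 960, afSampleRate = 48000
//     => one mixer period = (1000 * 960) / 48000 = 20 ms
//     => minBufCount = 40 / 20 = 2
//   For a 44100 Hz client at speed 1.0 the result is
//   2 * sourceFramesNeededWithTimestretch(44100, 960, 48000, 1.0),
//   i.e. roughly 2 * 882 ~= 1764 source frames plus a small rounding/interpolation margin.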
106
107// static
108status_t AudioTrack::getMinFrameCount(
109        size_t* frameCount,
110        audio_stream_type_t streamType,
111        uint32_t sampleRate)
112{
113    if (frameCount == NULL) {
114        return BAD_VALUE;
115    }
116
117    // FIXME handle in server, like createTrack_l(), possible missing info:
118    //          audio_io_handle_t output
119    //          audio_format_t format
120    //          audio_channel_mask_t channelMask
121    //          audio_output_flags_t flags (FAST)
122    uint32_t afSampleRate;
123    status_t status;
124    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
125    if (status != NO_ERROR) {
126        ALOGE("Unable to query output sample rate for stream type %d; status %d",
127                streamType, status);
128        return status;
129    }
130    size_t afFrameCount;
131    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
132    if (status != NO_ERROR) {
133        ALOGE("Unable to query output frame count for stream type %d; status %d",
134                streamType, status);
135        return status;
136    }
137    uint32_t afLatency;
138    status = AudioSystem::getOutputLatency(&afLatency, streamType);
139    if (status != NO_ERROR) {
140        ALOGE("Unable to query output latency for stream type %d; status %d",
141                streamType, status);
142        return status;
143    }
144
145    // When called from createTrack, speed is 1.0f (normal speed).
146    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
147    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f);
148
149    // The formula above should always produce a non-zero value under normal circumstances:
150    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
151    // Return error in the unlikely event that it does not, as that's part of the API contract.
152    if (*frameCount == 0) {
153        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
154                streamType, sampleRate);
155        return BAD_VALUE;
156    }
157    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
158            *frameCount, afFrameCount, afSampleRate, afLatency);
159    return NO_ERROR;
160}
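// Illustrative client-side use of getMinFrameCount() (sketch only; "chosenFrameCount" is a
// hypothetical variable, not part of this file):
//
//   size_t minFrames = 0;
//   status_t res = AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 44100);
//   if (res == NO_ERROR) {
//       // ask for at least the minimum; a small multiple gives more underrun headroom
//       size_t chosenFrameCount = 2 * minFrames;
//   }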
161
162// ---------------------------------------------------------------------------
163
164AudioTrack::AudioTrack()
165    : mStatus(NO_INIT),
166      mState(STATE_STOPPED),
167      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
168      mPreviousSchedulingGroup(SP_DEFAULT),
169      mPausedPosition(0),
170      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
171{
172    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
173    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
174    mAttributes.flags = 0x0;
175    strcpy(mAttributes.tags, "");
176}
177
178AudioTrack::AudioTrack(
179        audio_stream_type_t streamType,
180        uint32_t sampleRate,
181        audio_format_t format,
182        audio_channel_mask_t channelMask,
183        size_t frameCount,
184        audio_output_flags_t flags,
185        callback_t cbf,
186        void* user,
187        uint32_t notificationFrames,
188        audio_session_t sessionId,
189        transfer_type transferType,
190        const audio_offload_info_t *offloadInfo,
191        int uid,
192        pid_t pid,
193        const audio_attributes_t* pAttributes,
194        bool doNotReconnect)
195    : mStatus(NO_INIT),
196      mState(STATE_STOPPED),
197      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
198      mPreviousSchedulingGroup(SP_DEFAULT),
199      mPausedPosition(0),
200      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
201{
202    mStatus = set(streamType, sampleRate, format, channelMask,
203            frameCount, flags, cbf, user, notificationFrames,
204            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
205            offloadInfo, uid, pid, pAttributes, doNotReconnect);
206}
207
208AudioTrack::AudioTrack(
209        audio_stream_type_t streamType,
210        uint32_t sampleRate,
211        audio_format_t format,
212        audio_channel_mask_t channelMask,
213        const sp<IMemory>& sharedBuffer,
214        audio_output_flags_t flags,
215        callback_t cbf,
216        void* user,
217        uint32_t notificationFrames,
218        audio_session_t sessionId,
219        transfer_type transferType,
220        const audio_offload_info_t *offloadInfo,
221        int uid,
222        pid_t pid,
223        const audio_attributes_t* pAttributes,
224        bool doNotReconnect)
225    : mStatus(NO_INIT),
226      mState(STATE_STOPPED),
227      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
228      mPreviousSchedulingGroup(SP_DEFAULT),
229      mPausedPosition(0),
230      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
231{
232    mStatus = set(streamType, sampleRate, format, channelMask,
233            0 /*frameCount*/, flags, cbf, user, notificationFrames,
234            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
235            uid, pid, pAttributes, doNotReconnect);
236}
237
238AudioTrack::~AudioTrack()
239{
240    if (mStatus == NO_ERROR) {
241        // Make sure that callback function exits in the case where
242        // it is looping on buffer full condition in obtainBuffer().
243        // Otherwise the callback thread will never exit.
244        stop();
245        if (mAudioTrackThread != 0) {
246            mProxy->interrupt();
247            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
248            mAudioTrackThread->requestExitAndWait();
249            mAudioTrackThread.clear();
250        }
251        // No lock here: worst case we remove a NULL callback which will be a nop
252        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
253            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
254        }
255        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
256        mAudioTrack.clear();
257        mCblkMemory.clear();
258        mSharedBuffer.clear();
259        IPCThreadState::self()->flushCommands();
260        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
261                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
262        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
263    }
264}
265
266status_t AudioTrack::set(
267        audio_stream_type_t streamType,
268        uint32_t sampleRate,
269        audio_format_t format,
270        audio_channel_mask_t channelMask,
271        size_t frameCount,
272        audio_output_flags_t flags,
273        callback_t cbf,
274        void* user,
275        uint32_t notificationFrames,
276        const sp<IMemory>& sharedBuffer,
277        bool threadCanCallJava,
278        audio_session_t sessionId,
279        transfer_type transferType,
280        const audio_offload_info_t *offloadInfo,
281        int uid,
282        pid_t pid,
283        const audio_attributes_t* pAttributes,
284        bool doNotReconnect)
285{
286    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
287          "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
288          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
289          sessionId, transferType, uid, pid);
290
291    mThreadCanCallJava = threadCanCallJava;
292
293    switch (transferType) {
294    case TRANSFER_DEFAULT:
295        if (sharedBuffer != 0) {
296            transferType = TRANSFER_SHARED;
297        } else if (cbf == NULL || threadCanCallJava) {
298            transferType = TRANSFER_SYNC;
299        } else {
300            transferType = TRANSFER_CALLBACK;
301        }
302        break;
303    case TRANSFER_CALLBACK:
304        if (cbf == NULL || sharedBuffer != 0) {
305            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
306            return BAD_VALUE;
307        }
308        break;
309    case TRANSFER_OBTAIN:
310    case TRANSFER_SYNC:
311        if (sharedBuffer != 0) {
312            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
313            return BAD_VALUE;
314        }
315        break;
316    case TRANSFER_SHARED:
317        if (sharedBuffer == 0) {
318            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
319            return BAD_VALUE;
320        }
321        break;
322    default:
323        ALOGE("Invalid transfer type %d", transferType);
324        return BAD_VALUE;
325    }
326    mSharedBuffer = sharedBuffer;
327    mTransfer = transferType;
328    mDoNotReconnect = doNotReconnect;
329
330    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
331            sharedBuffer->size());
332
333    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
334
335    // invariant that mAudioTrack != 0 is true only after set() returns successfully
336    if (mAudioTrack != 0) {
337        ALOGE("Track already in use");
338        return INVALID_OPERATION;
339    }
340
341    // handle default values first.
342    if (streamType == AUDIO_STREAM_DEFAULT) {
343        streamType = AUDIO_STREAM_MUSIC;
344    }
345    if (pAttributes == NULL) {
346        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
347            ALOGE("Invalid stream type %d", streamType);
348            return BAD_VALUE;
349        }
350        mStreamType = streamType;
351
352    } else {
353        // stream type shouldn't be looked at, this track has audio attributes
354        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
355        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
356                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
357        mStreamType = AUDIO_STREAM_DEFAULT;
358        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
359            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
360        }
361        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
362            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
363        }
364    }
365
366    // these below should probably come from the audioFlinger too...
367    if (format == AUDIO_FORMAT_DEFAULT) {
368        format = AUDIO_FORMAT_PCM_16_BIT;
369    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
370        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
371    }
372
373    // validate parameters
374    if (!audio_is_valid_format(format)) {
375        ALOGE("Invalid format %#x", format);
376        return BAD_VALUE;
377    }
378    mFormat = format;
379
380    if (!audio_is_output_channel(channelMask)) {
381        ALOGE("Invalid channel mask %#x", channelMask);
382        return BAD_VALUE;
383    }
384    mChannelMask = channelMask;
385    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
386    mChannelCount = channelCount;
387
388    // force direct flag if format is not linear PCM
389    // or offload was requested
390    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
391            || !audio_is_linear_pcm(format)) {
392        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
393                    ? "Offload request, forcing to Direct Output"
394                    : "Not linear PCM, forcing to Direct Output");
395        flags = (audio_output_flags_t)
396                // FIXME why can't we allow direct AND fast?
397                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
398    }
399
400    // force direct flag if HW A/V sync requested
401    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
402        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
403    }
404
405    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
406        if (audio_has_proportional_frames(format)) {
407            mFrameSize = channelCount * audio_bytes_per_sample(format);
408        } else {
409            mFrameSize = sizeof(uint8_t);
410        }
411    } else {
412        ALOG_ASSERT(audio_has_proportional_frames(format));
413        mFrameSize = channelCount * audio_bytes_per_sample(format);
414        // createTrack will return an error if PCM format is not supported by server,
415        // so no need to check for specific PCM formats here
416    }
417
418    // sampling rate must be specified for direct outputs
419    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
420        return BAD_VALUE;
421    }
422    mSampleRate = sampleRate;
423    mOriginalSampleRate = sampleRate;
424    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
425
426    // Make copy of input parameter offloadInfo so that in the future:
427    //  (a) createTrack_l doesn't need it as an input parameter
428    //  (b) we can support re-creation of offloaded tracks
429    if (offloadInfo != NULL) {
430        mOffloadInfoCopy = *offloadInfo;
431        mOffloadInfo = &mOffloadInfoCopy;
432    } else {
433        mOffloadInfo = NULL;
434    }
435
436    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
437    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
438    mSendLevel = 0.0f;
439    // mFrameCount is initialized in createTrack_l
440    mReqFrameCount = frameCount;
441    mNotificationFramesReq = notificationFrames;
442    mNotificationFramesAct = 0;
443    if (sessionId == AUDIO_SESSION_ALLOCATE) {
444        mSessionId = (audio_session_t) AudioSystem::newAudioUniqueId(AUDIO_UNIQUE_ID_USE_SESSION);
445    } else {
446        mSessionId = sessionId;
447    }
448    int callingpid = IPCThreadState::self()->getCallingPid();
449    int mypid = getpid();
450    if (uid == -1 || (callingpid != mypid)) {
451        mClientUid = IPCThreadState::self()->getCallingUid();
452    } else {
453        mClientUid = uid;
454    }
455    if (pid == -1 || (callingpid != mypid)) {
456        mClientPid = callingpid;
457    } else {
458        mClientPid = pid;
459    }
460    mAuxEffectId = 0;
461    mOrigFlags = mFlags = flags;
462    mCbf = cbf;
463
464    if (cbf != NULL) {
465        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
466        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
467        // thread begins in paused state, and will not reference us until start()
468    }
469
470    // create the IAudioTrack
471    status_t status = createTrack_l();
472
473    if (status != NO_ERROR) {
474        if (mAudioTrackThread != 0) {
475            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
476            mAudioTrackThread->requestExitAndWait();
477            mAudioTrackThread.clear();
478        }
479        return status;
480    }
481
482    mStatus = NO_ERROR;
483    mUserData = user;
484    mLoopCount = 0;
485    mLoopStart = 0;
486    mLoopEnd = 0;
487    mLoopCountNotified = 0;
488    mMarkerPosition = 0;
489    mMarkerReached = false;
490    mNewPosition = 0;
491    mUpdatePeriod = 0;
492    mPosition = 0;
493    mReleased = 0;
494    mStartUs = 0;
495    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
496    mSequence = 1;
497    mObservedSequence = mSequence;
498    mInUnderrun = false;
499    mPreviousTimestampValid = false;
500    mTimestampStartupGlitchReported = false;
501    mRetrogradeMotionReported = false;
502    mUnderrunCountOffset = 0;
503    mFramesWritten = 0;
504    mFramesWrittenServerOffset = 0;
505
506    return NO_ERROR;
507}
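// Illustrative sketch of driving set() through the streaming constructor in TRANSFER_CALLBACK
// mode (assumed client code; "myCookie" is a hypothetical user pointer, and the callback
// signature follows callback_t in AudioTrack.h):
//
//   static void audioCallback(int event, void* user, void* info) {
//       if (event == AudioTrack::EVENT_MORE_DATA) {
//           AudioTrack::Buffer* b = static_cast<AudioTrack::Buffer*>(info);
//           // fill b->raw with up to b->frameCount frames, then set b->size in bytes
//       }
//   }
//
//   sp<AudioTrack> track = new AudioTrack(
//           AUDIO_STREAM_MUSIC, 48000, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
//           0 /* frameCount: let the library pick */, AUDIO_OUTPUT_FLAG_NONE,
//           audioCallback, myCookie);
//   if (track->initCheck() == NO_ERROR) {
//       track->start();
//   }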
508
509// -------------------------------------------------------------------------
510
511status_t AudioTrack::start()
512{
513    AutoMutex lock(mLock);
514
515    if (mState == STATE_ACTIVE) {
516        return INVALID_OPERATION;
517    }
518
519    mInUnderrun = true;
520
521    State previousState = mState;
522    if (previousState == STATE_PAUSED_STOPPING) {
523        mState = STATE_STOPPING;
524    } else {
525        mState = STATE_ACTIVE;
526    }
527    (void) updateAndGetPosition_l();
528    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
529        // reset current position as seen by client to 0
530        mPosition = 0;
531        mPreviousTimestampValid = false;
532        mTimestampStartupGlitchReported = false;
533        mRetrogradeMotionReported = false;
534
535        // If previousState == STATE_STOPPED, we clear the timestamp so that it
536        // needs a new server push. We also reactivate markers (mMarkerPosition != 0)
537        // as the position is reset to 0. This is legacy behavior. This is not done
538        // in stop() to avoid a race condition where the last marker event is issued twice.
539        // Note: the if is technically unnecessary because previousState == STATE_FLUSHED
540        // is only for streaming tracks, and mMarkerReached is already set to false.
541        if (previousState == STATE_STOPPED) {
542            // read last server side position change via timestamp
543            ExtendedTimestamp ets;
544            if (mProxy->getTimestamp(&ets) == OK &&
545                    ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
546                mFramesWrittenServerOffset = -(ets.mPosition[ExtendedTimestamp::LOCATION_SERVER]
547                                                             + ets.mFlushed);
548            }
549            mFramesWritten = 0;
550            mProxy->clearTimestamp(); // need new server push for valid timestamp
551            mMarkerReached = false;
552        }
553
554        // For offloaded tracks, we don't know if the hardware counters are really zero here,
555        // since the flush is asynchronous and stop may not fully drain.
556        // We save the time when the track is started to later verify whether
557        // the counters are realistic (i.e. start from zero after this time).
558        mStartUs = getNowUs();
559
560        // force refresh of remaining frames by processAudioBuffer() as last
561        // write before stop could be partial.
562        mRefreshRemaining = true;
563    }
564    mNewPosition = mPosition + mUpdatePeriod;
565    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
566
567    status_t status = NO_ERROR;
568    if (!(flags & CBLK_INVALID)) {
569        status = mAudioTrack->start();
570        if (status == DEAD_OBJECT) {
571            flags |= CBLK_INVALID;
572        }
573    }
574    if (flags & CBLK_INVALID) {
575        status = restoreTrack_l("start");
576    }
577
578    // resume or pause the callback thread as needed.
579    sp<AudioTrackThread> t = mAudioTrackThread;
580    if (status == NO_ERROR) {
581        if (t != 0) {
582            if (previousState == STATE_STOPPING) {
583                mProxy->interrupt();
584            } else {
585                t->resume();
586            }
587        } else {
588            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
589            get_sched_policy(0, &mPreviousSchedulingGroup);
590            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
591        }
592    } else {
593        ALOGE("start() status %d", status);
594        mState = previousState;
595        if (t != 0) {
596            if (previousState != STATE_STOPPING) {
597                t->pause();
598            }
599        } else {
600            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
601            set_sched_policy(0, mPreviousSchedulingGroup);
602        }
603    }
604
605    return status;
606}
607
608void AudioTrack::stop()
609{
610    AutoMutex lock(mLock);
611    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
612        return;
613    }
614
615    if (isOffloaded_l()) {
616        mState = STATE_STOPPING;
617    } else {
618        mState = STATE_STOPPED;
619        mReleased = 0;
620    }
621
622    mProxy->interrupt();
623    mAudioTrack->stop();
624
625    // Note: legacy handling - stop does not clear playback marker
626    // and periodic update counter, but flush does for streaming tracks.
627
628    if (mSharedBuffer != 0) {
629        // clear buffer position and loop count.
630        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
631                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
632    }
633
634    sp<AudioTrackThread> t = mAudioTrackThread;
635    if (t != 0) {
636        if (!isOffloaded_l()) {
637            t->pause();
638        }
639    } else {
640        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
641        set_sched_policy(0, mPreviousSchedulingGroup);
642    }
643}
644
645bool AudioTrack::stopped() const
646{
647    AutoMutex lock(mLock);
648    return mState != STATE_ACTIVE;
649}
650
651void AudioTrack::flush()
652{
653    if (mSharedBuffer != 0) {
654        return;
655    }
656    AutoMutex lock(mLock);
657    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
658        return;
659    }
660    flush_l();
661}
662
663void AudioTrack::flush_l()
664{
665    ALOG_ASSERT(mState != STATE_ACTIVE);
666
667    // clear playback marker and periodic update counter
668    mMarkerPosition = 0;
669    mMarkerReached = false;
670    mUpdatePeriod = 0;
671    mRefreshRemaining = true;
672
673    mState = STATE_FLUSHED;
674    mReleased = 0;
675    if (isOffloaded_l()) {
676        mProxy->interrupt();
677    }
678    mProxy->flush();
679    mAudioTrack->flush();
680}
681
682void AudioTrack::pause()
683{
684    AutoMutex lock(mLock);
685    if (mState == STATE_ACTIVE) {
686        mState = STATE_PAUSED;
687    } else if (mState == STATE_STOPPING) {
688        mState = STATE_PAUSED_STOPPING;
689    } else {
690        return;
691    }
692    mProxy->interrupt();
693    mAudioTrack->pause();
694
695    if (isOffloaded_l()) {
696        if (mOutput != AUDIO_IO_HANDLE_NONE) {
697            // An offload output can be re-used between two audio tracks having
698            // the same configuration. A timestamp query for a paused track
699            // while the other is running would return an incorrect time.
700            // To fix this, cache the playback position on a pause() and return
701            // this time when requested until the track is resumed.
702
703            // OffloadThread sends HAL pause in its threadLoop. Time saved
704            // here can be slightly off.
705
706            // TODO: check return code for getRenderPosition.
707
708            uint32_t halFrames;
709            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
710            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
711        }
712    }
713}
714
715status_t AudioTrack::setVolume(float left, float right)
716{
717    // This duplicates a test by AudioTrack JNI, but that is not the only caller
718    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
719            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
720        return BAD_VALUE;
721    }
722
723    AutoMutex lock(mLock);
724    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
725    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
726
727    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
728
729    if (isOffloaded_l()) {
730        mAudioTrack->signal();
731    }
732    return NO_ERROR;
733}
734
735status_t AudioTrack::setVolume(float volume)
736{
737    return setVolume(volume, volume);
738}
739
740status_t AudioTrack::setAuxEffectSendLevel(float level)
741{
742    // This duplicates a test by AudioTrack JNI, but that is not the only caller
743    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
744        return BAD_VALUE;
745    }
746
747    AutoMutex lock(mLock);
748    mSendLevel = level;
749    mProxy->setSendLevel(level);
750
751    return NO_ERROR;
752}
753
754void AudioTrack::getAuxEffectSendLevel(float* level) const
755{
756    if (level != NULL) {
757        *level = mSendLevel;
758    }
759}
760
761status_t AudioTrack::setSampleRate(uint32_t rate)
762{
763    AutoMutex lock(mLock);
764    if (rate == mSampleRate) {
765        return NO_ERROR;
766    }
767    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
768        return INVALID_OPERATION;
769    }
770    if (mOutput == AUDIO_IO_HANDLE_NONE) {
771        return NO_INIT;
772    }
773    // NOTE: it is theoretically possible, but highly unlikely, that a device change
774    // could mean a previously allowed sampling rate is no longer allowed.
775    uint32_t afSamplingRate;
776    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
777        return NO_INIT;
778    }
779    // pitch is emulated by adjusting speed and sampleRate
780    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
781    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
782        return BAD_VALUE;
783    }
784    // TODO: Should we also check if the buffer size is compatible?
785
786    mSampleRate = rate;
787    mProxy->setSampleRate(effectiveSampleRate);
788
789    return NO_ERROR;
790}
791
792uint32_t AudioTrack::getSampleRate() const
793{
794    AutoMutex lock(mLock);
795
796    // sample rate can be updated during playback by the offloaded decoder so we need to
797    // query the HAL and update if needed.
798    // FIXME use Proxy return channel to update the rate from server and avoid polling here
799    if (isOffloadedOrDirect_l()) {
800        if (mOutput != AUDIO_IO_HANDLE_NONE) {
801            uint32_t sampleRate = 0;
802            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
803            if (status == NO_ERROR) {
804                mSampleRate = sampleRate;
805            }
806        }
807    }
808    return mSampleRate;
809}
810
811uint32_t AudioTrack::getOriginalSampleRate() const
812{
813    return mOriginalSampleRate;
814}
815
816status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
817{
818    AutoMutex lock(mLock);
819    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
820        return NO_ERROR;
821    }
822    if (isOffloadedOrDirect_l()) {
823        return INVALID_OPERATION;
824    }
825    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
826        return INVALID_OPERATION;
827    }
828    // pitch is emulated by adjusting speed and sampleRate
829    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
830    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
831    const float effectivePitch = adjustPitch(playbackRate.mPitch);
832    AudioPlaybackRate playbackRateTemp = playbackRate;
833    playbackRateTemp.mSpeed = effectiveSpeed;
834    playbackRateTemp.mPitch = effectivePitch;
835
836    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
837        return BAD_VALUE;
838    }
839    // Check if the buffer size is compatible.
840    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
841        ALOGV("setPlaybackRate(%f, %f) failed", playbackRate.mSpeed, playbackRate.mPitch);
842        return BAD_VALUE;
843    }
844
845    // Check resampler ratios are within bounds
846    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
847        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
848                playbackRate.mSpeed, playbackRate.mPitch);
849        return BAD_VALUE;
850    }
851
852    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
853        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
854                        playbackRate.mSpeed, playbackRate.mPitch);
855        return BAD_VALUE;
856    }
857    mPlaybackRate = playbackRate;
858    // set effective rates
859    mProxy->setPlaybackRate(playbackRateTemp);
860    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
861    return NO_ERROR;
862}
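// Illustrative playback-rate change (sketch; assumes an already-created streaming "track"):
//
//   AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
//   rate.mSpeed = 1.5f;   // play 1.5x faster
//   rate.mPitch = 1.0f;   // keep original pitch (emulated via resampling, see adjust* above)
//   if (track->setPlaybackRate(rate) != NO_ERROR) {
//       // rejected: offloaded/direct/fast track, invalid rate, or buffer too small for speed
//   }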
863
864const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
865{
866    AutoMutex lock(mLock);
867    return mPlaybackRate;
868}
869
870ssize_t AudioTrack::getBufferSizeInFrames()
871{
872    AutoMutex lock(mLock);
873    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
874        return NO_INIT;
875    }
876    return (ssize_t) mProxy->getBufferSizeInFrames();
877}
878
879ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
880{
881    AutoMutex lock(mLock);
882    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
883        return NO_INIT;
884    }
885    // Reject if timed track or compressed audio.
886    if (!audio_is_linear_pcm(mFormat)) {
887        return INVALID_OPERATION;
888    }
889    return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
890}
891
892status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
893{
894    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
895        return INVALID_OPERATION;
896    }
897
898    if (loopCount == 0) {
899        ;
900    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
901            loopEnd - loopStart >= MIN_LOOP) {
902        ;
903    } else {
904        return BAD_VALUE;
905    }
906
907    AutoMutex lock(mLock);
908    // See setPosition() regarding setting parameters such as loop points or position while active
909    if (mState == STATE_ACTIVE) {
910        return INVALID_OPERATION;
911    }
912    setLoop_l(loopStart, loopEnd, loopCount);
913    return NO_ERROR;
914}
915
916void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
917{
918    // We do not update the periodic notification point.
919    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
920    mLoopCount = loopCount;
921    mLoopEnd = loopEnd;
922    mLoopStart = loopStart;
923    mLoopCountNotified = loopCount;
924    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
925
926    // Waking the AudioTrackThread is not needed as this cannot be called when active.
927}
928
929status_t AudioTrack::setMarkerPosition(uint32_t marker)
930{
931    // The only purpose of setting marker position is to get a callback
932    if (mCbf == NULL || isOffloadedOrDirect()) {
933        return INVALID_OPERATION;
934    }
935
936    AutoMutex lock(mLock);
937    mMarkerPosition = marker;
938    mMarkerReached = false;
939
940    sp<AudioTrackThread> t = mAudioTrackThread;
941    if (t != 0) {
942        t->wake();
943    }
944    return NO_ERROR;
945}
946
947status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
948{
949    if (isOffloadedOrDirect()) {
950        return INVALID_OPERATION;
951    }
952    if (marker == NULL) {
953        return BAD_VALUE;
954    }
955
956    AutoMutex lock(mLock);
957    mMarkerPosition.getValue(marker);
958
959    return NO_ERROR;
960}
961
962status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
963{
964    // The only purpose of setting position update period is to get a callback
965    if (mCbf == NULL || isOffloadedOrDirect()) {
966        return INVALID_OPERATION;
967    }
968
969    AutoMutex lock(mLock);
970    mNewPosition = updateAndGetPosition_l() + updatePeriod;
971    mUpdatePeriod = updatePeriod;
972
973    sp<AudioTrackThread> t = mAudioTrackThread;
974    if (t != 0) {
975        t->wake();
976    }
977    return NO_ERROR;
978}
979
980status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
981{
982    if (isOffloadedOrDirect()) {
983        return INVALID_OPERATION;
984    }
985    if (updatePeriod == NULL) {
986        return BAD_VALUE;
987    }
988
989    AutoMutex lock(mLock);
990    *updatePeriod = mUpdatePeriod;
991
992    return NO_ERROR;
993}
994
995status_t AudioTrack::setPosition(uint32_t position)
996{
997    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
998        return INVALID_OPERATION;
999    }
1000    if (position > mFrameCount) {
1001        return BAD_VALUE;
1002    }
1003
1004    AutoMutex lock(mLock);
1005    // Currently we require that the player is inactive before setting parameters such as position
1006    // or loop points.  Otherwise, there could be a race condition: the application could read the
1007    // current position, compute a new position or loop parameters, and then set that position or
1008    // loop parameters but it would do the "wrong" thing since the position has continued to advance
1009    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
1010    // to specify how it wants to handle such scenarios.
1011    if (mState == STATE_ACTIVE) {
1012        return INVALID_OPERATION;
1013    }
1014    // After setting the position, use full update period before notification.
1015    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1016    mStaticProxy->setBufferPosition(position);
1017
1018    // Waking the AudioTrackThread is not needed as this cannot be called when active.
1019    return NO_ERROR;
1020}
1021
1022status_t AudioTrack::getPosition(uint32_t *position)
1023{
1024    if (position == NULL) {
1025        return BAD_VALUE;
1026    }
1027
1028    AutoMutex lock(mLock);
1029    // FIXME: offloaded and direct tracks call into the HAL for render positions
1030    // for compressed/synced data; however, we use proxy position for pure linear pcm data
1031    // as we do not know the capability of the HAL for pcm position support and standby.
1032    // There may be some latency differences between the HAL position and the proxy position.
1033    if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
1034        uint32_t dspFrames = 0;
1035
1036        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
1037            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
1038            *position = mPausedPosition;
1039            return NO_ERROR;
1040        }
1041
1042        if (mOutput != AUDIO_IO_HANDLE_NONE) {
1043            uint32_t halFrames; // actually unused
1044            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
1045            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
1046        }
1047        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
1048        // due to hardware latency. We leave this behavior for now.
1049        *position = dspFrames;
1050    } else {
1051        if (mCblk->mFlags & CBLK_INVALID) {
1052            (void) restoreTrack_l("getPosition");
1053            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1054            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1055        }
1056
1057        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1058        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
1059                0 : updateAndGetPosition_l().value();
1060    }
1061    return NO_ERROR;
1062}
1063
1064status_t AudioTrack::getBufferPosition(uint32_t *position)
1065{
1066    if (mSharedBuffer == 0) {
1067        return INVALID_OPERATION;
1068    }
1069    if (position == NULL) {
1070        return BAD_VALUE;
1071    }
1072
1073    AutoMutex lock(mLock);
1074    *position = mStaticProxy->getBufferPosition();
1075    return NO_ERROR;
1076}
1077
1078status_t AudioTrack::reload()
1079{
1080    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1081        return INVALID_OPERATION;
1082    }
1083
1084    AutoMutex lock(mLock);
1085    // See setPosition() regarding setting parameters such as loop points or position while active
1086    if (mState == STATE_ACTIVE) {
1087        return INVALID_OPERATION;
1088    }
1089    mNewPosition = mUpdatePeriod;
1090    (void) updateAndGetPosition_l();
1091    mPosition = 0;
1092    mPreviousTimestampValid = false;
1093#if 0
1094    // The documentation is not clear on the behavior of reload() and the restoration
1095    // of loop count. Historically we have not restored loop count, start, end,
1096    // but it makes sense if one desires to repeat playing a particular sound.
1097    if (mLoopCount != 0) {
1098        mLoopCountNotified = mLoopCount;
1099        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1100    }
1101#endif
1102    mStaticProxy->setBufferPosition(0);
1103    return NO_ERROR;
1104}
1105
1106audio_io_handle_t AudioTrack::getOutput() const
1107{
1108    AutoMutex lock(mLock);
1109    return mOutput;
1110}
1111
1112status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1113    AutoMutex lock(mLock);
1114    if (mSelectedDeviceId != deviceId) {
1115        mSelectedDeviceId = deviceId;
1116        android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1117    }
1118    return NO_ERROR;
1119}
1120
1121audio_port_handle_t AudioTrack::getOutputDevice() {
1122    AutoMutex lock(mLock);
1123    return mSelectedDeviceId;
1124}
1125
1126audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1127    AutoMutex lock(mLock);
1128    if (mOutput == AUDIO_IO_HANDLE_NONE) {
1129        return AUDIO_PORT_HANDLE_NONE;
1130    }
1131    return AudioSystem::getDeviceIdForIo(mOutput);
1132}
1133
1134status_t AudioTrack::attachAuxEffect(int effectId)
1135{
1136    AutoMutex lock(mLock);
1137    status_t status = mAudioTrack->attachAuxEffect(effectId);
1138    if (status == NO_ERROR) {
1139        mAuxEffectId = effectId;
1140    }
1141    return status;
1142}
1143
1144audio_stream_type_t AudioTrack::streamType() const
1145{
1146    if (mStreamType == AUDIO_STREAM_DEFAULT) {
1147        return audio_attributes_to_stream_type(&mAttributes);
1148    }
1149    return mStreamType;
1150}
1151
1152// -------------------------------------------------------------------------
1153
1154// must be called with mLock held
1155status_t AudioTrack::createTrack_l()
1156{
1157    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1158    if (audioFlinger == 0) {
1159        ALOGE("Could not get audioflinger");
1160        return NO_INIT;
1161    }
1162
1163    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
1164        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
1165    }
1166    audio_io_handle_t output;
1167    audio_stream_type_t streamType = mStreamType;
1168    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
1169
1170    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
1171    // After fast request is denied, we will request again if IAudioTrack is re-created.
1172
1173    status_t status;
1174    status = AudioSystem::getOutputForAttr(attr, &output,
1175                                           mSessionId, &streamType, mClientUid,
1176                                           mSampleRate, mFormat, mChannelMask,
1177                                           mFlags, mSelectedDeviceId, mOffloadInfo);
1178
1179    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
1180        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
1181              " channel mask %#x, flags %#x",
1182              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
1183        return BAD_VALUE;
1184    }
1185    {
1186    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
1187    // we must release it ourselves if anything goes wrong.
1188
1189    // Not all of these values are needed under all conditions, but it is easier to get them all
1190    status = AudioSystem::getLatency(output, &mAfLatency);
1191    if (status != NO_ERROR) {
1192        ALOGE("getLatency(%d) failed status %d", output, status);
1193        goto release;
1194    }
1195    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
1196
1197    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
1198    if (status != NO_ERROR) {
1199        ALOGE("getFrameCount(output=%d) status %d", output, status);
1200        goto release;
1201    }
1202
1203    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
1204    if (status != NO_ERROR) {
1205        ALOGE("getSamplingRate(output=%d) status %d", output, status);
1206        goto release;
1207    }
1208    if (mSampleRate == 0) {
1209        mSampleRate = mAfSampleRate;
1210        mOriginalSampleRate = mAfSampleRate;
1211    }
1212
1213    // Client can only express a preference for FAST.  Server will perform additional tests.
1214    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1215        bool useCaseAllowed =
1216            // either of these use cases:
1217            // use case 1: shared buffer
1218            (mSharedBuffer != 0) ||
1219            // use case 2: callback transfer mode
1220            (mTransfer == TRANSFER_CALLBACK) ||
1221            // use case 3: obtain/release mode
1222            (mTransfer == TRANSFER_OBTAIN) ||
1223            // use case 4: synchronous write
1224            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
1225        // sample rates must also match
1226        bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
1227        if (!fastAllowed) {
1228            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
1229                "track %u Hz, output %u Hz",
1230                mTransfer, mSampleRate, mAfSampleRate);
1231            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1232        }
1233    }
1234
1235    mNotificationFramesAct = mNotificationFramesReq;
1236
1237    size_t frameCount = mReqFrameCount;
1238    if (!audio_has_proportional_frames(mFormat)) {
1239
1240        if (mSharedBuffer != 0) {
1241            // Same comment as below about ignoring frameCount parameter for set()
1242            frameCount = mSharedBuffer->size();
1243        } else if (frameCount == 0) {
1244            frameCount = mAfFrameCount;
1245        }
1246        if (mNotificationFramesAct != frameCount) {
1247            mNotificationFramesAct = frameCount;
1248        }
1249    } else if (mSharedBuffer != 0) {
1250        // FIXME: Ensure client side memory buffers need
1251        // not have additional alignment beyond sample
1252        // (e.g. 16 bit stereo accessed as 32 bit frame).
1253        size_t alignment = audio_bytes_per_sample(mFormat);
1254        if (alignment & 1) {
1255            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
1256            alignment = 1;
1257        }
1258        if (mChannelCount > 1) {
1259            // More than 2 channels does not require stronger alignment than stereo
1260            alignment <<= 1;
1261        }
1262        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1263            ALOGE("Invalid buffer alignment: address %p, channel count %u",
1264                    mSharedBuffer->pointer(), mChannelCount);
1265            status = BAD_VALUE;
1266            goto release;
1267        }
1268
1269        // When initializing a shared buffer AudioTrack via constructors,
1270        // there's no frameCount parameter.
1271        // But when initializing a shared buffer AudioTrack via set(),
1272        // there _is_ a frameCount parameter.  We silently ignore it.
1273        frameCount = mSharedBuffer->size() / mFrameSize;
1274    } else {
1275        // For fast tracks the frame count calculations and checks are done by server
1276
1277        if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
1278            // for normal tracks precompute the frame count based on speed.
1279            const size_t minFrameCount = calculateMinFrameCount(
1280                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
1281                    mPlaybackRate.mSpeed);
1282            if (frameCount < minFrameCount) {
1283                frameCount = minFrameCount;
1284            }
1285        }
1286    }
1287
1288    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1289
1290    pid_t tid = -1;
1291    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1292        trackFlags |= IAudioFlinger::TRACK_FAST;
1293        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1294            tid = mAudioTrackThread->getTid();
1295        }
1296    }
1297
1298    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1299        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1300    }
1301
1302    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1303        trackFlags |= IAudioFlinger::TRACK_DIRECT;
1304    }
1305
1306    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1307                                // but we will still need the original value also
1308    audio_session_t originalSessionId = mSessionId;
1309    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1310                                                      mSampleRate,
1311                                                      mFormat,
1312                                                      mChannelMask,
1313                                                      &temp,
1314                                                      &trackFlags,
1315                                                      mSharedBuffer,
1316                                                      output,
1317                                                      tid,
1318                                                      &mSessionId,
1319                                                      mClientUid,
1320                                                      &status);
1321    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1322            "session ID changed from %d to %d", originalSessionId, mSessionId);
1323
1324    if (status != NO_ERROR) {
1325        ALOGE("AudioFlinger could not create track, status: %d", status);
1326        goto release;
1327    }
1328    ALOG_ASSERT(track != 0);
1329
1330    // AudioFlinger now owns the reference to the I/O handle,
1331    // so we are no longer responsible for releasing it.
1332
1333    // FIXME compare to AudioRecord
1334    sp<IMemory> iMem = track->getCblk();
1335    if (iMem == 0) {
1336        ALOGE("Could not get control block");
1337        return NO_INIT;
1338    }
1339    void *iMemPointer = iMem->pointer();
1340    if (iMemPointer == NULL) {
1341        ALOGE("Could not get control block pointer");
1342        return NO_INIT;
1343    }
1344    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1345    if (mAudioTrack != 0) {
1346        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1347        mDeathNotifier.clear();
1348    }
1349    mAudioTrack = track;
1350    mCblkMemory = iMem;
1351    IPCThreadState::self()->flushCommands();
1352
1353    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1354    mCblk = cblk;
1355    // note that temp is the (possibly revised) value of frameCount
1356    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1357        // In the current design, the AudioTrack client checks and ensures frame count validity
1358        // before passing it to AudioFlinger, so AudioFlinger should not return a different value,
1359        // except for fast tracks, which use a special method of assigning frame count.
1360        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1361    }
1362    frameCount = temp;
1363
1364    mAwaitBoost = false;
1365    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1366        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1367            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1368            if (!mThreadCanCallJava) {
1369                mAwaitBoost = true;
1370            }
1371        } else {
1372            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1373            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1374        }
1375    }
1376
1377    // Make sure that application is notified with sufficient margin before underrun.
1378    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
1379    //  n = 1   fast track with single buffering; nBuffering is ignored
1380    //  n = 2   fast track with double buffering
1381    //  n = 2   normal track, (including those with sample rate conversion)
1382    //  n >= 3  very high latency or very small notification interval (unused).
1383    // FIXME Move the computation from client side to server side,
1384    //       and allow nBuffering to be larger than 1 for OpenSL ES, like it can be for Java.
1385    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1386        size_t maxNotificationFrames = frameCount;
1387        if (!(trackFlags & IAudioFlinger::TRACK_FAST)) {
1388            const uint32_t nBuffering = 2;
1389            maxNotificationFrames /= nBuffering;
1390        }
1391        if (mNotificationFramesAct == 0 || mNotificationFramesAct > maxNotificationFrames) {
1392            ALOGW("Client adjusted notificationFrames from %u to %zu for frameCount %zu",
1393                    mNotificationFramesAct, maxNotificationFrames, frameCount);
1394            mNotificationFramesAct = (uint32_t) maxNotificationFrames;
1395        }
1396    }
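    // e.g. a normal (non-fast) PCM streaming track with frameCount = 3528 and no requested
    // notification period ends up with mNotificationFramesAct = 3528 / 2 = 1764, i.e. the
    // server wakes the client at least once per half buffer (illustrative numbers).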
1397
1398    // We retain a copy of the I/O handle, but don't own the reference
1399    mOutput = output;
1400    mRefreshRemaining = true;
1401
1402    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1403    // is the value of pointer() for the shared buffer, otherwise buffers points
1404    // immediately after the control block.  This address is for the mapping within client
1405    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1406    void* buffers;
1407    if (mSharedBuffer == 0) {
1408        buffers = cblk + 1;
1409    } else {
1410        buffers = mSharedBuffer->pointer();
1411        if (buffers == NULL) {
1412            ALOGE("Could not get buffer pointer");
1413            return NO_INIT;
1414        }
1415    }
1416
1417    mAudioTrack->attachAuxEffect(mAuxEffectId);
1418    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
1419    // FIXME don't believe this lie
1420    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;
1421
1422    mFrameCount = frameCount;
1423    // If IAudioTrack is re-created, don't let the requested frameCount
1424    // decrease.  This can confuse clients that cache frameCount().
1425    if (frameCount > mReqFrameCount) {
1426        mReqFrameCount = frameCount;
1427    }
1428
1429    // reset server position to 0 as we have new cblk.
1430    mServer = 0;
1431
1432    // update proxy
1433    if (mSharedBuffer == 0) {
1434        mStaticProxy.clear();
1435        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1436    } else {
1437        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1438        mProxy = mStaticProxy;
1439    }
1440
1441    mProxy->setVolumeLR(gain_minifloat_pack(
1442            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1443            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1444
1445    mProxy->setSendLevel(mSendLevel);
1446    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1447    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1448    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1449    mProxy->setSampleRate(effectiveSampleRate);
1450
1451    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1452    playbackRateTemp.mSpeed = effectiveSpeed;
1453    playbackRateTemp.mPitch = effectivePitch;
1454    mProxy->setPlaybackRate(playbackRateTemp);
1455    mProxy->setMinimum(mNotificationFramesAct);
1456
1457    mDeathNotifier = new DeathNotifier(this);
1458    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1459
1460    if (mDeviceCallback != 0) {
1461        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
1462    }
1463
1464    return NO_ERROR;
1465    }
1466
1467release:
1468    AudioSystem::releaseOutput(output, streamType, mSessionId);
1469    if (status == NO_ERROR) {
1470        status = NO_INIT;
1471    }
1472    return status;
1473}
1474
1475status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1476{
1477    if (audioBuffer == NULL) {
1478        if (nonContig != NULL) {
1479            *nonContig = 0;
1480        }
1481        return BAD_VALUE;
1482    }
1483    if (mTransfer != TRANSFER_OBTAIN) {
1484        audioBuffer->frameCount = 0;
1485        audioBuffer->size = 0;
1486        audioBuffer->raw = NULL;
1487        if (nonContig != NULL) {
1488            *nonContig = 0;
1489        }
1490        return INVALID_OPERATION;
1491    }
1492
1493    const struct timespec *requested;
1494    struct timespec timeout;
1495    if (waitCount == -1) {
1496        requested = &ClientProxy::kForever;
1497    } else if (waitCount == 0) {
1498        requested = &ClientProxy::kNonBlocking;
1499    } else if (waitCount > 0) {
1500        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1501        timeout.tv_sec = ms / 1000;
1502        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1503        requested = &timeout;
1504    } else {
1505        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1506        requested = NULL;
1507    }
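    // A positive waitCount is in units of WAIT_PERIOD_MS, so e.g. (illustrative) waitCount == 3
    // yields a 30 ms timeout above.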
1508    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1509}
1510
1511status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1512        struct timespec *elapsed, size_t *nonContig)
1513{
1514    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1515    uint32_t oldSequence = 0;
1516    uint32_t newSequence;
1517
1518    Proxy::Buffer buffer;
1519    status_t status = NO_ERROR;
1520
1521    static const int32_t kMaxTries = 5;
1522    int32_t tryCounter = kMaxTries;
1523
1524    do {
1525        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1526        // keep them from going away if another thread re-creates the track during obtainBuffer()
1527        sp<AudioTrackClientProxy> proxy;
1528        sp<IMemory> iMem;
1529
1530        {   // start of lock scope
1531            AutoMutex lock(mLock);
1532
1533            newSequence = mSequence;
1534            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1535            if (status == DEAD_OBJECT) {
1536                // re-create track, unless someone else has already done so
1537                if (newSequence == oldSequence) {
1538                    status = restoreTrack_l("obtainBuffer");
1539                    if (status != NO_ERROR) {
1540                        buffer.mFrameCount = 0;
1541                        buffer.mRaw = NULL;
1542                        buffer.mNonContig = 0;
1543                        break;
1544                    }
1545                }
1546            }
1547            oldSequence = newSequence;
1548
1549            if (status == NOT_ENOUGH_DATA) {
1550                restartIfDisabled();
1551            }
1552
1553            // Keep the extra references
1554            proxy = mProxy;
1555            iMem = mCblkMemory;
1556
1557            if (mState == STATE_STOPPING) {
1558                status = -EINTR;
1559                buffer.mFrameCount = 0;
1560                buffer.mRaw = NULL;
1561                buffer.mNonContig = 0;
1562                break;
1563            }
1564
1565            // Non-blocking if track is stopped or paused
1566            if (mState != STATE_ACTIVE) {
1567                requested = &ClientProxy::kNonBlocking;
1568            }
1569
1570        }   // end of lock scope
1571
1572        buffer.mFrameCount = audioBuffer->frameCount;
1573        // FIXME starts the requested timeout and elapsed over from scratch
1574        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1575    } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));
1576
1577    audioBuffer->frameCount = buffer.mFrameCount;
1578    audioBuffer->size = buffer.mFrameCount * mFrameSize;
1579    audioBuffer->raw = buffer.mRaw;
1580    if (nonContig != NULL) {
1581        *nonContig = buffer.mNonContig;
1582    }
1583    return status;
1584}
1585
1586void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1587{
1588    // FIXME add error checking on mode, by adding an internal version
1589    if (mTransfer == TRANSFER_SHARED) {
1590        return;
1591    }
1592
1593    size_t stepCount = audioBuffer->size / mFrameSize;
1594    if (stepCount == 0) {
1595        return;
1596    }
1597
1598    Proxy::Buffer buffer;
1599    buffer.mFrameCount = stepCount;
1600    buffer.mRaw = audioBuffer->raw;
1601
1602    AutoMutex lock(mLock);
1603    mReleased += stepCount;
1604    mInUnderrun = false;
1605    mProxy->releaseBuffer(&buffer);
1606
1607    // restart track if it was disabled by audioflinger due to previous underrun
1608    restartIfDisabled();
1609}
1610
1611void AudioTrack::restartIfDisabled()
1612{
1613    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1614    if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1615        ALOGW("%s() track %p disabled due to previous underrun, restarting", __func__, this);
1616        // FIXME ignoring status
1617        mAudioTrack->start();
1618    }
1619}
1620
1621// -------------------------------------------------------------------------
1622
1623ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1624{
1625    if (mTransfer != TRANSFER_SYNC) {
1626        return INVALID_OPERATION;
1627    }
1628
1629    if (isDirect()) {
1630        AutoMutex lock(mLock);
1631        int32_t flags = android_atomic_and(
1632                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1633                            &mCblk->mFlags);
1634        if (flags & CBLK_INVALID) {
1635            return DEAD_OBJECT;
1636        }
1637    }
1638
1639    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1640        // Sanity check: the user is most likely passing an error code, which would
1641        // make the return value ambiguous (actualSize vs error).
1642        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1643        return BAD_VALUE;
1644    }
1645
1646    size_t written = 0;
1647    Buffer audioBuffer;
1648
1649    while (userSize >= mFrameSize) {
1650        audioBuffer.frameCount = userSize / mFrameSize;
1651
1652        status_t err = obtainBuffer(&audioBuffer,
1653                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1654        if (err < 0) {
1655            if (written > 0) {
1656                break;
1657            }
1658            return ssize_t(err);
1659        }
1660
1661        size_t toWrite = audioBuffer.size;
1662        memcpy(audioBuffer.i8, buffer, toWrite);
1663        buffer = ((const char *) buffer) + toWrite;
1664        userSize -= toWrite;
1665        written += toWrite;
1666
1667        releaseBuffer(&audioBuffer);
1668    }
1669
1670    if (written > 0) {
1671        mFramesWritten += written / mFrameSize;
1672    }
1673    return written;
1674}
1675
1676// -------------------------------------------------------------------------
1677
1678nsecs_t AudioTrack::processAudioBuffer()
1679{
1680    // Currently the AudioTrack thread is not created if there are no callbacks.
1681    // Would it ever make sense to run the thread, even without callbacks?
1682    // If so, then replace this by checks at each use for mCbf != NULL.
1683    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1684
1685    mLock.lock();
1686    if (mAwaitBoost) {
1687        mAwaitBoost = false;
1688        mLock.unlock();
1689        static const int32_t kMaxTries = 5;
1690        int32_t tryCounter = kMaxTries;
1691        uint32_t pollUs = 10000;
1692        do {
1693            int policy = sched_getscheduler(0);
1694            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1695                break;
1696            }
1697            usleep(pollUs);
1698            pollUs <<= 1;
1699        } while (tryCounter-- > 0);
1700        if (tryCounter < 0) {
1701            ALOGE("did not receive expected priority boost on time");
1702        }
1703        // Run again immediately
1704        return 0;
1705    }
1706
1707    // Can only reference mCblk while locked
1708    int32_t flags = android_atomic_and(
1709        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1710
1711    // Check for track invalidation
1712    if (flags & CBLK_INVALID) {
1713        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1714        // AudioSystem cache. We should not exit here but after calling the callback so
1715        // that the upper layers can recreate the track
1716        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1717            status_t status __unused = restoreTrack_l("processAudioBuffer");
1718            // FIXME unused status
1719            // after restoration, continue below to make sure that the loop and buffer events
1720            // are notified because they have been cleared from mCblk->mFlags above.
1721        }
1722    }
1723
1724    bool waitStreamEnd = mState == STATE_STOPPING;
1725    bool active = mState == STATE_ACTIVE;
1726
1727    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1728    bool newUnderrun = false;
1729    if (flags & CBLK_UNDERRUN) {
1730#if 0
1731        // Currently in shared buffer mode, when the server reaches the end of buffer,
1732        // the track stays active in continuous underrun state.  It's up to the application
1733        // to pause or stop the track, or set the position to a new offset within buffer.
1734        // This was some experimental code to auto-pause on underrun.   Keeping it here
1735        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1736        if (mTransfer == TRANSFER_SHARED) {
1737            mState = STATE_PAUSED;
1738            active = false;
1739        }
1740#endif
1741        if (!mInUnderrun) {
1742            mInUnderrun = true;
1743            newUnderrun = true;
1744        }
1745    }
1746
1747    // Get current position of server
1748    Modulo<uint32_t> position(updateAndGetPosition_l());
1749
1750    // Manage marker callback
1751    bool markerReached = false;
1752    Modulo<uint32_t> markerPosition(mMarkerPosition);
1753    // uses 32 bit wraparound for comparison with position.
1754    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1755        mMarkerReached = markerReached = true;
1756    }
1757
1758    // Determine number of new position callback(s) that will be needed, while locked
1759    size_t newPosCount = 0;
1760    Modulo<uint32_t> newPosition(mNewPosition);
1761    uint32_t updatePeriod = mUpdatePeriod;
1762    // FIXME fails for wraparound, need 64 bits
1763    if (updatePeriod > 0 && position >= newPosition) {
1764        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1765        mNewPosition += updatePeriod * newPosCount;
1766    }
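    // Example (illustrative): position == 1000, mNewPosition == 400, updatePeriod == 200 gives
    // newPosCount == (600 / 200) + 1 == 4 callbacks, and mNewPosition advances to 1200.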
1767
1768    // Cache other fields that will be needed soon
1769    uint32_t sampleRate = mSampleRate;
1770    float speed = mPlaybackRate.mSpeed;
1771    const uint32_t notificationFrames = mNotificationFramesAct;
1772    if (mRefreshRemaining) {
1773        mRefreshRemaining = false;
1774        mRemainingFrames = notificationFrames;
1775        mRetryOnPartialBuffer = false;
1776    }
1777    size_t misalignment = mProxy->getMisalignment();
1778    uint32_t sequence = mSequence;
1779    sp<AudioTrackClientProxy> proxy = mProxy;
1780
1781    // Determine the number of new loop callback(s) that will be needed, while locked.
1782    int loopCountNotifications = 0;
1783    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1784
1785    if (mLoopCount > 0) {
1786        int loopCount;
1787        size_t bufferPosition;
1788        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1789        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1790        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1791        mLoopCountNotified = loopCount; // discard any excess notifications
1792    } else if (mLoopCount < 0) {
1793        // FIXME: We're not accurate with notification count and position with infinite looping
1794        // since loopCount from server side will always return -1 (we could decrement it).
1795        size_t bufferPosition = mStaticProxy->getBufferPosition();
1796        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1797        loopPeriod = mLoopEnd - bufferPosition;
1798    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1799        size_t bufferPosition = mStaticProxy->getBufferPosition();
1800        loopPeriod = mFrameCount - bufferPosition;
1801    }
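    // Example (illustrative): for a finite loop with mLoopEnd == 1000 and bufferPosition == 400,
    // loopPeriod is 600 frames until the next EVENT_LOOP_END.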
1802
1803    // These fields don't need to be cached, because they are assigned only by set():
1804    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1805    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1806
1807    mLock.unlock();
1808
1809    // get anchor time to account for callbacks.
1810    const nsecs_t timeBeforeCallbacks = systemTime();
1811
1812    if (waitStreamEnd) {
1813        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
1814        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
1815        // (and make sure we don't callback for more data while we're stopping).
1816        // This helps with position, marker notifications, and track invalidation.
1817        struct timespec timeout;
1818        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1819        timeout.tv_nsec = 0;
1820
1821        status_t status = proxy->waitStreamEndDone(&timeout);
1822        switch (status) {
1823        case NO_ERROR:
1824        case DEAD_OBJECT:
1825        case TIMED_OUT:
1826            if (status != DEAD_OBJECT) {
1827                // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
1828                // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
1829                mCbf(EVENT_STREAM_END, mUserData, NULL);
1830            }
1831            {
1832                AutoMutex lock(mLock);
1833                // The previously assigned value of waitStreamEnd is no longer valid,
1834                // since the mutex has been unlocked and either the callback handler
1835                // or another thread could have re-started the AudioTrack during that time.
1836                waitStreamEnd = mState == STATE_STOPPING;
1837                if (waitStreamEnd) {
1838                    mState = STATE_STOPPED;
1839                    mReleased = 0;
1840                }
1841            }
1842            if (waitStreamEnd && status != DEAD_OBJECT) {
1843                return NS_INACTIVE;
1844            }
1845            break;
1846        }
1847        return 0;
1848    }
1849
1850    // perform callbacks while unlocked
1851    if (newUnderrun) {
1852        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1853    }
1854    while (loopCountNotifications > 0) {
1855        mCbf(EVENT_LOOP_END, mUserData, NULL);
1856        --loopCountNotifications;
1857    }
1858    if (flags & CBLK_BUFFER_END) {
1859        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1860    }
1861    if (markerReached) {
1862        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1863    }
1864    while (newPosCount > 0) {
1865        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
1866        mCbf(EVENT_NEW_POS, mUserData, &temp);
1867        newPosition += updatePeriod;
1868        newPosCount--;
1869    }
1870
1871    if (mObservedSequence != sequence) {
1872        mObservedSequence = sequence;
1873        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1874        // for offloaded tracks, just wait for the upper layers to recreate the track
1875        if (isOffloadedOrDirect()) {
1876            return NS_INACTIVE;
1877        }
1878    }
1879
1880    // if inactive, then don't run me again until re-started
1881    if (!active) {
1882        return NS_INACTIVE;
1883    }
1884
1885    // Compute the estimated time until the next timed event (position, markers, loops)
1886    // FIXME only for non-compressed audio
1887    uint32_t minFrames = ~0;
1888    if (!markerReached && position < markerPosition) {
1889        minFrames = (markerPosition - position).value();
1890    }
1891    if (loopPeriod > 0 && loopPeriod < minFrames) {
1892        // loopPeriod is already adjusted for actual position.
1893        minFrames = loopPeriod;
1894    }
1895    if (updatePeriod > 0) {
1896        minFrames = min(minFrames, (newPosition - position).value());
1897    }
1898
1899    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1900    static const uint32_t kPoll = 0;
1901    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1902        minFrames = kPoll * notificationFrames;
1903    }
1904
1905    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1906    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
1907    const nsecs_t timeAfterCallbacks = systemTime();
1908
1909    // Convert frame units to time units
1910    nsecs_t ns = NS_WHENEVER;
1911    if (minFrames != (uint32_t) ~0) {
1912        ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
1913        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
1914        // TODO: Should we warn if the callback time is too long?
1915        if (ns < 0) ns = 0;
1916    }
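    // Example (illustrative): minFrames == 480 at sampleRate == 48000 and speed == 1.0 is 10 ms
    // of audio, so ns is about 10 ms plus the WAIT_PERIOD_MS fudge, minus the callback time.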
1917
1918    // If not supplying data by EVENT_MORE_DATA, then we're done
1919    if (mTransfer != TRANSFER_CALLBACK) {
1920        return ns;
1921    }
1922
1923    // EVENT_MORE_DATA callback handling.
1924    // Timing for linear pcm audio data formats can be derived directly from the
1925    // buffer fill level.
1926    // Timing for compressed data is not directly available from the buffer fill level,
1927    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
1928    // to return a certain fill level.
1929
1930    struct timespec timeout;
1931    const struct timespec *requested = &ClientProxy::kForever;
1932    if (ns != NS_WHENEVER) {
1933        timeout.tv_sec = ns / 1000000000LL;
1934        timeout.tv_nsec = ns % 1000000000LL;
1935        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1936        requested = &timeout;
1937    }
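    // Example (illustrative): ns == 2500000000 (2.5 s) splits into tv_sec == 2 and
    // tv_nsec == 500000000.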
1938
1939    size_t writtenFrames = 0;
1940    while (mRemainingFrames > 0) {
1941
1942        Buffer audioBuffer;
1943        audioBuffer.frameCount = mRemainingFrames;
1944        size_t nonContig;
1945        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1946        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1947                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1948        requested = &ClientProxy::kNonBlocking;
1949        size_t avail = audioBuffer.frameCount + nonContig;
1950        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1951                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1952        if (err != NO_ERROR) {
1953            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1954                    (isOffloaded() && (err == DEAD_OBJECT))) {
1955                // FIXME bug 25195759
1956                return 1000000;
1957            }
1958            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1959            return NS_NEVER;
1960        }
1961
1962        if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
1963            mRetryOnPartialBuffer = false;
1964            if (avail < mRemainingFrames) {
1965                if (ns > 0) { // account for obtain time
1966                    const nsecs_t timeNow = systemTime();
1967                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
1968                }
1969                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
1970                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
1971                    ns = myns;
1972                }
1973                return ns;
1974            }
1975        }
1976
1977        size_t reqSize = audioBuffer.size;
1978        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1979        size_t writtenSize = audioBuffer.size;
1980
1981        // Sanity check on returned size
1982        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1983            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1984                    reqSize, ssize_t(writtenSize));
1985            return NS_NEVER;
1986        }
1987
1988        if (writtenSize == 0) {
1989            // The callback is done filling buffers
1990            // Keep this thread going to handle timed events and
1991            // still try to get more data in intervals of WAIT_PERIOD_MS
1992            // but don't just loop and block the CPU, so wait
1993
1994            // mCbf(EVENT_MORE_DATA, ...) might either
1995            // (1) Block until it can fill the buffer, returning 0 size on EOS.
1996            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
1997            // (3) Return 0 size when no data is available, does not wait for more data.
1998            //
1999            // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2000            // We try to compute the wait time to avoid a tight sleep-wait cycle,
2001            // especially for case (3).
2002            //
2003            // The decision to support (1) and (2) affects the sizing of mRemainingFrames
2004            // and this loop; whereas for case (3) we could simply check once with the full
2005            // buffer size and skip the loop entirely.
2006
2007            nsecs_t myns;
2008            if (audio_has_proportional_frames(mFormat)) {
2009                // time to wait based on buffer occupancy
2010                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2011                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2012                // audio flinger thread buffer size (TODO: adjust for fast tracks)
2013                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2014                // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2015                myns = datans + (afns / 2);
2016            } else {
2017                // FIXME: This could ping quite a bit if the buffer isn't full.
2018                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2019                myns = kWaitPeriodNs;
2020            }
2021            if (ns > 0) { // account for obtain and callback time
2022                const nsecs_t timeNow = systemTime();
2023                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2024            }
2025            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2026                ns = myns;
2027            }
2028            return ns;
2029        }
2030
2031        size_t releasedFrames = writtenSize / mFrameSize;
2032        audioBuffer.frameCount = releasedFrames;
2033        mRemainingFrames -= releasedFrames;
2034        if (misalignment >= releasedFrames) {
2035            misalignment -= releasedFrames;
2036        } else {
2037            misalignment = 0;
2038        }
2039
2040        releaseBuffer(&audioBuffer);
2041        writtenFrames += releasedFrames;
2042
2043        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2044        // if callback doesn't like to accept the full chunk
2045        if (writtenSize < reqSize) {
2046            continue;
2047        }
2048
2049        // There could be enough non-contiguous frames available to satisfy the remaining request
2050        if (mRemainingFrames <= nonContig) {
2051            continue;
2052        }
2053
2054#if 0
2055        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2056        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
2057        // that total to a sum == notificationFrames.
2058        if (0 < misalignment && misalignment <= mRemainingFrames) {
2059            mRemainingFrames = misalignment;
2060            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2061        }
2062#endif
2063
2064    }
2065    if (writtenFrames > 0) {
2066        AutoMutex lock(mLock);
2067        mFramesWritten += writtenFrames;
2068    }
2069    mRemainingFrames = notificationFrames;
2070    mRetryOnPartialBuffer = true;
2071
2072    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2073    return 0;
2074}
2075
2076status_t AudioTrack::restoreTrack_l(const char *from)
2077{
2078    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2079          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2080    ++mSequence;
2081
2082    // refresh the audio configuration cache in this process to make sure we get new
2083    // output parameters and new IAudioFlinger in createTrack_l()
2084    AudioSystem::clearAudioConfigCache();
2085
2086    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2087        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2088        // reconsider enabling for linear PCM encodings when position can be preserved.
2089        return DEAD_OBJECT;
2090    }
2091
2092    // Save so we can return count since creation.
2093    mUnderrunCountOffset = getUnderrunCount_l();
2094
2095    // save the old static buffer position
2096    size_t bufferPosition = 0;
2097    int loopCount = 0;
2098    if (mStaticProxy != 0) {
2099        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2100    }
2101
2102    mFlags = mOrigFlags;
2103
2104    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2105    // following member variables: mAudioTrack, mCblkMemory and mCblk.
2106    // It will also delete the strong references on previous IAudioTrack and IMemory.
2107    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2108    status_t result = createTrack_l();
2109
2110    if (result == NO_ERROR) {
2111        // take the frames that will be lost by track recreation into account in saved position
2112        // For streaming tracks, this is the amount we obtained from the user/client
2113        // (not the number actually consumed at the server - those are already lost).
2114        if (mStaticProxy == 0) {
2115            mPosition = mReleased;
2116        }
2117        // Continue playback from last known position and restore loop.
2118        if (mStaticProxy != 0) {
2119            if (loopCount != 0) {
2120                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2121                        mLoopStart, mLoopEnd, loopCount);
2122            } else {
2123                mStaticProxy->setBufferPosition(bufferPosition);
2124                if (bufferPosition == mFrameCount) {
2125                    ALOGD("restoring track at end of static buffer");
2126                }
2127            }
2128        }
2129        if (mState == STATE_ACTIVE) {
2130            result = mAudioTrack->start();
2131            mFramesWrittenServerOffset = mFramesWritten; // server resets to zero so we offset
2132        }
2133    }
2134    if (result != NO_ERROR) {
2135        ALOGW("restoreTrack_l() failed status %d", result);
2136        mState = STATE_STOPPED;
2137        mReleased = 0;
2138    }
2139
2140    return result;
2141}
2142
2143Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2144{
2145    // This is the sole place to read server consumed frames
2146    Modulo<uint32_t> newServer(mProxy->getPosition());
2147    const int32_t delta = (newServer - mServer).signedValue();
2148    // TODO There is controversy about whether there can be "negative jitter" in server position.
2149    //      This should be investigated further, and if possible, it should be addressed.
2150    //      A more definite failure mode is infrequent polling by client.
2151    //      One could call (void)getPosition_l() in releaseBuffer(),
2152    //      so mReleased and mPosition are always lock-step as best possible.
2153    //      That should ensure delta never goes negative for infrequent polling
2154    //      unless the server has more than 2^31 frames in its buffer,
2155    //      in which case the use of uint32_t for these counters has bigger issues.
2156    ALOGE_IF(delta < 0,
2157            "detected illegal retrograde motion by the server: mServer advanced by %d",
2158            delta);
2159    mServer = newServer;
2160    if (delta > 0) { // avoid retrograde
2161        mPosition += delta;
2162    }
2163    return mPosition;
2164}
2165
2166bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
2167{
2168    // applicable for mixing tracks only (not offloaded or direct)
2169    if (mStaticProxy != 0) {
2170        return true; // static tracks do not have issues with buffer sizing.
2171    }
2172    const size_t minFrameCount =
2173            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed);
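    // Note: calculateMinFrameCount() scales with the source frames needed at the given speed,
    // so e.g. doubling the speed roughly doubles minFrameCount for the same output
    // configuration (illustrative estimate).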
2174    ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
2175            mFrameCount, minFrameCount);
2176    return mFrameCount >= minFrameCount;
2177}
2178
2179status_t AudioTrack::setParameters(const String8& keyValuePairs)
2180{
2181    AutoMutex lock(mLock);
2182    return mAudioTrack->setParameters(keyValuePairs);
2183}
2184
2185status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2186{
2187    if (timestamp == nullptr) {
2188        return BAD_VALUE;
2189    }
2190    AutoMutex lock(mLock);
2191    if (mCblk->mFlags & CBLK_INVALID) {
2192        const status_t status = restoreTrack_l("getTimestampExtended");
2193        if (status != OK) {
2194            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2195            // recommending that the track be recreated.
2196            return DEAD_OBJECT;
2197        }
2198    }
2199    // check for offloaded/direct here in case restoring somehow changed those flags.
2200    if (isOffloadedOrDirect_l()) {
2201        return INVALID_OPERATION; // not supported
2202    }
2203    status_t status = mProxy->getTimestamp(timestamp);
2204    bool found = false;
2205    if (status == OK) {
2206        timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2207        timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2208        // server side frame offset in case AudioTrack has been restored.
2209        for (int i = ExtendedTimestamp::LOCATION_SERVER;
2210                i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2211            if (timestamp->mTimeNs[i] >= 0) {
2212                // apply server offset and the "flush frame correction" here
2213                timestamp->mPosition[i] += mFramesWrittenServerOffset + timestamp->mFlushed;
2214                found = true;
2215            }
2216        }
2217    }
2218    return found ? OK : WOULD_BLOCK;
2219}
2220
2221status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2222{
2223    AutoMutex lock(mLock);
2224
2225    bool previousTimestampValid = mPreviousTimestampValid;
2226    // Set false here to cover all the error return cases.
2227    mPreviousTimestampValid = false;
2228
2229    switch (mState) {
2230    case STATE_ACTIVE:
2231    case STATE_PAUSED:
2232        break; // handle below
2233    case STATE_FLUSHED:
2234    case STATE_STOPPED:
2235        return WOULD_BLOCK;
2236    case STATE_STOPPING:
2237    case STATE_PAUSED_STOPPING:
2238        if (!isOffloaded_l()) {
2239            return INVALID_OPERATION;
2240        }
2241        break; // offloaded tracks handled below
2242    default:
2243        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2244        break;
2245    }
2246
2247    if (mCblk->mFlags & CBLK_INVALID) {
2248        const status_t status = restoreTrack_l("getTimestamp");
2249        if (status != OK) {
2250            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2251            // recommending that the track be recreated.
2252            return DEAD_OBJECT;
2253        }
2254    }
2255
2256    // The presented frame count must always lag behind the consumed frame count.
2257    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
2258
2259    status_t status;
2260    if (isOffloadedOrDirect_l()) {
2261        // use Binder to get timestamp
2262        status = mAudioTrack->getTimestamp(timestamp);
2263    } else {
2264        // read timestamp from shared memory
2265        ExtendedTimestamp ets;
2266        status = mProxy->getTimestamp(&ets);
2267        if (status == OK) {
2268            status = ets.getBestTimestamp(&timestamp);
2269        }
2270        if (status == INVALID_OPERATION) {
2271            status = WOULD_BLOCK;
2272        }
2273    }
2274    if (status != NO_ERROR) {
2275        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2276        return status;
2277    }
2278    if (isOffloadedOrDirect_l()) {
2279        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2280            // use cached paused position in case another offloaded track is running.
2281            timestamp.mPosition = mPausedPosition;
2282            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2283            return NO_ERROR;
2284        }
2285
2286        // Check whether a pending flush or stop has completed, as those commands may
2287        // be asynchronous or return near finish or exhibit glitchy behavior.
2288        //
2289        // Originally this showed up as the first timestamp being a continuation of
2290        // the previous song under gapless playback.
2291        // However, we sometimes see zero timestamps, then a glitch of
2292        // the previous song's position, and then correct timestamps afterwards.
2293        if (mStartUs != 0 && mSampleRate != 0) {
2294            static const int kTimeJitterUs = 100000; // 100 ms
2295            static const int k1SecUs = 1000000;
2296
2297            const int64_t timeNow = getNowUs();
2298
2299            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
2300                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2301                if (timestampTimeUs < mStartUs) {
2302                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
2303                }
2304                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
2305                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2306                        / ((double)mSampleRate * mPlaybackRate.mSpeed);
2307
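                // Example (illustrative): a reported position of 24000 frames at 48 kHz is
                // 500 ms of audio; if only 200 ms have elapsed since start, the check below
                // flags it as a leftover position from the previous track.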
2308                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2309                    // Verify that the counter can't count faster than the sample rate
2310                    // since the start time.  If greater, then that means we may have failed
2311                    // to completely flush or stop the previous playing track.
2312                    ALOGW_IF(!mTimestampStartupGlitchReported,
2313                            "getTimestamp startup glitch detected"
2314                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2315                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
2316                            timestamp.mPosition);
2317                    mTimestampStartupGlitchReported = true;
2318                    if (previousTimestampValid
2319                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2320                        timestamp = mPreviousTimestamp;
2321                        mPreviousTimestampValid = true;
2322                        return NO_ERROR;
2323                    }
2324                    return WOULD_BLOCK;
2325                }
2326                if (deltaPositionByUs != 0) {
2327                    mStartUs = 0; // don't check again, we got valid nonzero position.
2328                }
2329            } else {
2330                mStartUs = 0; // don't check again, start time expired.
2331            }
2332            mTimestampStartupGlitchReported = false;
2333        }
2334    } else {
2335        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2336        (void) updateAndGetPosition_l();
2337        // Server consumed (mServer) and presented both use the same server time base,
2338        // and server consumed is always >= presented.
2339        // The delta between these represents the number of frames in the buffer pipeline.
2340        // If this delta between these is greater than the client position, it means that
2341        // actually presented is still stuck at the starting line (figuratively speaking),
2342        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2343        // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2344        // mPosition exceeds 32 bits.
2345        // TODO Remove when timestamp is updated to contain pipeline status info.
2346        const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2347        if (pipelineDepthInFrames > 0 /* should be true, but we check anyway */
2348                && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2349            return INVALID_OPERATION;
2350        }
2351        // Convert timestamp position from server time base to client time base.
2352        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2353        // But if we change it to 64-bit then this could fail.
2354        // Use Modulo computation here.
2355        timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2356        // Immediately after a call to getPosition_l(), mPosition and
2357        // mServer both represent the same frame position.  mPosition is
2358        // in client's point of view, and mServer is in server's point of
2359        // view.  So the difference between them is the "fudge factor"
2360        // between client and server views due to stop() and/or new
2361        // IAudioTrack.  And timestamp.mPosition is initially in server's
2362        // point of view, so we need to apply the same fudge factor to it.
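        // Example (illustrative): mServer == 10000 and timestamp.mPosition == 8000 give a
        // pipeline depth of 2000 frames; with client mPosition == 12000 the reported position
        // becomes 12000 - 10000 + 8000 == 10000 frames in the client time base.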
2363    }
2364
2365    // Prevent retrograde motion in timestamp.
2366    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2367    if (status == NO_ERROR) {
2368        if (previousTimestampValid) {
2369#define TIME_TO_NANOS(time) ((int64_t)time.tv_sec * 1000000000 + time.tv_nsec)
2370            const int64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
2371            const int64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
2372#undef TIME_TO_NANOS
2373            if (currentTimeNanos < previousTimeNanos) {
2374                ALOGW("retrograde timestamp time");
2375                // FIXME Consider blocking this from propagating upwards.
2376            }
2377
2378            // Looking at signed delta will work even when the timestamps
2379            // are wrapping around.
2380            int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2381                    - mPreviousTimestamp.mPosition).signedValue();
2382            // position can bobble slightly as an artifact; this hides the bobble
2383            static const int32_t MINIMUM_POSITION_DELTA = 8;
2384            if (deltaPosition < 0) {
2385                // Only report once per position instead of spamming the log.
2386                if (!mRetrogradeMotionReported) {
2387                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2388                            deltaPosition,
2389                            timestamp.mPosition,
2390                            mPreviousTimestamp.mPosition);
2391                    mRetrogradeMotionReported = true;
2392                }
2393            } else {
2394                mRetrogradeMotionReported = false;
2395            }
2396            if (deltaPosition < MINIMUM_POSITION_DELTA) {
2397                timestamp = mPreviousTimestamp;  // Use last valid timestamp.
2398            }
2399        }
2400        mPreviousTimestamp = timestamp;
2401        mPreviousTimestampValid = true;
2402    }
2403
2404    return status;
2405}
2406
2407String8 AudioTrack::getParameters(const String8& keys)
2408{
2409    audio_io_handle_t output = getOutput();
2410    if (output != AUDIO_IO_HANDLE_NONE) {
2411        return AudioSystem::getParameters(output, keys);
2412    } else {
2413        return String8::empty();
2414    }
2415}
2416
2417bool AudioTrack::isOffloaded() const
2418{
2419    AutoMutex lock(mLock);
2420    return isOffloaded_l();
2421}
2422
2423bool AudioTrack::isDirect() const
2424{
2425    AutoMutex lock(mLock);
2426    return isDirect_l();
2427}
2428
2429bool AudioTrack::isOffloadedOrDirect() const
2430{
2431    AutoMutex lock(mLock);
2432    return isOffloadedOrDirect_l();
2433}
2434
2435
2436status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2437{
2438
2439    const size_t SIZE = 256;
2440    char buffer[SIZE];
2441    String8 result;
2442
2443    result.append(" AudioTrack::dump\n");
2444    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2445            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2446    result.append(buffer);
2447    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2448            mChannelCount, mFrameCount);
2449    result.append(buffer);
2450    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
2451            mSampleRate, mPlaybackRate.mSpeed, mStatus);
2452    result.append(buffer);
2453    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2454    result.append(buffer);
2455    ::write(fd, result.string(), result.size());
2456    return NO_ERROR;
2457}
2458
2459uint32_t AudioTrack::getUnderrunCount() const
2460{
2461    AutoMutex lock(mLock);
2462    return getUnderrunCount_l();
2463}
2464
2465uint32_t AudioTrack::getUnderrunCount_l() const
2466{
2467    return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2468}
2469
2470uint32_t AudioTrack::getUnderrunFrames() const
2471{
2472    AutoMutex lock(mLock);
2473    return mProxy->getUnderrunFrames();
2474}
2475
2476status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2477{
2478    if (callback == 0) {
2479        ALOGW("%s adding NULL callback!", __FUNCTION__);
2480        return BAD_VALUE;
2481    }
2482    AutoMutex lock(mLock);
2483    if (mDeviceCallback == callback) {
2484        ALOGW("%s adding same callback!", __FUNCTION__);
2485        return INVALID_OPERATION;
2486    }
2487    status_t status = NO_ERROR;
2488    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2489        if (mDeviceCallback != 0) {
2490            ALOGW("%s callback already present!", __FUNCTION__);
2491            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2492        }
2493        status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
2494    }
2495    mDeviceCallback = callback;
2496    return status;
2497}
2498
2499status_t AudioTrack::removeAudioDeviceCallback(
2500        const sp<AudioSystem::AudioDeviceCallback>& callback)
2501{
2502    if (callback == 0) {
2503        ALOGW("%s removing NULL callback!", __FUNCTION__);
2504        return BAD_VALUE;
2505    }
2506    AutoMutex lock(mLock);
2507    if (mDeviceCallback != callback) {
2508        ALOGW("%s removing different callback!", __FUNCTION__);
2509        return INVALID_OPERATION;
2510    }
2511    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2512        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2513    }
2514    mDeviceCallback = 0;
2515    return NO_ERROR;
2516}
2517
2518// =========================================================================
2519
2520void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2521{
2522    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2523    if (audioTrack != 0) {
2524        AutoMutex lock(audioTrack->mLock);
2525        audioTrack->mProxy->binderDied();
2526    }
2527}
2528
2529// =========================================================================
2530
2531AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2532    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2533      mIgnoreNextPausedInt(false)
2534{
2535}
2536
2537AudioTrack::AudioTrackThread::~AudioTrackThread()
2538{
2539}
2540
2541bool AudioTrack::AudioTrackThread::threadLoop()
2542{
2543    {
2544        AutoMutex _l(mMyLock);
2545        if (mPaused) {
2546            mMyCond.wait(mMyLock);
2547            // caller will check for exitPending()
2548            return true;
2549        }
2550        if (mIgnoreNextPausedInt) {
2551            mIgnoreNextPausedInt = false;
2552            mPausedInt = false;
2553        }
2554        if (mPausedInt) {
2555            if (mPausedNs > 0) {
2556                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2557            } else {
2558                mMyCond.wait(mMyLock);
2559            }
2560            mPausedInt = false;
2561            return true;
2562        }
2563    }
2564    if (exitPending()) {
2565        return false;
2566    }
2567    nsecs_t ns = mReceiver.processAudioBuffer();
2568    switch (ns) {
2569    case 0:
2570        return true;
2571    case NS_INACTIVE:
2572        pauseInternal();
2573        return true;
2574    case NS_NEVER:
2575        return false;
2576    case NS_WHENEVER:
2577        // Event driven: call wake() when callback notification conditions change.
2578        ns = INT64_MAX;
2579        // fall through
2580    default:
2581        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2582        pauseInternal(ns);
2583        return true;
2584    }
2585}
2586
2587void AudioTrack::AudioTrackThread::requestExit()
2588{
2589    // must be in this order to avoid a race condition
2590    Thread::requestExit();
2591    resume();
2592}
2593
2594void AudioTrack::AudioTrackThread::pause()
2595{
2596    AutoMutex _l(mMyLock);
2597    mPaused = true;
2598}
2599
2600void AudioTrack::AudioTrackThread::resume()
2601{
2602    AutoMutex _l(mMyLock);
2603    mIgnoreNextPausedInt = true;
2604    if (mPaused || mPausedInt) {
2605        mPaused = false;
2606        mPausedInt = false;
2607        mMyCond.signal();
2608    }
2609}
2610
2611void AudioTrack::AudioTrackThread::wake()
2612{
2613    AutoMutex _l(mMyLock);
2614    if (!mPaused) {
2615        // wake() might be called while servicing a callback - ignore the next
2616        // pause time and call processAudioBuffer.
2617        mIgnoreNextPausedInt = true;
2618        if (mPausedInt && mPausedNs > 0) {
2619            // audio track is active and internally paused with timeout.
2620            mPausedInt = false;
2621            mMyCond.signal();
2622        }
2623    }
2624}
2625
2626void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2627{
2628    AutoMutex _l(mMyLock);
2629    mPausedInt = true;
2630    mPausedNs = ns;
2631}
2632
2633} // namespace android
2634