AudioTrack.cpp revision 6ae5843c281301a9ffd1059d185620a9337e15a2
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/primitives.h>
26#include <binder/IPCThreadState.h>
27#include <media/AudioTrack.h>
28#include <utils/Log.h>
29#include <private/media/AudioTrackShared.h>
30#include <media/IAudioFlinger.h>
31#include <media/AudioPolicyHelper.h>
32#include <media/AudioResamplerPublic.h>
33
34#define WAIT_PERIOD_MS                  10
35#define WAIT_STREAM_END_TIMEOUT_SEC     120
36static const int kMaxLoopCountNotifications = 32;
37
38namespace android {
39// ---------------------------------------------------------------------------
40
41// TODO: Move to a separate .h
42
43template <typename T>
44static inline const T &min(const T &x, const T &y) {
45    return x < y ? x : y;
46}
47
48template <typename T>
49static inline const T &max(const T &x, const T &y) {
50    return x > y ? x : y;
51}
52
53static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
54{
55    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
56}
57
58static int64_t convertTimespecToUs(const struct timespec &tv)
59{
60    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
61}
62
63// current monotonic time in microseconds.
64static int64_t getNowUs()
65{
66    struct timespec tv;
67    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
68    return convertTimespecToUs(tv);
69}
70
71// FIXME: we don't use the pitch setting in the time stretcher (not working);
72// instead we emulate it using our sample rate converter.
73static const bool kFixPitch = true; // enable pitch fix
74static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
75{
76    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
77}
78
79static inline float adjustSpeed(float speed, float pitch)
80{
81    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
82}
83
84static inline float adjustPitch(float pitch)
85{
86    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
87}
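// Worked example of the pitch emulation above (illustrative numbers): a request of speed 1.0 and
// pitch 2.0 on a 48000 Hz track becomes an effective sample rate of 96000 Hz, an effective speed
// of 0.5 and an effective pitch of 1.0 (normal), so the net playback speed stays roughly 1.0
// while the perceived pitch doubles.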
88
89// Must match similar computation in createTrack_l in Threads.cpp.
90// TODO: Move to a common library
91static size_t calculateMinFrameCount(
92        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
93        uint32_t sampleRate, float speed)
94{
95    // Ensure that buffer depth covers at least audio hardware latency
96    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
97    if (minBufCount < 2) {
98        minBufCount = 2;
99    }
100    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
101            "sampleRate %u  speed %f  minBufCount: %u",
102            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount);
103    return minBufCount * sourceFramesNeededWithTimestretch(
104            sampleRate, afFrameCount, afSampleRate, speed);
105}
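// Rough worked example for calculateMinFrameCount(): afLatencyMs = 80, afFrameCount = 960 and
// afSampleRate = 48000 give a 20 ms mix period, hence minBufCount = 4.  For a 44100 Hz track at
// speed 1.0, each mix period needs roughly 960 * 44100 / 48000 = 882 source frames, so the
// minimum is about 4 * 882 = 3528 frames (exact values depend on the rounding inside
// sourceFramesNeededWithTimestretch()).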
106
107// static
108status_t AudioTrack::getMinFrameCount(
109        size_t* frameCount,
110        audio_stream_type_t streamType,
111        uint32_t sampleRate)
112{
113    if (frameCount == NULL) {
114        return BAD_VALUE;
115    }
116
117    // FIXME handle in server, like createTrack_l(), possible missing info:
118    //          audio_io_handle_t output
119    //          audio_format_t format
120    //          audio_channel_mask_t channelMask
121    //          audio_output_flags_t flags (FAST)
122    uint32_t afSampleRate;
123    status_t status;
124    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
125    if (status != NO_ERROR) {
126        ALOGE("Unable to query output sample rate for stream type %d; status %d",
127                streamType, status);
128        return status;
129    }
130    size_t afFrameCount;
131    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
132    if (status != NO_ERROR) {
133        ALOGE("Unable to query output frame count for stream type %d; status %d",
134                streamType, status);
135        return status;
136    }
137    uint32_t afLatency;
138    status = AudioSystem::getOutputLatency(&afLatency, streamType);
139    if (status != NO_ERROR) {
140        ALOGE("Unable to query output latency for stream type %d; status %d",
141                streamType, status);
142        return status;
143    }
144
145    // When called from createTrack, speed is 1.0f (normal speed).
146    // This is rechecked when the playback rate is set (TODO: on setting sample rate, too).
147    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f);
148
149    // The formula above should always produce a non-zero value under normal circumstances:
150    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
151    // Return error in the unlikely event that it does not, as that's part of the API contract.
152    if (*frameCount == 0) {
153        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
154                streamType, sampleRate);
155        return BAD_VALUE;
156    }
157    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
158            *frameCount, afFrameCount, afSampleRate, afLatency);
159    return NO_ERROR;
160}
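// Illustrative caller-side use of getMinFrameCount(); names and values are examples only:
//   size_t minFrames;
//   if (AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 44100) == NO_ERROR) {
//       // request at least minFrames (often a small multiple of it) when creating the track
//   }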
161
162// ---------------------------------------------------------------------------
163
164AudioTrack::AudioTrack()
165    : mStatus(NO_INIT),
166      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
167      mPreviousSchedulingGroup(SP_DEFAULT),
168      mPausedPosition(0),
169      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
170{
171    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
172    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
173    mAttributes.flags = 0x0;
174    strcpy(mAttributes.tags, "");
175}
176
177AudioTrack::AudioTrack(
178        audio_stream_type_t streamType,
179        uint32_t sampleRate,
180        audio_format_t format,
181        audio_channel_mask_t channelMask,
182        size_t frameCount,
183        audio_output_flags_t flags,
184        callback_t cbf,
185        void* user,
186        uint32_t notificationFrames,
187        int sessionId,
188        transfer_type transferType,
189        const audio_offload_info_t *offloadInfo,
190        int uid,
191        pid_t pid,
192        const audio_attributes_t* pAttributes,
193        bool doNotReconnect)
194    : mStatus(NO_INIT),
195      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
196      mPreviousSchedulingGroup(SP_DEFAULT),
197      mPausedPosition(0),
198      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
199{
200    mStatus = set(streamType, sampleRate, format, channelMask,
201            frameCount, flags, cbf, user, notificationFrames,
202            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
203            offloadInfo, uid, pid, pAttributes, doNotReconnect);
204}
205
206AudioTrack::AudioTrack(
207        audio_stream_type_t streamType,
208        uint32_t sampleRate,
209        audio_format_t format,
210        audio_channel_mask_t channelMask,
211        const sp<IMemory>& sharedBuffer,
212        audio_output_flags_t flags,
213        callback_t cbf,
214        void* user,
215        uint32_t notificationFrames,
216        int sessionId,
217        transfer_type transferType,
218        const audio_offload_info_t *offloadInfo,
219        int uid,
220        pid_t pid,
221        const audio_attributes_t* pAttributes,
222        bool doNotReconnect)
223    : mStatus(NO_INIT),
224      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
225      mPreviousSchedulingGroup(SP_DEFAULT),
226      mPausedPosition(0),
227      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
228{
229    mStatus = set(streamType, sampleRate, format, channelMask,
230            0 /*frameCount*/, flags, cbf, user, notificationFrames,
231            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
232            uid, pid, pAttributes, doNotReconnect);
233}
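// Illustrative streaming-mode construction using the first constructor above; myCallback and the
// parameter values are placeholders, and error handling is omitted:
//   sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 48000, AUDIO_FORMAT_PCM_16_BIT,
//           AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE, myCallback, this);
//   if (track->initCheck() == NO_ERROR) {
//       track->start();
//   }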
234
235AudioTrack::~AudioTrack()
236{
237    if (mStatus == NO_ERROR) {
238        // Make sure that the callback function exits in the case where
239        // it is looping on a buffer-full condition in obtainBuffer().
240        // Otherwise the callback thread will never exit.
241        stop();
242        if (mAudioTrackThread != 0) {
243            mProxy->interrupt();
244            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
245            mAudioTrackThread->requestExitAndWait();
246            mAudioTrackThread.clear();
247        }
248        // No lock here: worst case we remove a NULL callback which will be a nop
249        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
250            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
251        }
252        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
253        mAudioTrack.clear();
254        mCblkMemory.clear();
255        mSharedBuffer.clear();
256        IPCThreadState::self()->flushCommands();
257        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
258                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
259        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
260    }
261}
262
263status_t AudioTrack::set(
264        audio_stream_type_t streamType,
265        uint32_t sampleRate,
266        audio_format_t format,
267        audio_channel_mask_t channelMask,
268        size_t frameCount,
269        audio_output_flags_t flags,
270        callback_t cbf,
271        void* user,
272        uint32_t notificationFrames,
273        const sp<IMemory>& sharedBuffer,
274        bool threadCanCallJava,
275        int sessionId,
276        transfer_type transferType,
277        const audio_offload_info_t *offloadInfo,
278        int uid,
279        pid_t pid,
280        const audio_attributes_t* pAttributes,
281        bool doNotReconnect)
282{
283    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
284          "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
285          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
286          sessionId, transferType, uid, pid);
287
288    mThreadCanCallJava = threadCanCallJava;
289
290    switch (transferType) {
291    case TRANSFER_DEFAULT:
292        if (sharedBuffer != 0) {
293            transferType = TRANSFER_SHARED;
294        } else if (cbf == NULL || threadCanCallJava) {
295            transferType = TRANSFER_SYNC;
296        } else {
297            transferType = TRANSFER_CALLBACK;
298        }
299        break;
300    case TRANSFER_CALLBACK:
301        if (cbf == NULL || sharedBuffer != 0) {
302            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
303            return BAD_VALUE;
304        }
305        break;
306    case TRANSFER_OBTAIN:
307    case TRANSFER_SYNC:
308        if (sharedBuffer != 0) {
309            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
310            return BAD_VALUE;
311        }
312        break;
313    case TRANSFER_SHARED:
314        if (sharedBuffer == 0) {
315            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
316            return BAD_VALUE;
317        }
318        break;
319    default:
320        ALOGE("Invalid transfer type %d", transferType);
321        return BAD_VALUE;
322    }
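    // In short: with TRANSFER_DEFAULT, a shared buffer selects TRANSFER_SHARED; otherwise a
    // missing callback (or one that may call into Java) selects TRANSFER_SYNC, and a native-safe
    // callback selects TRANSFER_CALLBACK.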
323    mSharedBuffer = sharedBuffer;
324    mTransfer = transferType;
325    mDoNotReconnect = doNotReconnect;
326
327    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
328            sharedBuffer->size());
329
330    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
331
332    // invariant that mAudioTrack != 0 is true only after set() returns successfully
333    if (mAudioTrack != 0) {
334        ALOGE("Track already in use");
335        return INVALID_OPERATION;
336    }
337
338    // handle default values first.
339    if (streamType == AUDIO_STREAM_DEFAULT) {
340        streamType = AUDIO_STREAM_MUSIC;
341    }
342    if (pAttributes == NULL) {
343        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
344            ALOGE("Invalid stream type %d", streamType);
345            return BAD_VALUE;
346        }
347        mStreamType = streamType;
348
349    } else {
350        // stream type shouldn't be looked at; this track has audio attributes
351        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
352        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
353                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
354        mStreamType = AUDIO_STREAM_DEFAULT;
355        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
356            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
357        }
358        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
359            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
360        }
361    }
362
363    // these defaults below should probably come from AudioFlinger too...
364    if (format == AUDIO_FORMAT_DEFAULT) {
365        format = AUDIO_FORMAT_PCM_16_BIT;
366    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
367        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
368    }
369
370    // validate parameters
371    if (!audio_is_valid_format(format)) {
372        ALOGE("Invalid format %#x", format);
373        return BAD_VALUE;
374    }
375    mFormat = format;
376
377    if (!audio_is_output_channel(channelMask)) {
378        ALOGE("Invalid channel mask %#x", channelMask);
379        return BAD_VALUE;
380    }
381    mChannelMask = channelMask;
382    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
383    mChannelCount = channelCount;
384
385    // force direct flag if format is not linear PCM
386    // or offload was requested
387    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
388            || !audio_is_linear_pcm(format)) {
389        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
390                    ? "Offload request, forcing to Direct Output"
391                    : "Not linear PCM, forcing to Direct Output");
392        flags = (audio_output_flags_t)
393                // FIXME why can't we allow direct AND fast?
394                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
395    }
396
397    // force direct flag if HW A/V sync requested
398    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
399        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
400    }
401
402    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
403        if (audio_has_proportional_frames(format)) {
404            mFrameSize = channelCount * audio_bytes_per_sample(format);
405        } else {
406            mFrameSize = sizeof(uint8_t);
407        }
408    } else {
409        ALOG_ASSERT(audio_has_proportional_frames(format));
410        mFrameSize = channelCount * audio_bytes_per_sample(format);
411        // createTrack will return an error if PCM format is not supported by server,
412        // so no need to check for specific PCM formats here
413    }
414
415    // sampling rate must be specified for direct outputs
416    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
417        return BAD_VALUE;
418    }
419    mSampleRate = sampleRate;
420    mOriginalSampleRate = sampleRate;
421    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
422
423    // Make copy of input parameter offloadInfo so that in the future:
424    //  (a) createTrack_l doesn't need it as an input parameter
425    //  (b) we can support re-creation of offloaded tracks
426    if (offloadInfo != NULL) {
427        mOffloadInfoCopy = *offloadInfo;
428        mOffloadInfo = &mOffloadInfoCopy;
429    } else {
430        mOffloadInfo = NULL;
431    }
432
433    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
434    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
435    mSendLevel = 0.0f;
436    // mFrameCount is initialized in createTrack_l
437    mReqFrameCount = frameCount;
438    mNotificationFramesReq = notificationFrames;
439    mNotificationFramesAct = 0;
440    if (sessionId == AUDIO_SESSION_ALLOCATE) {
441        mSessionId = AudioSystem::newAudioUniqueId();
442    } else {
443        mSessionId = sessionId;
444    }
445    int callingpid = IPCThreadState::self()->getCallingPid();
446    int mypid = getpid();
447    if (uid == -1 || (callingpid != mypid)) {
448        mClientUid = IPCThreadState::self()->getCallingUid();
449    } else {
450        mClientUid = uid;
451    }
452    if (pid == -1 || (callingpid != mypid)) {
453        mClientPid = callingpid;
454    } else {
455        mClientPid = pid;
456    }
457    mAuxEffectId = 0;
458    mFlags = flags;
459    mCbf = cbf;
460
461    if (cbf != NULL) {
462        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
463        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
464        // thread begins in paused state, and will not reference us until start()
465    }
466
467    // create the IAudioTrack
468    status_t status = createTrack_l();
469
470    if (status != NO_ERROR) {
471        if (mAudioTrackThread != 0) {
472            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
473            mAudioTrackThread->requestExitAndWait();
474            mAudioTrackThread.clear();
475        }
476        return status;
477    }
478
479    mStatus = NO_ERROR;
480    mState = STATE_STOPPED;
481    mUserData = user;
482    mLoopCount = 0;
483    mLoopStart = 0;
484    mLoopEnd = 0;
485    mLoopCountNotified = 0;
486    mMarkerPosition = 0;
487    mMarkerReached = false;
488    mNewPosition = 0;
489    mUpdatePeriod = 0;
490    mPosition = 0;
491    mReleased = 0;
492    mStartUs = 0;
493    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
494    mSequence = 1;
495    mObservedSequence = mSequence;
496    mInUnderrun = false;
497    mPreviousTimestampValid = false;
498    mTimestampStartupGlitchReported = false;
499    mRetrogradeMotionReported = false;
500    mUnderrunCountOffset = 0;
501
502    return NO_ERROR;
503}
504
505// -------------------------------------------------------------------------
506
507status_t AudioTrack::start()
508{
509    AutoMutex lock(mLock);
510
511    if (mState == STATE_ACTIVE) {
512        return INVALID_OPERATION;
513    }
514
515    mInUnderrun = true;
516
517    State previousState = mState;
518    if (previousState == STATE_PAUSED_STOPPING) {
519        mState = STATE_STOPPING;
520    } else {
521        mState = STATE_ACTIVE;
522    }
523    (void) updateAndGetPosition_l();
524    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
525        // reset current position as seen by client to 0
526        mPosition = 0;
527        mPreviousTimestampValid = false;
528        mTimestampStartupGlitchReported = false;
529        mRetrogradeMotionReported = false;
530
531        // If previousState == STATE_STOPPED, we clear the timestamp so that it
532        // needs a new server push. We also reactivate markers (mMarkerPosition != 0)
533        // as the position is reset to 0. This is legacy behavior. This is not done
534        // in stop() to avoid a race condition where the last marker event is issued twice.
535        // Note: the if is technically unnecessary because previousState == STATE_FLUSHED
536        // is only for streaming tracks, and mMarkerReached is already set to false.
537        if (previousState == STATE_STOPPED) {
538            mProxy->clearTimestamp(); // need new server push for valid timestamp
539            mMarkerReached = false;
540        }
541
542        // For offloaded tracks, we don't know if the hardware counters are really zero here,
543        // since the flush is asynchronous and stop may not fully drain.
544        // We save the time when the track is started to later verify whether
545        // the counters are realistic (i.e. start from zero after this time).
546        mStartUs = getNowUs();
547
548        // force refresh of remaining frames by processAudioBuffer() as last
549        // write before stop could be partial.
550        mRefreshRemaining = true;
551    }
552    mNewPosition = mPosition + mUpdatePeriod;
553    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
554
555    sp<AudioTrackThread> t = mAudioTrackThread;
556    if (t != 0) {
557        if (previousState == STATE_STOPPING) {
558            mProxy->interrupt();
559        } else {
560            t->resume();
561        }
562    } else {
563        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
564        get_sched_policy(0, &mPreviousSchedulingGroup);
565        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
566    }
567
568    status_t status = NO_ERROR;
569    if (!(flags & CBLK_INVALID)) {
570        status = mAudioTrack->start();
571        if (status == DEAD_OBJECT) {
572            flags |= CBLK_INVALID;
573        }
574    }
575    if (flags & CBLK_INVALID) {
576        status = restoreTrack_l("start");
577    }
578
579    if (status != NO_ERROR) {
580        ALOGE("start() status %d", status);
581        mState = previousState;
582        if (t != 0) {
583            if (previousState != STATE_STOPPING) {
584                t->pause();
585            }
586        } else {
587            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
588            set_sched_policy(0, mPreviousSchedulingGroup);
589        }
590    }
591
592    return status;
593}
594
595void AudioTrack::stop()
596{
597    AutoMutex lock(mLock);
598    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
599        return;
600    }
601
602    if (isOffloaded_l()) {
603        mState = STATE_STOPPING;
604    } else {
605        mState = STATE_STOPPED;
606        mReleased = 0;
607    }
608
609    mProxy->interrupt();
610    mAudioTrack->stop();
611
612    // Note: legacy handling - stop does not clear playback marker
613    // and periodic update counter, but flush does for streaming tracks.
614
615    if (mSharedBuffer != 0) {
616        // clear buffer position and loop count.
617        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
618                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
619    }
620
621    sp<AudioTrackThread> t = mAudioTrackThread;
622    if (t != 0) {
623        if (!isOffloaded_l()) {
624            t->pause();
625        }
626    } else {
627        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
628        set_sched_policy(0, mPreviousSchedulingGroup);
629    }
630}
631
632bool AudioTrack::stopped() const
633{
634    AutoMutex lock(mLock);
635    return mState != STATE_ACTIVE;
636}
637
638void AudioTrack::flush()
639{
640    if (mSharedBuffer != 0) {
641        return;
642    }
643    AutoMutex lock(mLock);
644    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
645        return;
646    }
647    flush_l();
648}
649
650void AudioTrack::flush_l()
651{
652    ALOG_ASSERT(mState != STATE_ACTIVE);
653
654    // clear playback marker and periodic update counter
655    mMarkerPosition = 0;
656    mMarkerReached = false;
657    mUpdatePeriod = 0;
658    mRefreshRemaining = true;
659
660    mState = STATE_FLUSHED;
661    mReleased = 0;
662    if (isOffloaded_l()) {
663        mProxy->interrupt();
664    }
665    mProxy->flush();
666    mAudioTrack->flush();
667}
668
669void AudioTrack::pause()
670{
671    AutoMutex lock(mLock);
672    if (mState == STATE_ACTIVE) {
673        mState = STATE_PAUSED;
674    } else if (mState == STATE_STOPPING) {
675        mState = STATE_PAUSED_STOPPING;
676    } else {
677        return;
678    }
679    mProxy->interrupt();
680    mAudioTrack->pause();
681
682    if (isOffloaded_l()) {
683        if (mOutput != AUDIO_IO_HANDLE_NONE) {
684            // An offload output can be re-used between two audio tracks having
685            // the same configuration. A timestamp query for a paused track
686            // while the other is running would return an incorrect time.
687            // To fix this, cache the playback position on a pause() and return
688            // this time when requested until the track is resumed.
689
690            // OffloadThread sends HAL pause in its threadLoop. Time saved
691            // here can be slightly off.
692
693            // TODO: check return code for getRenderPosition.
694
695            uint32_t halFrames;
696            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
697            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
698        }
699    }
700}
701
702status_t AudioTrack::setVolume(float left, float right)
703{
704    // This duplicates a test by AudioTrack JNI, but that is not the only caller
705    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
706            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
707        return BAD_VALUE;
708    }
709
710    AutoMutex lock(mLock);
711    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
712    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
713
714    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
715
716    if (isOffloaded_l()) {
717        mAudioTrack->signal();
718    }
719    return NO_ERROR;
720}
721
722status_t AudioTrack::setVolume(float volume)
723{
724    return setVolume(volume, volume);
725}
726
727status_t AudioTrack::setAuxEffectSendLevel(float level)
728{
729    // This duplicates a test by AudioTrack JNI, but that is not the only caller
730    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
731        return BAD_VALUE;
732    }
733
734    AutoMutex lock(mLock);
735    mSendLevel = level;
736    mProxy->setSendLevel(level);
737
738    return NO_ERROR;
739}
740
741void AudioTrack::getAuxEffectSendLevel(float* level) const
742{
743    if (level != NULL) {
744        *level = mSendLevel;
745    }
746}
747
748status_t AudioTrack::setSampleRate(uint32_t rate)
749{
750    AutoMutex lock(mLock);
751    if (rate == mSampleRate) {
752        return NO_ERROR;
753    }
754    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
755        return INVALID_OPERATION;
756    }
757    if (mOutput == AUDIO_IO_HANDLE_NONE) {
758        return NO_INIT;
759    }
760    // NOTE: it is theoretically possible, but highly unlikely, that a device change
761    // could mean a previously allowed sampling rate is no longer allowed.
762    uint32_t afSamplingRate;
763    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
764        return NO_INIT;
765    }
766    // pitch is emulated by adjusting speed and sampleRate
767    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
768    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
769        return BAD_VALUE;
770    }
771    // TODO: Should we also check if the buffer size is compatible?
772
773    mSampleRate = rate;
774    mProxy->setSampleRate(effectiveSampleRate);
775
776    return NO_ERROR;
777}
778
779uint32_t AudioTrack::getSampleRate() const
780{
781    AutoMutex lock(mLock);
782
783    // sample rate can be updated during playback by the offloaded decoder so we need to
784    // query the HAL and update if needed.
785// FIXME use Proxy return channel to update the rate from server and avoid polling here
786    if (isOffloadedOrDirect_l()) {
787        if (mOutput != AUDIO_IO_HANDLE_NONE) {
788            uint32_t sampleRate = 0;
789            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
790            if (status == NO_ERROR) {
791                mSampleRate = sampleRate;
792            }
793        }
794    }
795    return mSampleRate;
796}
797
798uint32_t AudioTrack::getOriginalSampleRate() const
799{
800    return mOriginalSampleRate;
801}
802
803status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
804{
805    AutoMutex lock(mLock);
806    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
807        return NO_ERROR;
808    }
809    if (isOffloadedOrDirect_l()) {
810        return INVALID_OPERATION;
811    }
812    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
813        return INVALID_OPERATION;
814    }
815    // pitch is emulated by adjusting speed and sampleRate
816    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
817    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
818    const float effectivePitch = adjustPitch(playbackRate.mPitch);
819    AudioPlaybackRate playbackRateTemp = playbackRate;
820    playbackRateTemp.mSpeed = effectiveSpeed;
821    playbackRateTemp.mPitch = effectivePitch;
822
823    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
824        return BAD_VALUE;
825    }
826    // Check if the buffer size is compatible.
827    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
828        ALOGV("setPlaybackRate(%f, %f) failed", playbackRate.mSpeed, playbackRate.mPitch);
829        return BAD_VALUE;
830    }
831
832    // Check resampler ratios are within bounds
833    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate * (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
834        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
835                playbackRate.mSpeed, playbackRate.mPitch);
836        return BAD_VALUE;
837    }
838
839    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
840        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
841                        playbackRate.mSpeed, playbackRate.mPitch);
842        return BAD_VALUE;
843    }
844    mPlaybackRate = playbackRate;
845    // set effective rates
846    mProxy->setPlaybackRate(playbackRateTemp);
847    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
848    return NO_ERROR;
849}
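// Illustrative use of setPlaybackRate(): play 1.5x faster at normal pitch (values are examples):
//   AudioPlaybackRate rate = track->getPlaybackRate();
//   rate.mSpeed = 1.5f;
//   rate.mPitch = 1.0f;
//   track->setPlaybackRate(rate);   // with pitch 1.0 the effective sample rate is unchanged
//                                   // and the effective speed is 1.5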
850
851const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
852{
853    AutoMutex lock(mLock);
854    return mPlaybackRate;
855}
856
857ssize_t AudioTrack::getBufferSizeInFrames()
858{
859    AutoMutex lock(mLock);
860    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
861        return NO_INIT;
862    }
863    return mProxy->getBufferSizeInFrames();
864}
865
866ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
867{
868    AutoMutex lock(mLock);
869    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
870        return NO_INIT;
871    }
872    // Reject if timed track or compressed audio.
873    if (!audio_is_linear_pcm(mFormat)) {
874        return INVALID_OPERATION;
875    }
876    // TODO also need to inform the server side (through mAudioTrack) that
877    // the buffer count is reduced, otherwise the track may never start
878    // because the server thinks it is never filled.
879    return mProxy->setBufferSizeInFrames(bufferSizeInFrames);
880}
881
882status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
883{
884    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
885        return INVALID_OPERATION;
886    }
887
888    if (loopCount == 0) {
889        ;
890    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
891            loopEnd - loopStart >= MIN_LOOP) {
892        ;
893    } else {
894        return BAD_VALUE;
895    }
896
897    AutoMutex lock(mLock);
898    // See setPosition() regarding setting parameters such as loop points or position while active
899    if (mState == STATE_ACTIVE) {
900        return INVALID_OPERATION;
901    }
902    setLoop_l(loopStart, loopEnd, loopCount);
903    return NO_ERROR;
904}
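// Illustrative use for a static (shared buffer) track: loop the first second of a 48 kHz clip
// three times with track->setLoop(0, 48000, 3); a loopCount of -1 loops indefinitely and 0
// clears the loop.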
905
906void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
907{
908    // We do not update the periodic notification point.
909    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
910    mLoopCount = loopCount;
911    mLoopEnd = loopEnd;
912    mLoopStart = loopStart;
913    mLoopCountNotified = loopCount;
914    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
915
916    // Waking the AudioTrackThread is not needed as this cannot be called when active.
917}
918
919status_t AudioTrack::setMarkerPosition(uint32_t marker)
920{
921    // The only purpose of setting marker position is to get a callback
922    if (mCbf == NULL || isOffloadedOrDirect()) {
923        return INVALID_OPERATION;
924    }
925
926    AutoMutex lock(mLock);
927    mMarkerPosition = marker;
928    mMarkerReached = false;
929
930    sp<AudioTrackThread> t = mAudioTrackThread;
931    if (t != 0) {
932        t->wake();
933    }
934    return NO_ERROR;
935}
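// Illustrative: with a callback installed on a 48 kHz track, request one EVENT_MARKER callback at
// the 1 second mark and EVENT_NEW_POS callbacks every 100 ms (values are examples):
//   track->setMarkerPosition(48000);
//   track->setPositionUpdatePeriod(4800);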
936
937status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
938{
939    if (isOffloadedOrDirect()) {
940        return INVALID_OPERATION;
941    }
942    if (marker == NULL) {
943        return BAD_VALUE;
944    }
945
946    AutoMutex lock(mLock);
947    mMarkerPosition.getValue(marker);
948
949    return NO_ERROR;
950}
951
952status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
953{
954    // The only purpose of setting position update period is to get a callback
955    if (mCbf == NULL || isOffloadedOrDirect()) {
956        return INVALID_OPERATION;
957    }
958
959    AutoMutex lock(mLock);
960    mNewPosition = updateAndGetPosition_l() + updatePeriod;
961    mUpdatePeriod = updatePeriod;
962
963    sp<AudioTrackThread> t = mAudioTrackThread;
964    if (t != 0) {
965        t->wake();
966    }
967    return NO_ERROR;
968}
969
970status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
971{
972    if (isOffloadedOrDirect()) {
973        return INVALID_OPERATION;
974    }
975    if (updatePeriod == NULL) {
976        return BAD_VALUE;
977    }
978
979    AutoMutex lock(mLock);
980    *updatePeriod = mUpdatePeriod;
981
982    return NO_ERROR;
983}
984
985status_t AudioTrack::setPosition(uint32_t position)
986{
987    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
988        return INVALID_OPERATION;
989    }
990    if (position > mFrameCount) {
991        return BAD_VALUE;
992    }
993
994    AutoMutex lock(mLock);
995    // Currently we require that the player is inactive before setting parameters such as position
996    // or loop points.  Otherwise, there could be a race condition: the application could read the
997    // current position, compute a new position or loop parameters, and then set that position or
998    // loop parameters but it would do the "wrong" thing since the position has continued to advance
999    // in the meantime.  If we ever provide a sequencer in the server, we could allow a way for the app
1000    // to specify how it wants to handle such scenarios.
1001    if (mState == STATE_ACTIVE) {
1002        return INVALID_OPERATION;
1003    }
1004    // After setting the position, use full update period before notification.
1005    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
1006    mStaticProxy->setBufferPosition(position);
1007
1008    // Waking the AudioTrackThread is not needed as this cannot be called when active.
1009    return NO_ERROR;
1010}
1011
1012status_t AudioTrack::getPosition(uint32_t *position)
1013{
1014    if (position == NULL) {
1015        return BAD_VALUE;
1016    }
1017
1018    AutoMutex lock(mLock);
1019    if (isOffloadedOrDirect_l()) {
1020        uint32_t dspFrames = 0;
1021
1022        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
1023            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
1024            *position = mPausedPosition;
1025            return NO_ERROR;
1026        }
1027
1028        if (mOutput != AUDIO_IO_HANDLE_NONE) {
1029            uint32_t halFrames; // actually unused
1030            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
1031            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
1032        }
1033        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
1034        // due to hardware latency. We leave this behavior for now.
1035        *position = dspFrames;
1036    } else {
1037        if (mCblk->mFlags & CBLK_INVALID) {
1038            (void) restoreTrack_l("getPosition");
1039            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1040            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1041        }
1042
1043        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1044        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
1045                0 : updateAndGetPosition_l().value();
1046    }
1047    return NO_ERROR;
1048}
1049
1050status_t AudioTrack::getBufferPosition(uint32_t *position)
1051{
1052    if (mSharedBuffer == 0) {
1053        return INVALID_OPERATION;
1054    }
1055    if (position == NULL) {
1056        return BAD_VALUE;
1057    }
1058
1059    AutoMutex lock(mLock);
1060    *position = mStaticProxy->getBufferPosition();
1061    return NO_ERROR;
1062}
1063
1064status_t AudioTrack::reload()
1065{
1066    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1067        return INVALID_OPERATION;
1068    }
1069
1070    AutoMutex lock(mLock);
1071    // See setPosition() regarding setting parameters such as loop points or position while active
1072    if (mState == STATE_ACTIVE) {
1073        return INVALID_OPERATION;
1074    }
1075    mNewPosition = mUpdatePeriod;
1076    (void) updateAndGetPosition_l();
1077    mPosition = 0;
1078    mPreviousTimestampValid = false;
1079#if 0
1080    // The documentation is not clear on the behavior of reload() and the restoration
1081    // of loop count. Historically we have not restored loop count, start, end,
1082    // but it makes sense if one desires to repeat playing a particular sound.
1083    if (mLoopCount != 0) {
1084        mLoopCountNotified = mLoopCount;
1085        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1086    }
1087#endif
1088    mStaticProxy->setBufferPosition(0);
1089    return NO_ERROR;
1090}
1091
1092audio_io_handle_t AudioTrack::getOutput() const
1093{
1094    AutoMutex lock(mLock);
1095    return mOutput;
1096}
1097
1098status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1099    AutoMutex lock(mLock);
1100    if (mSelectedDeviceId != deviceId) {
1101        mSelectedDeviceId = deviceId;
1102        android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1103    }
1104    return NO_ERROR;
1105}
1106
1107audio_port_handle_t AudioTrack::getOutputDevice() {
1108    AutoMutex lock(mLock);
1109    return mSelectedDeviceId;
1110}
1111
1112audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1113    AutoMutex lock(mLock);
1114    if (mOutput == AUDIO_IO_HANDLE_NONE) {
1115        return AUDIO_PORT_HANDLE_NONE;
1116    }
1117    return AudioSystem::getDeviceIdForIo(mOutput);
1118}
1119
1120status_t AudioTrack::attachAuxEffect(int effectId)
1121{
1122    AutoMutex lock(mLock);
1123    status_t status = mAudioTrack->attachAuxEffect(effectId);
1124    if (status == NO_ERROR) {
1125        mAuxEffectId = effectId;
1126    }
1127    return status;
1128}
1129
1130audio_stream_type_t AudioTrack::streamType() const
1131{
1132    if (mStreamType == AUDIO_STREAM_DEFAULT) {
1133        return audio_attributes_to_stream_type(&mAttributes);
1134    }
1135    return mStreamType;
1136}
1137
1138// -------------------------------------------------------------------------
1139
1140// must be called with mLock held
1141status_t AudioTrack::createTrack_l()
1142{
1143    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1144    if (audioFlinger == 0) {
1145        ALOGE("Could not get audioflinger");
1146        return NO_INIT;
1147    }
1148
1149    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
1150        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
1151    }
1152    audio_io_handle_t output;
1153    audio_stream_type_t streamType = mStreamType;
1154    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
1155
1156    status_t status;
1157    status = AudioSystem::getOutputForAttr(attr, &output,
1158                                           (audio_session_t)mSessionId, &streamType, mClientUid,
1159                                           mSampleRate, mFormat, mChannelMask,
1160                                           mFlags, mSelectedDeviceId, mOffloadInfo);
1161
1162    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
1163        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
1164              " channel mask %#x, flags %#x",
1165              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
1166        return BAD_VALUE;
1167    }
1168    {
1169    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
1170    // we must release it ourselves if anything goes wrong.
1171
1172    // Not all of these values are needed under all conditions, but it is easier to get them all
1173    status = AudioSystem::getLatency(output, &mAfLatency);
1174    if (status != NO_ERROR) {
1175        ALOGE("getLatency(%d) failed status %d", output, status);
1176        goto release;
1177    }
1178    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
1179
1180    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
1181    if (status != NO_ERROR) {
1182        ALOGE("getFrameCount(output=%d) status %d", output, status);
1183        goto release;
1184    }
1185
1186    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
1187    if (status != NO_ERROR) {
1188        ALOGE("getSamplingRate(output=%d) status %d", output, status);
1189        goto release;
1190    }
1191    if (mSampleRate == 0) {
1192        mSampleRate = mAfSampleRate;
1193        mOriginalSampleRate = mAfSampleRate;
1194    }
1195    // Client can only express a preference for FAST.  Server will perform additional tests.
1196    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1197        bool useCaseAllowed =
1198            // either of these use cases:
1199            // use case 1: shared buffer
1200            (mSharedBuffer != 0) ||
1201            // use case 2: callback transfer mode
1202            (mTransfer == TRANSFER_CALLBACK) ||
1203            // use case 3: obtain/release mode
1204            (mTransfer == TRANSFER_OBTAIN) ||
1205            // use case 4: synchronous write
1206            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);
1207        // sample rates must also match
1208        bool fastAllowed = useCaseAllowed && (mSampleRate == mAfSampleRate);
1209        if (!fastAllowed) {
1210            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, "
1211                "track %u Hz, output %u Hz",
1212                mTransfer, mSampleRate, mAfSampleRate);
1213            // once denied, do not request again if IAudioTrack is re-created
1214            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1215        }
1216    }
1217
1218    // The client's AudioTrack buffer is divided into n parts for the purpose of wakeup by the server, where
1219    //  n = 1   fast track with single buffering; nBuffering is ignored
1220    //  n = 2   fast track with double buffering
1221    //  n = 2   normal track, (including those with sample rate conversion)
1222    //  n >= 3  very high latency or very small notification interval (unused).
1223    const uint32_t nBuffering = 2;
1224
1225    mNotificationFramesAct = mNotificationFramesReq;
1226
1227    size_t frameCount = mReqFrameCount;
1228    if (!audio_has_proportional_frames(mFormat)) {
1229
1230        if (mSharedBuffer != 0) {
1231            // Same comment as below about ignoring frameCount parameter for set()
1232            frameCount = mSharedBuffer->size();
1233        } else if (frameCount == 0) {
1234            frameCount = mAfFrameCount;
1235        }
1236        if (mNotificationFramesAct != frameCount) {
1237            mNotificationFramesAct = frameCount;
1238        }
1239    } else if (mSharedBuffer != 0) {
1240        // FIXME: Ensure client side memory buffers need
1241        // not have additional alignment beyond sample
1242        // (e.g. 16 bit stereo accessed as 32 bit frame).
1243        size_t alignment = audio_bytes_per_sample(mFormat);
1244        if (alignment & 1) {
1245            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
1246            alignment = 1;
1247        }
1248        if (mChannelCount > 1) {
1249            // A channel count greater than 2 does not require stronger alignment than stereo
1250            alignment <<= 1;
1251        }
1252        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1253            ALOGE("Invalid buffer alignment: address %p, channel count %u",
1254                    mSharedBuffer->pointer(), mChannelCount);
1255            status = BAD_VALUE;
1256            goto release;
1257        }
1258
1259        // When initializing a shared buffer AudioTrack via constructors,
1260        // there's no frameCount parameter.
1261        // But when initializing a shared buffer AudioTrack via set(),
1262        // there _is_ a frameCount parameter.  We silently ignore it.
1263        frameCount = mSharedBuffer->size() / mFrameSize;
1264    } else {
1265        // For fast tracks the frame count calculations and checks are done by server
1266
1267        if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
1268            // for normal tracks precompute the frame count based on speed.
1269            const size_t minFrameCount = calculateMinFrameCount(
1270                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
1271                    mPlaybackRate.mSpeed);
1272            if (frameCount < minFrameCount) {
1273                frameCount = minFrameCount;
1274            }
1275        }
1276    }
1277
1278    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1279
1280    pid_t tid = -1;
1281    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1282        trackFlags |= IAudioFlinger::TRACK_FAST;
1283        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
1284            tid = mAudioTrackThread->getTid();
1285        }
1286    }
1287
1288    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1289        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1290    }
1291
1292    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1293        trackFlags |= IAudioFlinger::TRACK_DIRECT;
1294    }
1295
1296    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1297                                // but we will still need the original value also
1298    int originalSessionId = mSessionId;
1299    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1300                                                      mSampleRate,
1301                                                      mFormat,
1302                                                      mChannelMask,
1303                                                      &temp,
1304                                                      &trackFlags,
1305                                                      mSharedBuffer,
1306                                                      output,
1307                                                      tid,
1308                                                      &mSessionId,
1309                                                      mClientUid,
1310                                                      &status);
1311    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1312            "session ID changed from %d to %d", originalSessionId, mSessionId);
1313
1314    if (status != NO_ERROR) {
1315        ALOGE("AudioFlinger could not create track, status: %d", status);
1316        goto release;
1317    }
1318    ALOG_ASSERT(track != 0);
1319
1320    // AudioFlinger now owns the reference to the I/O handle,
1321    // so we are no longer responsible for releasing it.
1322
1323    sp<IMemory> iMem = track->getCblk();
1324    if (iMem == 0) {
1325        ALOGE("Could not get control block");
1326        return NO_INIT;
1327    }
1328    void *iMemPointer = iMem->pointer();
1329    if (iMemPointer == NULL) {
1330        ALOGE("Could not get control block pointer");
1331        return NO_INIT;
1332    }
1333    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1334    if (mAudioTrack != 0) {
1335        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1336        mDeathNotifier.clear();
1337    }
1338    mAudioTrack = track;
1339    mCblkMemory = iMem;
1340    IPCThreadState::self()->flushCommands();
1341
1342    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1343    mCblk = cblk;
1344    // note that temp is the (possibly revised) value of frameCount
1345    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1346        // In current design, AudioTrack client checks and ensures frame count validity before
1347        // passing it to AudioFlinger so AudioFlinger should not return a different value except
1348        // for fast track as it uses a special method of assigning frame count.
1349        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1350    }
1351    frameCount = temp;
1352
1353    mAwaitBoost = false;
1354    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1355        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1356            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1357            if (!mThreadCanCallJava) {
1358                mAwaitBoost = true;
1359            }
1360        } else {
1361            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1362            // once denied, do not request again if IAudioTrack is re-created
1363            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1364        }
1365    }
1366    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1367        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1368            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1369        } else {
1370            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1371            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1372            // FIXME This is a warning, not an error, so don't return error status
1373            //return NO_INIT;
1374        }
1375    }
1376    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1377        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
1378            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
1379        } else {
1380            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
1381            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
1382            // FIXME This is a warning, not an error, so don't return error status
1383            //return NO_INIT;
1384        }
1385    }
1386    // Make sure that application is notified with sufficient margin before underrun
1387    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1388        // Theoretically double-buffering is not required for fast tracks,
1389        // due to tighter scheduling.  But in practice, to accommodate kernels with
1390        // scheduling jitter, and apps with computation jitter, we use double-buffering
1391        // for fast tracks just like normal streaming tracks.
1392        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
1393            mNotificationFramesAct = frameCount / nBuffering;
1394        }
1395    }
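    // For example, a streaming PCM track with frameCount 3840, nBuffering 2 and no explicit
    // notification request ends up with mNotificationFramesAct = 1920, i.e. the app is woken
    // roughly every half buffer (40 ms at 48 kHz) to refill.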
1396
1397    // We retain a copy of the I/O handle, but don't own the reference
1398    mOutput = output;
1399    mRefreshRemaining = true;
1400
1401    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1402    // is the value of pointer() for the shared buffer, otherwise buffers points
1403    // immediately after the control block.  This address is for the mapping within client
1404    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1405    void* buffers;
1406    if (mSharedBuffer == 0) {
1407        buffers = cblk + 1;
1408    } else {
1409        buffers = mSharedBuffer->pointer();
1410        if (buffers == NULL) {
1411            ALOGE("Could not get buffer pointer");
1412            return NO_INIT;
1413        }
1414    }
1415
1416    mAudioTrack->attachAuxEffect(mAuxEffectId);
1417    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
1418    // FIXME don't believe this lie
1419    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;
1420
1421    mFrameCount = frameCount;
1422    // If IAudioTrack is re-created, don't let the requested frameCount
1423    // decrease.  This can confuse clients that cache frameCount().
1424    if (frameCount > mReqFrameCount) {
1425        mReqFrameCount = frameCount;
1426    }
1427
1428    // reset server position to 0 as we have new cblk.
1429    mServer = 0;
1430
1431    // update proxy
1432    if (mSharedBuffer == 0) {
1433        mStaticProxy.clear();
1434        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1435    } else {
1436        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1437        mProxy = mStaticProxy;
1438    }
1439
1440    mProxy->setVolumeLR(gain_minifloat_pack(
1441            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1442            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1443
1444    mProxy->setSendLevel(mSendLevel);
1445    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1446    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1447    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1448    mProxy->setSampleRate(effectiveSampleRate);
1449
1450    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1451    playbackRateTemp.mSpeed = effectiveSpeed;
1452    playbackRateTemp.mPitch = effectivePitch;
1453    mProxy->setPlaybackRate(playbackRateTemp);
1454    mProxy->setMinimum(mNotificationFramesAct);
1455
1456    mDeathNotifier = new DeathNotifier(this);
1457    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1458
1459    if (mDeviceCallback != 0) {
1460        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
1461    }
1462
1463    return NO_ERROR;
1464    }
1465
1466release:
1467    AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
1468    if (status == NO_ERROR) {
1469        status = NO_INIT;
1470    }
1471    return status;
1472}
1473
1474status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1475{
1476    if (audioBuffer == NULL) {
1477        if (nonContig != NULL) {
1478            *nonContig = 0;
1479        }
1480        return BAD_VALUE;
1481    }
1482    if (mTransfer != TRANSFER_OBTAIN) {
1483        audioBuffer->frameCount = 0;
1484        audioBuffer->size = 0;
1485        audioBuffer->raw = NULL;
1486        if (nonContig != NULL) {
1487            *nonContig = 0;
1488        }
1489        return INVALID_OPERATION;
1490    }
1491
1492    const struct timespec *requested;
1493    struct timespec timeout;
1494    if (waitCount == -1) {
1495        requested = &ClientProxy::kForever;
1496    } else if (waitCount == 0) {
1497        requested = &ClientProxy::kNonBlocking;
1498    } else if (waitCount > 0) {
1499        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1500        timeout.tv_sec = ms / 1000;
1501        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1502        requested = &timeout;
1503    } else {
1504        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1505        requested = NULL;
1506    }
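    // Summary of the waitCount convention: -1 waits indefinitely, 0 is non-blocking, a positive
    // value waits up to WAIT_PERIOD_MS * waitCount milliseconds in total, and any other negative
    // value is invalid.  For example, with WAIT_PERIOD_MS == 10, waitCount == 150 yields a
    // 1500 ms timeout (tv_sec == 1, tv_nsec == 500000000).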
1507    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1508}
1509
1510status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1511        struct timespec *elapsed, size_t *nonContig)
1512{
1513    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1514    uint32_t oldSequence = 0;
1515    uint32_t newSequence;
1516
1517    Proxy::Buffer buffer;
1518    status_t status = NO_ERROR;
1519
1520    static const int32_t kMaxTries = 5;
1521    int32_t tryCounter = kMaxTries;
1522
1523    do {
1524        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1525        // keep them from going away if another thread re-creates the track during obtainBuffer()
1526        sp<AudioTrackClientProxy> proxy;
1527        sp<IMemory> iMem;
1528
1529        {   // start of lock scope
1530            AutoMutex lock(mLock);
1531
1532            newSequence = mSequence;
1533            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1534            if (status == DEAD_OBJECT) {
1535                // re-create track, unless someone else has already done so
1536                if (newSequence == oldSequence) {
1537                    status = restoreTrack_l("obtainBuffer");
1538                    if (status != NO_ERROR) {
1539                        buffer.mFrameCount = 0;
1540                        buffer.mRaw = NULL;
1541                        buffer.mNonContig = 0;
1542                        break;
1543                    }
1544                }
1545            }
1546            oldSequence = newSequence;
1547
1548            // Keep the extra references
1549            proxy = mProxy;
1550            iMem = mCblkMemory;
1551
1552            if (mState == STATE_STOPPING) {
1553                status = -EINTR;
1554                buffer.mFrameCount = 0;
1555                buffer.mRaw = NULL;
1556                buffer.mNonContig = 0;
1557                break;
1558            }
1559
1560            // Non-blocking if track is stopped or paused
1561            if (mState != STATE_ACTIVE) {
1562                requested = &ClientProxy::kNonBlocking;
1563            }
1564
1565        }   // end of lock scope
1566
1567        buffer.mFrameCount = audioBuffer->frameCount;
1568        // FIXME starts the requested timeout and elapsed over from scratch
1569        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1570
1571    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1572
1573    audioBuffer->frameCount = buffer.mFrameCount;
1574    audioBuffer->size = buffer.mFrameCount * mFrameSize;
1575    audioBuffer->raw = buffer.mRaw;
1576    if (nonContig != NULL) {
1577        *nonContig = buffer.mNonContig;
1578    }
1579    return status;
1580}
1581
1582void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1583{
1584    // FIXME add error checking on mode, by adding an internal version
1585    if (mTransfer == TRANSFER_SHARED) {
1586        return;
1587    }
1588
1589    size_t stepCount = audioBuffer->size / mFrameSize;
1590    if (stepCount == 0) {
1591        return;
1592    }
1593
1594    Proxy::Buffer buffer;
1595    buffer.mFrameCount = stepCount;
1596    buffer.mRaw = audioBuffer->raw;
1597
1598    AutoMutex lock(mLock);
1599    mReleased += stepCount;
1600    mInUnderrun = false;
1601    mProxy->releaseBuffer(&buffer);
1602
1603    // restart track if it was disabled by audioflinger due to previous underrun
1604    if (mState == STATE_ACTIVE) {
1605        audio_track_cblk_t* cblk = mCblk;
1606        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1607            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1608            // FIXME ignoring status
1609            mAudioTrack->start();
1610        }
1611    }
1612}
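
// A minimal client-side sketch of the obtainBuffer()/releaseBuffer() cycle used in
// TRANSFER_OBTAIN mode (illustrative only; "track", "framesToWrite" and "fillFrames" are
// hypothetical names, not part of this file):
//
//     AudioTrack::Buffer buffer;
//     buffer.frameCount = framesToWrite;              // request up to this many frames
//     status_t status = track->obtainBuffer(&buffer, -1 /* wait indefinitely */, NULL);
//     if (status == NO_ERROR) {
//         fillFrames(buffer.raw, buffer.frameCount);  // write exactly buffer.frameCount frames
//         track->releaseBuffer(&buffer);              // buffer.size determines frames released
//     }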
1613
1614// -------------------------------------------------------------------------
1615
1616ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1617{
1618    if (mTransfer != TRANSFER_SYNC) {
1619        return INVALID_OPERATION;
1620    }
1621
1622    if (isDirect()) {
1623        AutoMutex lock(mLock);
1624        int32_t flags = android_atomic_and(
1625                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1626                            &mCblk->mFlags);
1627        if (flags & CBLK_INVALID) {
1628            return DEAD_OBJECT;
1629        }
1630    }
1631
1632    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1633        // Sanity check: the user is most likely passing an error code, and it would
1634        // make the return value ambiguous (actualSize vs error).
1635        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1636        return BAD_VALUE;
1637    }
1638
1639    size_t written = 0;
1640    Buffer audioBuffer;
1641
1642    while (userSize >= mFrameSize) {
1643        audioBuffer.frameCount = userSize / mFrameSize;
1644
1645        status_t err = obtainBuffer(&audioBuffer,
1646                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1647        if (err < 0) {
1648            if (written > 0) {
1649                break;
1650            }
1651            return ssize_t(err);
1652        }
1653
1654        size_t toWrite = audioBuffer.size;
1655        memcpy(audioBuffer.i8, buffer, toWrite);
1656        buffer = ((const char *) buffer) + toWrite;
1657        userSize -= toWrite;
1658        written += toWrite;
1659
1660        releaseBuffer(&audioBuffer);
1661    }
1662
1663    return written;
1664}
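
// Typical streaming use in TRANSFER_SYNC mode is a blocking write loop (illustrative only;
// "track", "pcmData" and "pcmBytes" are hypothetical names):
//
//     ssize_t written = track->write(pcmData, pcmBytes, true /* blocking */);
//     if (written < 0) {
//         // negative return values are status codes (e.g. BAD_VALUE), not byte counts
//     }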
1665
1666// -------------------------------------------------------------------------
1667
1668nsecs_t AudioTrack::processAudioBuffer()
1669{
1670    // Currently the AudioTrack thread is not created if there are no callbacks.
1671    // Would it ever make sense to run the thread, even without callbacks?
1672    // If so, then replace this by checks at each use for mCbf != NULL.
1673    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1674
1675    mLock.lock();
1676    if (mAwaitBoost) {
1677        mAwaitBoost = false;
1678        mLock.unlock();
1679        static const int32_t kMaxTries = 5;
1680        int32_t tryCounter = kMaxTries;
1681        uint32_t pollUs = 10000;
1682        do {
1683            int policy = sched_getscheduler(0);
1684            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1685                break;
1686            }
1687            usleep(pollUs);
1688            pollUs <<= 1;
1689        } while (tryCounter-- > 0);
1690        if (tryCounter < 0) {
1691            ALOGE("did not receive expected priority boost on time");
1692        }
1693        // Run again immediately
1694        return 0;
1695    }
1696
1697    // Can only reference mCblk while locked
1698    int32_t flags = android_atomic_and(
1699        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1700
1701    // Check for track invalidation
1702    if (flags & CBLK_INVALID) {
1703        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1704        // AudioSystem cache. We should not exit here but after calling the callback so
1705        // that the upper layers can recreate the track
1706        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1707            status_t status __unused = restoreTrack_l("processAudioBuffer");
1708            // FIXME unused status
1709            // after restoration, continue below to make sure that the loop and buffer events
1710            // are notified because they have been cleared from mCblk->mFlags above.
1711        }
1712    }
1713
1714    bool waitStreamEnd = mState == STATE_STOPPING;
1715    bool active = mState == STATE_ACTIVE;
1716
1717    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1718    bool newUnderrun = false;
1719    if (flags & CBLK_UNDERRUN) {
1720#if 0
1721        // Currently in shared buffer mode, when the server reaches the end of buffer,
1722        // the track stays active in continuous underrun state.  It's up to the application
1723        // to pause or stop the track, or set the position to a new offset within the buffer.
1724        // This was some experimental code to auto-pause on underrun.  Keeping it here
1725        // in "if 0" so we can revisit this if we add a real sequencer for shared memory content.
1726        if (mTransfer == TRANSFER_SHARED) {
1727            mState = STATE_PAUSED;
1728            active = false;
1729        }
1730#endif
1731        if (!mInUnderrun) {
1732            mInUnderrun = true;
1733            newUnderrun = true;
1734        }
1735    }
1736
1737    // Get current position of server
1738    Modulo<uint32_t> position(updateAndGetPosition_l());
1739
1740    // Manage marker callback
1741    bool markerReached = false;
1742    Modulo<uint32_t> markerPosition(mMarkerPosition);
1743    // uses 32 bit wraparound for comparison with position.
1744    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
1745        mMarkerReached = markerReached = true;
1746    }
1747
1748    // Determine number of new position callback(s) that will be needed, while locked
1749    size_t newPosCount = 0;
1750    Modulo<uint32_t> newPosition(mNewPosition);
1751    uint32_t updatePeriod = mUpdatePeriod;
1752    // FIXME fails for wraparound, need 64 bits
1753    if (updatePeriod > 0 && position >= newPosition) {
1754        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
1755        mNewPosition += updatePeriod * newPosCount;
1756    }
1757
1758    // Cache other fields that will be needed soon
1759    uint32_t sampleRate = mSampleRate;
1760    float speed = mPlaybackRate.mSpeed;
1761    const uint32_t notificationFrames = mNotificationFramesAct;
1762    if (mRefreshRemaining) {
1763        mRefreshRemaining = false;
1764        mRemainingFrames = notificationFrames;
1765        mRetryOnPartialBuffer = false;
1766    }
1767    size_t misalignment = mProxy->getMisalignment();
1768    uint32_t sequence = mSequence;
1769    sp<AudioTrackClientProxy> proxy = mProxy;
1770
1771    // Determine the number of new loop callback(s) that will be needed, while locked.
1772    int loopCountNotifications = 0;
1773    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1774
1775    if (mLoopCount > 0) {
1776        int loopCount;
1777        size_t bufferPosition;
1778        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1779        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1780        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1781        mLoopCountNotified = loopCount; // discard any excess notifications
1782    } else if (mLoopCount < 0) {
1783        // FIXME: We're not accurate with notification count and position with infinite looping
1784        // since loopCount from server side will always return -1 (we could decrement it).
1785        size_t bufferPosition = mStaticProxy->getBufferPosition();
1786        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1787        loopPeriod = mLoopEnd - bufferPosition;
1788    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1789        size_t bufferPosition = mStaticProxy->getBufferPosition();
1790        loopPeriod = mFrameCount - bufferPosition;
1791    }
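    // For example, with a static buffer of mFrameCount == 1000 frames, mLoopEnd == 800 and a
    // current bufferPosition of 300, loopPeriod is 800 - 300 == 500 frames while a loop is still
    // pending (loopCount > 0), or 1000 - 300 == 700 frames once looping has completed.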
1792
1793    // These fields don't need to be cached, because they are assigned only by set():
1794    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1795    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1796
1797    mLock.unlock();
1798
1799    // get anchor time to account for callbacks.
1800    const nsecs_t timeBeforeCallbacks = systemTime();
1801
1802    if (waitStreamEnd) {
1803        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
1804        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
1805        // (and make sure we don't callback for more data while we're stopping).
1806        // This helps with position, marker notifications, and track invalidation.
1807        struct timespec timeout;
1808        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1809        timeout.tv_nsec = 0;
1810
1811        status_t status = proxy->waitStreamEndDone(&timeout);
1812        switch (status) {
1813        case NO_ERROR:
1814        case DEAD_OBJECT:
1815        case TIMED_OUT:
1816            if (status != DEAD_OBJECT) {
1817                // for DEAD_OBJECT, we do not send an EVENT_STREAM_END after stop();
1818                // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
1819                mCbf(EVENT_STREAM_END, mUserData, NULL);
1820            }
1821            {
1822                AutoMutex lock(mLock);
1823                // The previously assigned value of waitStreamEnd is no longer valid,
1824                // since the mutex has been unlocked and either the callback handler
1825                // or another thread could have re-started the AudioTrack during that time.
1826                waitStreamEnd = mState == STATE_STOPPING;
1827                if (waitStreamEnd) {
1828                    mState = STATE_STOPPED;
1829                    mReleased = 0;
1830                }
1831            }
1832            if (waitStreamEnd && status != DEAD_OBJECT) {
1833                return NS_INACTIVE;
1834            }
1835            break;
1836        }
1837        return 0;
1838    }
1839
1840    // perform callbacks while unlocked
1841    if (newUnderrun) {
1842        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1843    }
1844    while (loopCountNotifications > 0) {
1845        mCbf(EVENT_LOOP_END, mUserData, NULL);
1846        --loopCountNotifications;
1847    }
1848    if (flags & CBLK_BUFFER_END) {
1849        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1850    }
1851    if (markerReached) {
1852        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1853    }
1854    while (newPosCount > 0) {
1855        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
1856        mCbf(EVENT_NEW_POS, mUserData, &temp);
1857        newPosition += updatePeriod;
1858        newPosCount--;
1859    }
1860
1861    if (mObservedSequence != sequence) {
1862        mObservedSequence = sequence;
1863        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1864        // for offloaded tracks, just wait for the upper layers to recreate the track
1865        if (isOffloadedOrDirect()) {
1866            return NS_INACTIVE;
1867        }
1868    }
1869
1870    // if inactive, then don't run me again until re-started
1871    if (!active) {
1872        return NS_INACTIVE;
1873    }
1874
1875    // Compute the estimated time until the next timed event (position, markers, loops)
1876    // FIXME only for non-compressed audio
1877    uint32_t minFrames = ~0;
1878    if (!markerReached && position < markerPosition) {
1879        minFrames = (markerPosition - position).value();
1880    }
1881    if (loopPeriod > 0 && loopPeriod < minFrames) {
1882        // loopPeriod is already adjusted for actual position.
1883        minFrames = loopPeriod;
1884    }
1885    if (updatePeriod > 0) {
1886        minFrames = min(minFrames, (newPosition - position).value());
1887    }
1888
1889    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1890    static const uint32_t kPoll = 0;
1891    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1892        minFrames = kPoll * notificationFrames;
1893    }
1894
1895    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1896    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
1897    const nsecs_t timeAfterCallbacks = systemTime();
1898
1899    // Convert frame units to time units
1900    nsecs_t ns = NS_WHENEVER;
1901    if (minFrames != (uint32_t) ~0) {
1902        ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
1903        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
1904        // TODO: Should we warn if the callback time is too long?
1905        if (ns < 0) ns = 0;
1906    }
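    // For example, minFrames == 480 at sampleRate 48000 and speed 1.0 converts to 10 ms; with
    // kWaitPeriodNs (WAIT_PERIOD_MS == 10 ms) added and the callback time subtracted, the
    // resulting wait is roughly 20 ms minus however long the callbacks above took.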
1907
1908    // If not supplying data by EVENT_MORE_DATA, then we're done
1909    if (mTransfer != TRANSFER_CALLBACK) {
1910        return ns;
1911    }
1912
1913    // EVENT_MORE_DATA callback handling.
1914    // Timing for linear pcm audio data formats can be derived directly from the
1915    // buffer fill level.
1916    // Timing for compressed data is not directly available from the buffer fill level,
1917    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
1918    // to return a certain fill level.
1919
1920    struct timespec timeout;
1921    const struct timespec *requested = &ClientProxy::kForever;
1922    if (ns != NS_WHENEVER) {
1923        timeout.tv_sec = ns / 1000000000LL;
1924        timeout.tv_nsec = ns % 1000000000LL;
1925        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1926        requested = &timeout;
1927    }
1928
1929    while (mRemainingFrames > 0) {
1930
1931        Buffer audioBuffer;
1932        audioBuffer.frameCount = mRemainingFrames;
1933        size_t nonContig;
1934        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1935        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1936                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1937        requested = &ClientProxy::kNonBlocking;
1938        size_t avail = audioBuffer.frameCount + nonContig;
1939        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1940                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1941        if (err != NO_ERROR) {
1942            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1943                    (isOffloaded() && (err == DEAD_OBJECT))) {
1944                // FIXME bug 25195759
1945                return 1000000;
1946            }
1947            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1948            return NS_NEVER;
1949        }
1950
1951        if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
1952            mRetryOnPartialBuffer = false;
1953            if (avail < mRemainingFrames) {
1954                if (ns > 0) { // account for obtain time
1955                    const nsecs_t timeNow = systemTime();
1956                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
1957                }
1958                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
1959                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
1960                    ns = myns;
1961                }
1962                return ns;
1963            }
1964        }
1965
1966        size_t reqSize = audioBuffer.size;
1967        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1968        size_t writtenSize = audioBuffer.size;
1969
1970        // Sanity check on returned size
1971        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1972            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1973                    reqSize, ssize_t(writtenSize));
1974            return NS_NEVER;
1975        }
1976
1977        if (writtenSize == 0) {
1978            // The callback is done filling buffers
1979            // Keep this thread going to handle timed events and
1980            // still try to get more data in intervals of WAIT_PERIOD_MS
1981            // but don't just loop and block the CPU, so wait
1982
1983            // mCbf(EVENT_MORE_DATA, ...) might either
1984            // (1) Block until it can fill the buffer, returning 0 size on EOS.
1985            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
1986            // (3) Return 0 size when no data is available, does not wait for more data.
1987            //
1988            // (1) and (2) occur with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
1989            // We try to compute the wait time to avoid a tight sleep-wait cycle,
1990            // especially for case (3).
1991            //
1992            // The decision to support (1) and (2) affects the sizing of mRemainingFrames
1993            // and this loop; whereas for case (3) we could simply check once with the full
1994            // buffer size and skip the loop entirely.
1995
1996            nsecs_t myns;
1997            if (audio_has_proportional_frames(mFormat)) {
1998                // time to wait based on buffer occupancy
1999                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2000                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2001                // audio flinger thread buffer size (TODO: adjust for fast tracks)
2002                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2003                // add half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2004                myns = datans + (afns / 2);
2005            } else {
2006                // FIXME: This could ping quite a bit if the buffer isn't full.
2007                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2008                myns = kWaitPeriodNs;
2009            }
2010            if (ns > 0) { // account for obtain and callback time
2011                const nsecs_t timeNow = systemTime();
2012                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2013            }
2014            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2015                ns = myns;
2016            }
2017            return ns;
2018        }
2019
2020        size_t releasedFrames = writtenSize / mFrameSize;
2021        audioBuffer.frameCount = releasedFrames;
2022        mRemainingFrames -= releasedFrames;
2023        if (misalignment >= releasedFrames) {
2024            misalignment -= releasedFrames;
2025        } else {
2026            misalignment = 0;
2027        }
2028
2029        releaseBuffer(&audioBuffer);
2030
2031        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
2032        // if the callback does not accept the full chunk
2033        if (writtenSize < reqSize) {
2034            continue;
2035        }
2036
2037        // There could be enough non-contiguous frames available to satisfy the remaining request
2038        if (mRemainingFrames <= nonContig) {
2039            continue;
2040        }
2041
2042#if 0
2043        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2044        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
2045        // that total to a sum == notificationFrames.
2046        if (0 < misalignment && misalignment <= mRemainingFrames) {
2047            mRemainingFrames = misalignment;
2048            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2049        }
2050#endif
2051
2052    }
2053    mRemainingFrames = notificationFrames;
2054    mRetryOnPartialBuffer = true;
2055
2056    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2057    return 0;
2058}
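
// The nanosecond value returned by processAudioBuffer() is interpreted by
// AudioTrackThread::threadLoop() below: 0 requests an immediate re-run, NS_INACTIVE pauses the
// callback thread until it is resumed, NS_NEVER makes the thread exit, NS_WHENEVER waits until
// wake() is called, and any other non-negative value is a sleep time in nanoseconds.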
2059
2060status_t AudioTrack::restoreTrack_l(const char *from)
2061{
2062    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2063          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2064    ++mSequence;
2065
2066    // refresh the audio configuration cache in this process to make sure we get new
2067    // output parameters and new IAudioFlinger in createTrack_l()
2068    AudioSystem::clearAudioConfigCache();
2069
2070    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2071        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2072        // reconsider enabling for linear PCM encodings when position can be preserved.
2073        return DEAD_OBJECT;
2074    }
2075
2076    // Save so we can return count since creation.
2077    mUnderrunCountOffset = getUnderrunCount_l();
2078
2079    // save the old static buffer position
2080    size_t bufferPosition = 0;
2081    int loopCount = 0;
2082    if (mStaticProxy != 0) {
2083        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2084    }
2085
2086    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2087    // following member variables: mAudioTrack, mCblkMemory and mCblk.
2088    // It will also delete the strong references on previous IAudioTrack and IMemory.
2089    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2090    status_t result = createTrack_l();
2091
2092    if (result == NO_ERROR) {
2093        // take the frames that will be lost by track recreation into account in saved position
2094        // For streaming tracks, this is the amount we obtained from the user/client
2095        // (not the number actually consumed at the server - those are already lost).
2096        if (mStaticProxy == 0) {
2097            mPosition = mReleased;
2098        }
2099        // Continue playback from last known position and restore loop.
2100        if (mStaticProxy != 0) {
2101            if (loopCount != 0) {
2102                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2103                        mLoopStart, mLoopEnd, loopCount);
2104            } else {
2105                mStaticProxy->setBufferPosition(bufferPosition);
2106                if (bufferPosition == mFrameCount) {
2107                    ALOGD("restoring track at end of static buffer");
2108                }
2109            }
2110        }
2111        if (mState == STATE_ACTIVE) {
2112            result = mAudioTrack->start();
2113        }
2114    }
2115    if (result != NO_ERROR) {
2116        ALOGW("restoreTrack_l() failed status %d", result);
2117        mState = STATE_STOPPED;
2118        mReleased = 0;
2119    }
2120
2121    return result;
2122}
2123
2124Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2125{
2126    // This is the sole place to read server consumed frames
2127    Modulo<uint32_t> newServer(mProxy->getPosition());
2128    const int32_t delta = (newServer - mServer).signedValue();
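    // The Modulo<uint32_t> subtraction stays correct across 32-bit wraparound: for example, a
    // previous mServer of 0xFFFFFF00 and a new server position of 0x00000100 give a signed
    // delta of +512 frames rather than a large negative value.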
2129    // TODO There is controversy about whether there can be "negative jitter" in server position.
2130    //      This should be investigated further, and if possible, it should be addressed.
2131    //      A more definite failure mode is infrequent polling by client.
2132    //      One could call (void)getPosition_l() in releaseBuffer(),
2133    //      so mReleased and mPosition are always lock-step as best possible.
2134    //      That should ensure delta never goes negative for infrequent polling
2135    //      unless the server has more than 2^31 frames in its buffer,
2136    //      in which case the use of uint32_t for these counters has bigger issues.
2137    ALOGE_IF(delta < 0,
2138            "detected illegal retrograde motion by the server: mServer advanced by %d",
2139            delta);
2140    mServer = newServer;
2141    if (delta > 0) { // avoid retrograde
2142        mPosition += delta;
2143    }
2144    return mPosition;
2145}
2146
2147bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
2148{
2149    // applicable for mixing tracks only (not offloaded or direct)
2150    if (mStaticProxy != 0) {
2151        return true; // static tracks do not have issues with buffer sizing.
2152    }
2153    const size_t minFrameCount =
2154            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed);
2155    ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
2156            mFrameCount, minFrameCount);
2157    return mFrameCount >= minFrameCount;
2158}
2159
2160status_t AudioTrack::setParameters(const String8& keyValuePairs)
2161{
2162    AutoMutex lock(mLock);
2163    return mAudioTrack->setParameters(keyValuePairs);
2164}
2165
2166status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2167{
2168    AutoMutex lock(mLock);
2169
2170    bool previousTimestampValid = mPreviousTimestampValid;
2171    // Set false here to cover all the error return cases.
2172    mPreviousTimestampValid = false;
2173
2174    switch (mState) {
2175    case STATE_ACTIVE:
2176    case STATE_PAUSED:
2177        break; // handle below
2178    case STATE_FLUSHED:
2179    case STATE_STOPPED:
2180        return WOULD_BLOCK;
2181    case STATE_STOPPING:
2182    case STATE_PAUSED_STOPPING:
2183        if (!isOffloaded_l()) {
2184            return INVALID_OPERATION;
2185        }
2186        break; // offloaded tracks handled below
2187    default:
2188        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2189        break;
2190    }
2191
2192    if (mCblk->mFlags & CBLK_INVALID) {
2193        const status_t status = restoreTrack_l("getTimestamp");
2194        if (status != OK) {
2195            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2196            // recommending that the track be recreated.
2197            return DEAD_OBJECT;
2198        }
2199    }
2200
2201    // The presented frame count must always lag behind the consumed frame count.
2202    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
2203
2204    status_t status;
2205    if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
2206        // use Binder to get timestamp
2207        status = mAudioTrack->getTimestamp(timestamp);
2208    } else {
2209        // read timestamp from shared memory
2210        ExtendedTimestamp ets;
2211        status = mProxy->getTimestamp(&ets);
2212        if (status == OK) {
2213            status = ets.getBestTimestamp(&timestamp);
2214        }
2215        if (status == INVALID_OPERATION) {
2216            status = WOULD_BLOCK;
2217        }
2218    }
2219    if (status != NO_ERROR) {
2220        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2221        return status;
2222    }
2223    if (isOffloadedOrDirect_l()) {
2224        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2225            // use cached paused position in case another offloaded track is running.
2226            timestamp.mPosition = mPausedPosition;
2227            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2228            return NO_ERROR;
2229        }
2230
2231        // Check whether a pending flush or stop has completed, as those commands may
2232        // be asynchronous or return near finish or exhibit glitchy behavior.
2233        //
2234        // Originally this showed up as the first timestamp being a continuation of
2235        // the previous song under gapless playback.
2236        // However, we sometimes see zero timestamps, then a glitch of
2237        // the previous song's position, and then correct timestamps afterwards.
2238        if (mStartUs != 0 && mSampleRate != 0) {
2239            static const int kTimeJitterUs = 100000; // 100 ms
2240            static const int k1SecUs = 1000000;
2241
2242            const int64_t timeNow = getNowUs();
2243
2244            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
2245                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2246                if (timestampTimeUs < mStartUs) {
2247                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
2248                }
2249                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
2250                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2251                        / ((double)mSampleRate * mPlaybackRate.mSpeed);
2252
2253                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
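                // For example, at mSampleRate 48000 and speed 1.0 a reported position of 48000
                // frames corresponds to deltaPositionByUs == 1000000 us; if only 500000 us have
                // elapsed since start, the position has advanced faster than real time by more
                // than kTimeJitterUs, so the timestamp is treated as a startup glitch below.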
2254                    // Verify that the counter can't count faster than the sample rate
2255                    // since the start time.  If greater, then that means we may have failed
2256                    // to completely flush or stop the previous playing track.
2257                    ALOGW_IF(!mTimestampStartupGlitchReported,
2258                            "getTimestamp startup glitch detected"
2259                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2260                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
2261                            timestamp.mPosition);
2262                    mTimestampStartupGlitchReported = true;
2263                    if (previousTimestampValid
2264                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2265                        timestamp = mPreviousTimestamp;
2266                        mPreviousTimestampValid = true;
2267                        return NO_ERROR;
2268                    }
2269                    return WOULD_BLOCK;
2270                }
2271                if (deltaPositionByUs != 0) {
2272                    mStartUs = 0; // don't check again, we got valid nonzero position.
2273                }
2274            } else {
2275                mStartUs = 0; // don't check again, start time expired.
2276            }
2277            mTimestampStartupGlitchReported = false;
2278        }
2279    } else {
2280        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2281        (void) updateAndGetPosition_l();
2282        // Server consumed (mServer) and presented both use the same server time base,
2283        // and server consumed is always >= presented.
2284        // The delta between these represents the number of frames in the buffer pipeline.
2285        // If this delta is greater than the client position, it means that the
2286        // presented position is still stuck at the starting line (figuratively speaking),
2287        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2288        // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
2289        // mPosition exceeds 32 bits.
2290        // TODO Remove when timestamp is updated to contain pipeline status info.
2291        const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
2292        if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
2293                && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
2294            return INVALID_OPERATION;
2295        }
2296        // Convert timestamp position from server time base to client time base.
2297        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2298        // But if we change it to 64-bit then this could fail.
2299        // Use Modulo computation here.
2300        timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
2301        // Immediately after a call to getPosition_l(), mPosition and
2302        // mServer both represent the same frame position.  mPosition is
2303        // in client's point of view, and mServer is in server's point of
2304        // view.  So the difference between them is the "fudge factor"
2305        // between client and server views due to stop() and/or new
2306        // IAudioTrack.  And timestamp.mPosition is initially in server's
2307        // point of view, so we need to apply the same fudge factor to it.
2308    }
2309
2310    // Prevent retrograde motion in timestamp.
2311    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2312    if (status == NO_ERROR) {
2313        if (previousTimestampValid) {
2314#define TIME_TO_NANOS(time) ((int64_t)time.tv_sec * 1000000000 + time.tv_nsec)
2315            const int64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
2316            const int64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
2317#undef TIME_TO_NANOS
2318            if (currentTimeNanos < previousTimeNanos) {
2319                ALOGW("retrograde timestamp time");
2320                // FIXME Consider blocking this from propagating upwards.
2321            }
2322
2323            // Looking at signed delta will work even when the timestamps
2324            // are wrapping around.
2325            int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
2326                    - mPreviousTimestamp.mPosition).signedValue();
2327            // position can bobble slightly as an artifact; this hides the bobble
2328            static const int32_t MINIMUM_POSITION_DELTA = 8;
2329            if (deltaPosition < 0) {
2330                // Only report once per position instead of spamming the log.
2331                if (!mRetrogradeMotionReported) {
2332                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2333                            deltaPosition,
2334                            timestamp.mPosition,
2335                            mPreviousTimestamp.mPosition);
2336                    mRetrogradeMotionReported = true;
2337                }
2338            } else {
2339                mRetrogradeMotionReported = false;
2340            }
2341            if (deltaPosition < MINIMUM_POSITION_DELTA) {
2342                timestamp = mPreviousTimestamp;  // Use last valid timestamp.
2343            }
2344        }
2345        mPreviousTimestamp = timestamp;
2346        mPreviousTimestampValid = true;
2347    }
2348
2349    return status;
2350}
2351
2352String8 AudioTrack::getParameters(const String8& keys)
2353{
2354    audio_io_handle_t output = getOutput();
2355    if (output != AUDIO_IO_HANDLE_NONE) {
2356        return AudioSystem::getParameters(output, keys);
2357    } else {
2358        return String8::empty();
2359    }
2360}
2361
2362bool AudioTrack::isOffloaded() const
2363{
2364    AutoMutex lock(mLock);
2365    return isOffloaded_l();
2366}
2367
2368bool AudioTrack::isDirect() const
2369{
2370    AutoMutex lock(mLock);
2371    return isDirect_l();
2372}
2373
2374bool AudioTrack::isOffloadedOrDirect() const
2375{
2376    AutoMutex lock(mLock);
2377    return isOffloadedOrDirect_l();
2378}
2379
2380
2381status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2382{
2383
2384    const size_t SIZE = 256;
2385    char buffer[SIZE];
2386    String8 result;
2387
2388    result.append(" AudioTrack::dump\n");
2389    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2390            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2391    result.append(buffer);
2392    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2393            mChannelCount, mFrameCount);
2394    result.append(buffer);
2395    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
2396            mSampleRate, mPlaybackRate.mSpeed, mStatus);
2397    result.append(buffer);
2398    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2399    result.append(buffer);
2400    ::write(fd, result.string(), result.size());
2401    return NO_ERROR;
2402}
2403
2404uint32_t AudioTrack::getUnderrunCount() const
2405{
2406    AutoMutex lock(mLock);
2407    return getUnderrunCount_l();
2408}
2409
2410uint32_t AudioTrack::getUnderrunCount_l() const
2411{
2412    return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2413}
2414
2415uint32_t AudioTrack::getUnderrunFrames() const
2416{
2417    AutoMutex lock(mLock);
2418    return mProxy->getUnderrunFrames();
2419}
2420
2421status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2422{
2423    if (callback == 0) {
2424        ALOGW("%s adding NULL callback!", __FUNCTION__);
2425        return BAD_VALUE;
2426    }
2427    AutoMutex lock(mLock);
2428    if (mDeviceCallback == callback) {
2429        ALOGW("%s adding same callback!", __FUNCTION__);
2430        return INVALID_OPERATION;
2431    }
2432    status_t status = NO_ERROR;
2433    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2434        if (mDeviceCallback != 0) {
2435            ALOGW("%s callback already present!", __FUNCTION__);
2436            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2437        }
2438        status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
2439    }
2440    mDeviceCallback = callback;
2441    return status;
2442}
2443
2444status_t AudioTrack::removeAudioDeviceCallback(
2445        const sp<AudioSystem::AudioDeviceCallback>& callback)
2446{
2447    if (callback == 0) {
2448        ALOGW("%s removing NULL callback!", __FUNCTION__);
2449        return BAD_VALUE;
2450    }
2451    AutoMutex lock(mLock);
2452    if (mDeviceCallback != callback) {
2453        ALOGW("%s removing different callback!", __FUNCTION__);
2454        return INVALID_OPERATION;
2455    }
2456    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2457        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2458    }
2459    mDeviceCallback = 0;
2460    return NO_ERROR;
2461}
2462
2463// =========================================================================
2464
2465void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2466{
2467    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2468    if (audioTrack != 0) {
2469        AutoMutex lock(audioTrack->mLock);
2470        audioTrack->mProxy->binderDied();
2471    }
2472}
2473
2474// =========================================================================
2475
2476AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2477    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2478      mIgnoreNextPausedInt(false)
2479{
2480}
2481
2482AudioTrack::AudioTrackThread::~AudioTrackThread()
2483{
2484}
2485
2486bool AudioTrack::AudioTrackThread::threadLoop()
2487{
2488    {
2489        AutoMutex _l(mMyLock);
2490        if (mPaused) {
2491            mMyCond.wait(mMyLock);
2492            // caller will check for exitPending()
2493            return true;
2494        }
2495        if (mIgnoreNextPausedInt) {
2496            mIgnoreNextPausedInt = false;
2497            mPausedInt = false;
2498        }
2499        if (mPausedInt) {
2500            if (mPausedNs > 0) {
2501                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2502            } else {
2503                mMyCond.wait(mMyLock);
2504            }
2505            mPausedInt = false;
2506            return true;
2507        }
2508    }
2509    if (exitPending()) {
2510        return false;
2511    }
2512    nsecs_t ns = mReceiver.processAudioBuffer();
2513    switch (ns) {
2514    case 0:
2515        return true;
2516    case NS_INACTIVE:
2517        pauseInternal();
2518        return true;
2519    case NS_NEVER:
2520        return false;
2521    case NS_WHENEVER:
2522        // Event driven: call wake() when callback notification conditions change.
2523        ns = INT64_MAX;
2524        // fall through
2525    default:
2526        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2527        pauseInternal(ns);
2528        return true;
2529    }
2530}
2531
2532void AudioTrack::AudioTrackThread::requestExit()
2533{
2534    // must be in this order to avoid a race condition
2535    Thread::requestExit();
2536    resume();
2537}
2538
2539void AudioTrack::AudioTrackThread::pause()
2540{
2541    AutoMutex _l(mMyLock);
2542    mPaused = true;
2543}
2544
2545void AudioTrack::AudioTrackThread::resume()
2546{
2547    AutoMutex _l(mMyLock);
2548    mIgnoreNextPausedInt = true;
2549    if (mPaused || mPausedInt) {
2550        mPaused = false;
2551        mPausedInt = false;
2552        mMyCond.signal();
2553    }
2554}
2555
2556void AudioTrack::AudioTrackThread::wake()
2557{
2558    AutoMutex _l(mMyLock);
2559    if (!mPaused) {
2560        // wake() might be called while servicing a callback - ignore the next
2561        // pause time and call processAudioBuffer.
2562        mIgnoreNextPausedInt = true;
2563        if (mPausedInt && mPausedNs > 0) {
2564            // audio track is active and internally paused with timeout.
2565            mPausedInt = false;
2566            mMyCond.signal();
2567        }
2568    }
2569}
2570
2571void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2572{
2573    AutoMutex _l(mMyLock);
2574    mPausedInt = true;
2575    mPausedNs = ns;
2576}
2577
2578} // namespace android
2579