1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/primitives.h>
26#include <binder/IPCThreadState.h>
27#include <media/AudioTrack.h>
28#include <utils/Log.h>
29#include <private/media/AudioTrackShared.h>
30#include <media/IAudioFlinger.h>
31#include <media/AudioPolicyHelper.h>
32#include <media/AudioResamplerPublic.h>
33
34#define WAIT_PERIOD_MS                  10
35#define WAIT_STREAM_END_TIMEOUT_SEC     120
36static const int kMaxLoopCountNotifications = 32;
37
38namespace android {
39// ---------------------------------------------------------------------------
40
41// TODO: Move to a separate .h
42
43template <typename T>
44static inline const T &min(const T &x, const T &y) {
45    return x < y ? x : y;
46}
47
48template <typename T>
49static inline const T &max(const T &x, const T &y) {
50    return x > y ? x : y;
51}
52
53static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
54{
55    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
56}
57
58static int64_t convertTimespecToUs(const struct timespec &tv)
59{
60    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
61}
62
63// current monotonic time in microseconds.
64static int64_t getNowUs()
65{
66    struct timespec tv;
67    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
68    return convertTimespecToUs(tv);
69}
70
71// FIXME: we don't use the pitch setting in the time stretcher (not working);
72// instead we emulate it using our sample rate converter.
73static const bool kFixPitch = true; // enable pitch fix
74static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
75{
76    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
77}
78
79static inline float adjustSpeed(float speed, float pitch)
80{
81    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
82}
83
84static inline float adjustPitch(float pitch)
85{
86    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
87}
88
89// Must match similar computation in createTrack_l in Threads.cpp.
90// TODO: Move to a common library
91static size_t calculateMinFrameCount(
92        uint32_t afLatencyMs, uint32_t afFrameCount, uint32_t afSampleRate,
93        uint32_t sampleRate, float speed)
94{
95    // Ensure that buffer depth covers at least audio hardware latency
96    uint32_t minBufCount = afLatencyMs / ((1000 * afFrameCount) / afSampleRate);
97    if (minBufCount < 2) {
98        minBufCount = 2;
99    }
100    ALOGV("calculateMinFrameCount afLatency %u  afFrameCount %u  afSampleRate %u  "
101            "sampleRate %u  speed %f  minBufCount: %u",
102            afLatencyMs, afFrameCount, afSampleRate, sampleRate, speed, minBufCount);
103    return minBufCount * sourceFramesNeededWithTimestretch(
104            sampleRate, afFrameCount, afSampleRate, speed);
105}
106
107// static
108status_t AudioTrack::getMinFrameCount(
109        size_t* frameCount,
110        audio_stream_type_t streamType,
111        uint32_t sampleRate)
112{
113    if (frameCount == NULL) {
114        return BAD_VALUE;
115    }
116
117    // FIXME handle in server, like createTrack_l(), possible missing info:
118    //          audio_io_handle_t output
119    //          audio_format_t format
120    //          audio_channel_mask_t channelMask
121    //          audio_output_flags_t flags (FAST)
122    uint32_t afSampleRate;
123    status_t status;
124    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
125    if (status != NO_ERROR) {
126        ALOGE("Unable to query output sample rate for stream type %d; status %d",
127                streamType, status);
128        return status;
129    }
130    size_t afFrameCount;
131    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
132    if (status != NO_ERROR) {
133        ALOGE("Unable to query output frame count for stream type %d; status %d",
134                streamType, status);
135        return status;
136    }
137    uint32_t afLatency;
138    status = AudioSystem::getOutputLatency(&afLatency, streamType);
139    if (status != NO_ERROR) {
140        ALOGE("Unable to query output latency for stream type %d; status %d",
141                streamType, status);
142        return status;
143    }
144
145    // When called from createTrack, speed is 1.0f (normal speed).
146    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
147    *frameCount = calculateMinFrameCount(afLatency, afFrameCount, afSampleRate, sampleRate, 1.0f);
148
149    // The formula above should always produce a non-zero value under normal circumstances:
150    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
151    // Return error in the unlikely event that it does not, as that's part of the API contract.
152    if (*frameCount == 0) {
153        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
154                streamType, sampleRate);
155        return BAD_VALUE;
156    }
157    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
158            *frameCount, afFrameCount, afSampleRate, afLatency);
159    return NO_ERROR;
160}
161
162// ---------------------------------------------------------------------------
163
164AudioTrack::AudioTrack()
165    : mStatus(NO_INIT),
166      mIsTimed(false),
167      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
168      mPreviousSchedulingGroup(SP_DEFAULT),
169      mPausedPosition(0),
170      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
171{
172    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
173    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
174    mAttributes.flags = 0x0;
175    strcpy(mAttributes.tags, "");
176}
177
178AudioTrack::AudioTrack(
179        audio_stream_type_t streamType,
180        uint32_t sampleRate,
181        audio_format_t format,
182        audio_channel_mask_t channelMask,
183        size_t frameCount,
184        audio_output_flags_t flags,
185        callback_t cbf,
186        void* user,
187        uint32_t notificationFrames,
188        int sessionId,
189        transfer_type transferType,
190        const audio_offload_info_t *offloadInfo,
191        int uid,
192        pid_t pid,
193        const audio_attributes_t* pAttributes,
194        bool doNotReconnect)
195    : mStatus(NO_INIT),
196      mIsTimed(false),
197      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
198      mPreviousSchedulingGroup(SP_DEFAULT),
199      mPausedPosition(0),
200      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
201{
202    mStatus = set(streamType, sampleRate, format, channelMask,
203            frameCount, flags, cbf, user, notificationFrames,
204            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
205            offloadInfo, uid, pid, pAttributes, doNotReconnect);
206}
207
208AudioTrack::AudioTrack(
209        audio_stream_type_t streamType,
210        uint32_t sampleRate,
211        audio_format_t format,
212        audio_channel_mask_t channelMask,
213        const sp<IMemory>& sharedBuffer,
214        audio_output_flags_t flags,
215        callback_t cbf,
216        void* user,
217        uint32_t notificationFrames,
218        int sessionId,
219        transfer_type transferType,
220        const audio_offload_info_t *offloadInfo,
221        int uid,
222        pid_t pid,
223        const audio_attributes_t* pAttributes,
224        bool doNotReconnect)
225    : mStatus(NO_INIT),
226      mIsTimed(false),
227      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
228      mPreviousSchedulingGroup(SP_DEFAULT),
229      mPausedPosition(0),
230      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
231{
232    mStatus = set(streamType, sampleRate, format, channelMask,
233            0 /*frameCount*/, flags, cbf, user, notificationFrames,
234            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
235            uid, pid, pAttributes, doNotReconnect);
236}
237
238AudioTrack::~AudioTrack()
239{
240    if (mStatus == NO_ERROR) {
241        // Make sure that callback function exits in the case where
242        // it is looping on buffer full condition in obtainBuffer().
243        // Otherwise the callback thread will never exit.
244        stop();
245        if (mAudioTrackThread != 0) {
246            mProxy->interrupt();
247            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
248            mAudioTrackThread->requestExitAndWait();
249            mAudioTrackThread.clear();
250        }
251        // No lock here: worst case we remove a NULL callback which will be a nop
252        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
253            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
254        }
255        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
256        mAudioTrack.clear();
257        mCblkMemory.clear();
258        mSharedBuffer.clear();
259        IPCThreadState::self()->flushCommands();
260        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
261                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
262        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
263    }
264}
265
266status_t AudioTrack::set(
267        audio_stream_type_t streamType,
268        uint32_t sampleRate,
269        audio_format_t format,
270        audio_channel_mask_t channelMask,
271        size_t frameCount,
272        audio_output_flags_t flags,
273        callback_t cbf,
274        void* user,
275        uint32_t notificationFrames,
276        const sp<IMemory>& sharedBuffer,
277        bool threadCanCallJava,
278        int sessionId,
279        transfer_type transferType,
280        const audio_offload_info_t *offloadInfo,
281        int uid,
282        pid_t pid,
283        const audio_attributes_t* pAttributes,
284        bool doNotReconnect)
285{
286    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
287          "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
288          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
289          sessionId, transferType, uid, pid);
290
291    switch (transferType) {
292    case TRANSFER_DEFAULT:
293        if (sharedBuffer != 0) {
294            transferType = TRANSFER_SHARED;
295        } else if (cbf == NULL || threadCanCallJava) {
296            transferType = TRANSFER_SYNC;
297        } else {
298            transferType = TRANSFER_CALLBACK;
299        }
300        break;
301    case TRANSFER_CALLBACK:
302        if (cbf == NULL || sharedBuffer != 0) {
303            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
304            return BAD_VALUE;
305        }
306        break;
307    case TRANSFER_OBTAIN:
308    case TRANSFER_SYNC:
309        if (sharedBuffer != 0) {
310            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
311            return BAD_VALUE;
312        }
313        break;
314    case TRANSFER_SHARED:
315        if (sharedBuffer == 0) {
316            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
317            return BAD_VALUE;
318        }
319        break;
320    default:
321        ALOGE("Invalid transfer type %d", transferType);
322        return BAD_VALUE;
323    }
324    mSharedBuffer = sharedBuffer;
325    mTransfer = transferType;
326    mDoNotReconnect = doNotReconnect;
327
328    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
329            sharedBuffer->size());
330
331    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
332
333    // invariant that mAudioTrack != 0 is true only after set() returns successfully
334    if (mAudioTrack != 0) {
335        ALOGE("Track already in use");
336        return INVALID_OPERATION;
337    }
338
339    // handle default values first.
340    if (streamType == AUDIO_STREAM_DEFAULT) {
341        streamType = AUDIO_STREAM_MUSIC;
342    }
343    if (pAttributes == NULL) {
344        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
345            ALOGE("Invalid stream type %d", streamType);
346            return BAD_VALUE;
347        }
348        mStreamType = streamType;
349
350    } else {
351        // stream type shouldn't be looked at, this track has audio attributes
352        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
353        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
354                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
355        mStreamType = AUDIO_STREAM_DEFAULT;
356        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
357            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
358        }
359    }
360
361    // these below should probably come from the audioFlinger too...
362    if (format == AUDIO_FORMAT_DEFAULT) {
363        format = AUDIO_FORMAT_PCM_16_BIT;
364    }
365
366    // validate parameters
367    if (!audio_is_valid_format(format)) {
368        ALOGE("Invalid format %#x", format);
369        return BAD_VALUE;
370    }
371    mFormat = format;
372
373    if (!audio_is_output_channel(channelMask)) {
374        ALOGE("Invalid channel mask %#x", channelMask);
375        return BAD_VALUE;
376    }
377    mChannelMask = channelMask;
378    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
379    mChannelCount = channelCount;
380
381    // force direct flag if format is not linear PCM
382    // or offload was requested
383    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
384            || !audio_is_linear_pcm(format)) {
385        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
386                    ? "Offload request, forcing to Direct Output"
387                    : "Not linear PCM, forcing to Direct Output");
388        flags = (audio_output_flags_t)
389                // FIXME why can't we allow direct AND fast?
390                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
391    }
392
393    // force direct flag if HW A/V sync requested
394    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
395        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
396    }
397
398    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
399        if (audio_is_linear_pcm(format)) {
400            mFrameSize = channelCount * audio_bytes_per_sample(format);
401        } else {
402            mFrameSize = sizeof(uint8_t);
403        }
404    } else {
405        ALOG_ASSERT(audio_is_linear_pcm(format));
406        mFrameSize = channelCount * audio_bytes_per_sample(format);
407        // createTrack will return an error if PCM format is not supported by server,
408        // so no need to check for specific PCM formats here
409    }
410
411    // sampling rate must be specified for direct outputs
412    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
413        return BAD_VALUE;
414    }
415    mSampleRate = sampleRate;
416    mOriginalSampleRate = sampleRate;
417    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
418
419    // Make copy of input parameter offloadInfo so that in the future:
420    //  (a) createTrack_l doesn't need it as an input parameter
421    //  (b) we can support re-creation of offloaded tracks
422    if (offloadInfo != NULL) {
423        mOffloadInfoCopy = *offloadInfo;
424        mOffloadInfo = &mOffloadInfoCopy;
425    } else {
426        mOffloadInfo = NULL;
427    }
428
429    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
430    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
431    mSendLevel = 0.0f;
432    // mFrameCount is initialized in createTrack_l
433    mReqFrameCount = frameCount;
434    mNotificationFramesReq = notificationFrames;
435    mNotificationFramesAct = 0;
436    if (sessionId == AUDIO_SESSION_ALLOCATE) {
437        mSessionId = AudioSystem::newAudioUniqueId();
438    } else {
439        mSessionId = sessionId;
440    }
441    int callingpid = IPCThreadState::self()->getCallingPid();
442    int mypid = getpid();
443    if (uid == -1 || (callingpid != mypid)) {
444        mClientUid = IPCThreadState::self()->getCallingUid();
445    } else {
446        mClientUid = uid;
447    }
448    if (pid == -1 || (callingpid != mypid)) {
449        mClientPid = callingpid;
450    } else {
451        mClientPid = pid;
452    }
453    mAuxEffectId = 0;
454    mFlags = flags;
455    mCbf = cbf;
456
457    if (cbf != NULL) {
458        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
459        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
460        // thread begins in paused state, and will not reference us until start()
461    }
462
463    // create the IAudioTrack
464    status_t status = createTrack_l();
465
466    if (status != NO_ERROR) {
467        if (mAudioTrackThread != 0) {
468            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
469            mAudioTrackThread->requestExitAndWait();
470            mAudioTrackThread.clear();
471        }
472        return status;
473    }
474
475    mStatus = NO_ERROR;
476    mState = STATE_STOPPED;
477    mUserData = user;
478    mLoopCount = 0;
479    mLoopStart = 0;
480    mLoopEnd = 0;
481    mLoopCountNotified = 0;
482    mMarkerPosition = 0;
483    mMarkerReached = false;
484    mNewPosition = 0;
485    mUpdatePeriod = 0;
486    mPosition = 0;
487    mReleased = 0;
488    mStartUs = 0;
489    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
490    mSequence = 1;
491    mObservedSequence = mSequence;
492    mInUnderrun = false;
493    mPreviousTimestampValid = false;
494    mTimestampStartupGlitchReported = false;
495    mRetrogradeMotionReported = false;
496
497    return NO_ERROR;
498}
499
500// -------------------------------------------------------------------------
501
502status_t AudioTrack::start()
503{
504    AutoMutex lock(mLock);
505
506    if (mState == STATE_ACTIVE) {
507        return INVALID_OPERATION;
508    }
509
510    mInUnderrun = true;
511
512    State previousState = mState;
513    if (previousState == STATE_PAUSED_STOPPING) {
514        mState = STATE_STOPPING;
515    } else {
516        mState = STATE_ACTIVE;
517    }
518    (void) updateAndGetPosition_l();
519    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
520        // reset current position as seen by client to 0
521        mPosition = 0;
522        mPreviousTimestampValid = false;
523        mTimestampStartupGlitchReported = false;
524        mRetrogradeMotionReported = false;
525
526        // For offloaded tracks, we don't know if the hardware counters are really zero here,
527        // since the flush is asynchronous and stop may not fully drain.
528        // We save the time when the track is started to later verify whether
529        // the counters are realistic (i.e. start from zero after this time).
530        mStartUs = getNowUs();
531
532        // force refresh of remaining frames by processAudioBuffer() as last
533        // write before stop could be partial.
534        mRefreshRemaining = true;
535    }
536    mNewPosition = mPosition + mUpdatePeriod;
537    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
538
539    sp<AudioTrackThread> t = mAudioTrackThread;
540    if (t != 0) {
541        if (previousState == STATE_STOPPING) {
542            mProxy->interrupt();
543        } else {
544            t->resume();
545        }
546    } else {
547        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
548        get_sched_policy(0, &mPreviousSchedulingGroup);
549        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
550    }
551
552    status_t status = NO_ERROR;
553    if (!(flags & CBLK_INVALID)) {
554        status = mAudioTrack->start();
555        if (status == DEAD_OBJECT) {
556            flags |= CBLK_INVALID;
557        }
558    }
559    if (flags & CBLK_INVALID) {
560        status = restoreTrack_l("start");
561    }
562
563    if (status != NO_ERROR) {
564        ALOGE("start() status %d", status);
565        mState = previousState;
566        if (t != 0) {
567            if (previousState != STATE_STOPPING) {
568                t->pause();
569            }
570        } else {
571            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
572            set_sched_policy(0, mPreviousSchedulingGroup);
573        }
574    }
575
576    return status;
577}
578
579void AudioTrack::stop()
580{
581    AutoMutex lock(mLock);
582    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
583        return;
584    }
585
586    if (isOffloaded_l()) {
587        mState = STATE_STOPPING;
588    } else {
589        mState = STATE_STOPPED;
590        mReleased = 0;
591    }
592
593    mProxy->interrupt();
594    mAudioTrack->stop();
595    // the playback head position will reset to 0, so if a marker is set, we need
596    // to activate it again
597    mMarkerReached = false;
598
599    if (mSharedBuffer != 0) {
600        // clear buffer position and loop count.
601        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
602                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
603    }
604
605    sp<AudioTrackThread> t = mAudioTrackThread;
606    if (t != 0) {
607        if (!isOffloaded_l()) {
608            t->pause();
609        }
610    } else {
611        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
612        set_sched_policy(0, mPreviousSchedulingGroup);
613    }
614}
615
616bool AudioTrack::stopped() const
617{
618    AutoMutex lock(mLock);
619    return mState != STATE_ACTIVE;
620}
621
622void AudioTrack::flush()
623{
624    if (mSharedBuffer != 0) {
625        return;
626    }
627    AutoMutex lock(mLock);
628    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
629        return;
630    }
631    flush_l();
632}
633
634void AudioTrack::flush_l()
635{
636    ALOG_ASSERT(mState != STATE_ACTIVE);
637
638    // clear playback marker and periodic update counter
639    mMarkerPosition = 0;
640    mMarkerReached = false;
641    mUpdatePeriod = 0;
642    mRefreshRemaining = true;
643
644    mState = STATE_FLUSHED;
645    mReleased = 0;
646    if (isOffloaded_l()) {
647        mProxy->interrupt();
648    }
649    mProxy->flush();
650    mAudioTrack->flush();
651}
652
653void AudioTrack::pause()
654{
655    AutoMutex lock(mLock);
656    if (mState == STATE_ACTIVE) {
657        mState = STATE_PAUSED;
658    } else if (mState == STATE_STOPPING) {
659        mState = STATE_PAUSED_STOPPING;
660    } else {
661        return;
662    }
663    mProxy->interrupt();
664    mAudioTrack->pause();
665
666    if (isOffloaded_l()) {
667        if (mOutput != AUDIO_IO_HANDLE_NONE) {
668            // An offload output can be re-used between two audio tracks having
669            // the same configuration. A timestamp query for a paused track
670            // while the other is running would return an incorrect time.
671            // To fix this, cache the playback position on a pause() and return
672            // this time when requested until the track is resumed.
673
674            // OffloadThread sends HAL pause in its threadLoop. Time saved
675            // here can be slightly off.
676
677            // TODO: check return code for getRenderPosition.
678
679            uint32_t halFrames;
680            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
681            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
682        }
683    }
684}
685
686status_t AudioTrack::setVolume(float left, float right)
687{
688    // This duplicates a test by AudioTrack JNI, but that is not the only caller
689    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
690            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
691        return BAD_VALUE;
692    }
693
694    AutoMutex lock(mLock);
695    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
696    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
697
698    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
699
700    if (isOffloaded_l()) {
701        mAudioTrack->signal();
702    }
703    return NO_ERROR;
704}
705
706status_t AudioTrack::setVolume(float volume)
707{
708    return setVolume(volume, volume);
709}
710
711status_t AudioTrack::setAuxEffectSendLevel(float level)
712{
713    // This duplicates a test by AudioTrack JNI, but that is not the only caller
714    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
715        return BAD_VALUE;
716    }
717
718    AutoMutex lock(mLock);
719    mSendLevel = level;
720    mProxy->setSendLevel(level);
721
722    return NO_ERROR;
723}
724
725void AudioTrack::getAuxEffectSendLevel(float* level) const
726{
727    if (level != NULL) {
728        *level = mSendLevel;
729    }
730}
731
732status_t AudioTrack::setSampleRate(uint32_t rate)
733{
734    AutoMutex lock(mLock);
735    if (rate == mSampleRate) {
736        return NO_ERROR;
737    }
738    if (mIsTimed || isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
739        return INVALID_OPERATION;
740    }
741    if (mOutput == AUDIO_IO_HANDLE_NONE) {
742        return NO_INIT;
743    }
744    // NOTE: it is theoretically possible, but highly unlikely, that a device change
745    // could mean a previously allowed sampling rate is no longer allowed.
746    uint32_t afSamplingRate;
747    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
748        return NO_INIT;
749    }
750    // pitch is emulated by adjusting speed and sampleRate
751    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
752    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
753        return BAD_VALUE;
754    }
755    // TODO: Should we also check if the buffer size is compatible?
756
757    mSampleRate = rate;
758    mProxy->setSampleRate(effectiveSampleRate);
759
760    return NO_ERROR;
761}
762
763uint32_t AudioTrack::getSampleRate() const
764{
765    if (mIsTimed) {
766        return 0;
767    }
768
769    AutoMutex lock(mLock);
770
771    // sample rate can be updated during playback by the offloaded decoder so we need to
772    // query the HAL and update if needed.
773// FIXME use Proxy return channel to update the rate from server and avoid polling here
774    if (isOffloadedOrDirect_l()) {
775        if (mOutput != AUDIO_IO_HANDLE_NONE) {
776            uint32_t sampleRate = 0;
777            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
778            if (status == NO_ERROR) {
779                mSampleRate = sampleRate;
780            }
781        }
782    }
783    return mSampleRate;
784}
785
786uint32_t AudioTrack::getOriginalSampleRate() const
787{
788    if (mIsTimed) {
789        return 0;
790    }
791
792    return mOriginalSampleRate;
793}
794
795status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
796{
797    AutoMutex lock(mLock);
798    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
799        return NO_ERROR;
800    }
801    if (mIsTimed || isOffloadedOrDirect_l()) {
802        return INVALID_OPERATION;
803    }
804    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
805        return INVALID_OPERATION;
806    }
807    // pitch is emulated by adjusting speed and sampleRate
808    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
809    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
810    const float effectivePitch = adjustPitch(playbackRate.mPitch);
811    AudioPlaybackRate playbackRateTemp = playbackRate;
812    playbackRateTemp.mSpeed = effectiveSpeed;
813    playbackRateTemp.mPitch = effectivePitch;
814
815    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
816        return BAD_VALUE;
817    }
818    // Check if the buffer size is compatible.
819    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
820        ALOGV("setPlaybackRate(%f, %f) failed", playbackRate.mSpeed, playbackRate.mPitch);
821        return BAD_VALUE;
822    }
823
824    // Check resampler ratios are within bounds
825    if (effectiveRate > mSampleRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
826        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
827                playbackRate.mSpeed, playbackRate.mPitch);
828        return BAD_VALUE;
829    }
830
831    if (effectiveRate * AUDIO_RESAMPLER_UP_RATIO_MAX < mSampleRate) {
832        ALOGV("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
833                        playbackRate.mSpeed, playbackRate.mPitch);
834        return BAD_VALUE;
835    }
836    mPlaybackRate = playbackRate;
837    //set effective rates
838    mProxy->setPlaybackRate(playbackRateTemp);
839    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
840    return NO_ERROR;
841}
842
843const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
844{
845    AutoMutex lock(mLock);
846    return mPlaybackRate;
847}
848
849status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
850{
851    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
852        return INVALID_OPERATION;
853    }
854
855    if (loopCount == 0) {
856        ;
857    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
858            loopEnd - loopStart >= MIN_LOOP) {
859        ;
860    } else {
861        return BAD_VALUE;
862    }
863
864    AutoMutex lock(mLock);
865    // See setPosition() regarding setting parameters such as loop points or position while active
866    if (mState == STATE_ACTIVE) {
867        return INVALID_OPERATION;
868    }
869    setLoop_l(loopStart, loopEnd, loopCount);
870    return NO_ERROR;
871}
872
873void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
874{
875    // We do not update the periodic notification point.
876    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
877    mLoopCount = loopCount;
878    mLoopEnd = loopEnd;
879    mLoopStart = loopStart;
880    mLoopCountNotified = loopCount;
881    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
882
883    // Waking the AudioTrackThread is not needed as this cannot be called when active.
884}
885
886status_t AudioTrack::setMarkerPosition(uint32_t marker)
887{
888    // The only purpose of setting marker position is to get a callback
889    if (mCbf == NULL || isOffloadedOrDirect()) {
890        return INVALID_OPERATION;
891    }
892
893    AutoMutex lock(mLock);
894    mMarkerPosition = marker;
895    mMarkerReached = false;
896
897    sp<AudioTrackThread> t = mAudioTrackThread;
898    if (t != 0) {
899        t->wake();
900    }
901    return NO_ERROR;
902}
903
904status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
905{
906    if (isOffloadedOrDirect()) {
907        return INVALID_OPERATION;
908    }
909    if (marker == NULL) {
910        return BAD_VALUE;
911    }
912
913    AutoMutex lock(mLock);
914    *marker = mMarkerPosition;
915
916    return NO_ERROR;
917}
918
919status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
920{
921    // The only purpose of setting position update period is to get a callback
922    if (mCbf == NULL || isOffloadedOrDirect()) {
923        return INVALID_OPERATION;
924    }
925
926    AutoMutex lock(mLock);
927    mNewPosition = updateAndGetPosition_l() + updatePeriod;
928    mUpdatePeriod = updatePeriod;
929
930    sp<AudioTrackThread> t = mAudioTrackThread;
931    if (t != 0) {
932        t->wake();
933    }
934    return NO_ERROR;
935}
936
937status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
938{
939    if (isOffloadedOrDirect()) {
940        return INVALID_OPERATION;
941    }
942    if (updatePeriod == NULL) {
943        return BAD_VALUE;
944    }
945
946    AutoMutex lock(mLock);
947    *updatePeriod = mUpdatePeriod;
948
949    return NO_ERROR;
950}
951
952status_t AudioTrack::setPosition(uint32_t position)
953{
954    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
955        return INVALID_OPERATION;
956    }
957    if (position > mFrameCount) {
958        return BAD_VALUE;
959    }
960
961    AutoMutex lock(mLock);
962    // Currently we require that the player is inactive before setting parameters such as position
963    // or loop points.  Otherwise, there could be a race condition: the application could read the
964    // current position, compute a new position or loop parameters, and then set that position or
965    // loop parameters but it would do the "wrong" thing since the position has continued to advance
966    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
967    // to specify how it wants to handle such scenarios.
968    if (mState == STATE_ACTIVE) {
969        return INVALID_OPERATION;
970    }
971    // After setting the position, use full update period before notification.
972    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
973    mStaticProxy->setBufferPosition(position);
974
975    // Waking the AudioTrackThread is not needed as this cannot be called when active.
976    return NO_ERROR;
977}
978
979status_t AudioTrack::getPosition(uint32_t *position)
980{
981    if (position == NULL) {
982        return BAD_VALUE;
983    }
984
985    AutoMutex lock(mLock);
986    if (isOffloadedOrDirect_l()) {
987        uint32_t dspFrames = 0;
988
989        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
990            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
991            *position = mPausedPosition;
992            return NO_ERROR;
993        }
994
995        if (mOutput != AUDIO_IO_HANDLE_NONE) {
996            uint32_t halFrames; // actually unused
997            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
998            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
999        }
1000        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
1001        // due to hardware latency. We leave this behavior for now.
1002        *position = dspFrames;
1003    } else {
1004        if (mCblk->mFlags & CBLK_INVALID) {
1005            (void) restoreTrack_l("getPosition");
1006            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
1007            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
1008        }
1009
1010        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
1011        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
1012                0 : updateAndGetPosition_l();
1013    }
1014    return NO_ERROR;
1015}
1016
1017status_t AudioTrack::getBufferPosition(uint32_t *position)
1018{
1019    if (mSharedBuffer == 0 || mIsTimed) {
1020        return INVALID_OPERATION;
1021    }
1022    if (position == NULL) {
1023        return BAD_VALUE;
1024    }
1025
1026    AutoMutex lock(mLock);
1027    *position = mStaticProxy->getBufferPosition();
1028    return NO_ERROR;
1029}
1030
1031status_t AudioTrack::reload()
1032{
1033    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
1034        return INVALID_OPERATION;
1035    }
1036
1037    AutoMutex lock(mLock);
1038    // See setPosition() regarding setting parameters such as loop points or position while active
1039    if (mState == STATE_ACTIVE) {
1040        return INVALID_OPERATION;
1041    }
1042    mNewPosition = mUpdatePeriod;
1043    (void) updateAndGetPosition_l();
1044    mPosition = 0;
1045    mPreviousTimestampValid = false;
1046#if 0
1047    // The documentation is not clear on the behavior of reload() and the restoration
1048    // of loop count. Historically we have not restored loop count, start, end,
1049    // but it makes sense if one desires to repeat playing a particular sound.
1050    if (mLoopCount != 0) {
1051        mLoopCountNotified = mLoopCount;
1052        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
1053    }
1054#endif
1055    mStaticProxy->setBufferPosition(0);
1056    return NO_ERROR;
1057}
1058
1059audio_io_handle_t AudioTrack::getOutput() const
1060{
1061    AutoMutex lock(mLock);
1062    return mOutput;
1063}
1064
1065status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1066    AutoMutex lock(mLock);
1067    if (mSelectedDeviceId != deviceId) {
1068        mSelectedDeviceId = deviceId;
1069        android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1070    }
1071    return NO_ERROR;
1072}
1073
1074audio_port_handle_t AudioTrack::getOutputDevice() {
1075    AutoMutex lock(mLock);
1076    return mSelectedDeviceId;
1077}
1078
1079audio_port_handle_t AudioTrack::getRoutedDeviceId() {
1080    AutoMutex lock(mLock);
1081    if (mOutput == AUDIO_IO_HANDLE_NONE) {
1082        return AUDIO_PORT_HANDLE_NONE;
1083    }
1084    return AudioSystem::getDeviceIdForIo(mOutput);
1085}
1086
1087status_t AudioTrack::attachAuxEffect(int effectId)
1088{
1089    AutoMutex lock(mLock);
1090    status_t status = mAudioTrack->attachAuxEffect(effectId);
1091    if (status == NO_ERROR) {
1092        mAuxEffectId = effectId;
1093    }
1094    return status;
1095}
1096
1097audio_stream_type_t AudioTrack::streamType() const
1098{
1099    if (mStreamType == AUDIO_STREAM_DEFAULT) {
1100        return audio_attributes_to_stream_type(&mAttributes);
1101    }
1102    return mStreamType;
1103}
1104
1105// -------------------------------------------------------------------------
1106
1107// must be called with mLock held
1108status_t AudioTrack::createTrack_l()
1109{
1110    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
1111    if (audioFlinger == 0) {
1112        ALOGE("Could not get audioflinger");
1113        return NO_INIT;
1114    }
1115
1116    if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
1117        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
1118    }
1119    audio_io_handle_t output;
1120    audio_stream_type_t streamType = mStreamType;
1121    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
1122
1123    status_t status;
1124    status = AudioSystem::getOutputForAttr(attr, &output,
1125                                           (audio_session_t)mSessionId, &streamType, mClientUid,
1126                                           mSampleRate, mFormat, mChannelMask,
1127                                           mFlags, mSelectedDeviceId, mOffloadInfo);
1128
1129    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
1130        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
1131              " channel mask %#x, flags %#x",
1132              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
1133        return BAD_VALUE;
1134    }
1135    {
1136    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
1137    // we must release it ourselves if anything goes wrong.
1138
1139    // Not all of these values are needed under all conditions, but it is easier to get them all
1140    status = AudioSystem::getLatency(output, &mAfLatency);
1141    if (status != NO_ERROR) {
1142        ALOGE("getLatency(%d) failed status %d", output, status);
1143        goto release;
1144    }
1145    ALOGV("createTrack_l() output %d afLatency %u", output, mAfLatency);
1146
1147    status = AudioSystem::getFrameCount(output, &mAfFrameCount);
1148    if (status != NO_ERROR) {
1149        ALOGE("getFrameCount(output=%d) status %d", output, status);
1150        goto release;
1151    }
1152
1153    status = AudioSystem::getSamplingRate(output, &mAfSampleRate);
1154    if (status != NO_ERROR) {
1155        ALOGE("getSamplingRate(output=%d) status %d", output, status);
1156        goto release;
1157    }
1158    if (mSampleRate == 0) {
1159        mSampleRate = mAfSampleRate;
1160        mOriginalSampleRate = mAfSampleRate;
1161    }
1162    // Client decides whether the track is TIMED (see below), but can only express a preference
1163    // for FAST.  Server will perform additional tests.
1164    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
1165            // either of these use cases:
1166            // use case 1: shared buffer
1167            (mSharedBuffer != 0) ||
1168            // use case 2: callback transfer mode
1169            (mTransfer == TRANSFER_CALLBACK) ||
1170            // use case 3: obtain/release mode
1171            (mTransfer == TRANSFER_OBTAIN)) &&
1172            // matching sample rate
1173            (mSampleRate == mAfSampleRate))) {
1174        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, output %u Hz",
1175                mTransfer, mSampleRate, mAfSampleRate);
1176        // once denied, do not request again if IAudioTrack is re-created
1177        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1178    }
1179
1180    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
1181    //  n = 1   fast track with single buffering; nBuffering is ignored
1182    //  n = 2   fast track with double buffering
1183    //  n = 2   normal track, (including those with sample rate conversion)
1184    //  n >= 3  very high latency or very small notification interval (unused).
1185    const uint32_t nBuffering = 2;
1186
1187    mNotificationFramesAct = mNotificationFramesReq;
1188
1189    size_t frameCount = mReqFrameCount;
1190    if (!audio_is_linear_pcm(mFormat)) {
1191
1192        if (mSharedBuffer != 0) {
1193            // Same comment as below about ignoring frameCount parameter for set()
1194            frameCount = mSharedBuffer->size();
1195        } else if (frameCount == 0) {
1196            frameCount = mAfFrameCount;
1197        }
1198        if (mNotificationFramesAct != frameCount) {
1199            mNotificationFramesAct = frameCount;
1200        }
1201    } else if (mSharedBuffer != 0) {
1202        // FIXME: Ensure client side memory buffers need
1203        // not have additional alignment beyond sample
1204        // (e.g. 16 bit stereo accessed as 32 bit frame).
1205        size_t alignment = audio_bytes_per_sample(mFormat);
1206        if (alignment & 1) {
1207            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
1208            alignment = 1;
1209        }
1210        if (mChannelCount > 1) {
1211            // More than 2 channels does not require stronger alignment than stereo
1212            alignment <<= 1;
1213        }
1214        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1215            ALOGE("Invalid buffer alignment: address %p, channel count %u",
1216                    mSharedBuffer->pointer(), mChannelCount);
1217            status = BAD_VALUE;
1218            goto release;
1219        }
1220
1221        // When initializing a shared buffer AudioTrack via constructors,
1222        // there's no frameCount parameter.
1223        // But when initializing a shared buffer AudioTrack via set(),
1224        // there _is_ a frameCount parameter.  We silently ignore it.
1225        frameCount = mSharedBuffer->size() / mFrameSize;
1226    } else {
1227        // For fast tracks the frame count calculations and checks are done by server
1228
1229        if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
1230            // for normal tracks precompute the frame count based on speed.
1231            const size_t minFrameCount = calculateMinFrameCount(
1232                    mAfLatency, mAfFrameCount, mAfSampleRate, mSampleRate,
1233                    mPlaybackRate.mSpeed);
1234            if (frameCount < minFrameCount) {
1235                frameCount = minFrameCount;
1236            }
1237        }
1238    }
1239
1240    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1241    if (mIsTimed) {
1242        trackFlags |= IAudioFlinger::TRACK_TIMED;
1243    }
1244
1245    pid_t tid = -1;
1246    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1247        trackFlags |= IAudioFlinger::TRACK_FAST;
1248        if (mAudioTrackThread != 0) {
1249            tid = mAudioTrackThread->getTid();
1250        }
1251    }
1252
1253    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1254        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1255    }
1256
1257    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1258        trackFlags |= IAudioFlinger::TRACK_DIRECT;
1259    }
1260
1261    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1262                                // but we will still need the original value also
1263    int originalSessionId = mSessionId;
1264    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1265                                                      mSampleRate,
1266                                                      mFormat,
1267                                                      mChannelMask,
1268                                                      &temp,
1269                                                      &trackFlags,
1270                                                      mSharedBuffer,
1271                                                      output,
1272                                                      tid,
1273                                                      &mSessionId,
1274                                                      mClientUid,
1275                                                      &status);
1276    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1277            "session ID changed from %d to %d", originalSessionId, mSessionId);
1278
1279    if (status != NO_ERROR) {
1280        ALOGE("AudioFlinger could not create track, status: %d", status);
1281        goto release;
1282    }
1283    ALOG_ASSERT(track != 0);
1284
1285    // AudioFlinger now owns the reference to the I/O handle,
1286    // so we are no longer responsible for releasing it.
1287
1288    sp<IMemory> iMem = track->getCblk();
1289    if (iMem == 0) {
1290        ALOGE("Could not get control block");
1291        return NO_INIT;
1292    }
1293    void *iMemPointer = iMem->pointer();
1294    if (iMemPointer == NULL) {
1295        ALOGE("Could not get control block pointer");
1296        return NO_INIT;
1297    }
1298    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1299    if (mAudioTrack != 0) {
1300        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1301        mDeathNotifier.clear();
1302    }
1303    mAudioTrack = track;
1304    mCblkMemory = iMem;
1305    IPCThreadState::self()->flushCommands();
1306
1307    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1308    mCblk = cblk;
1309    // note that temp is the (possibly revised) value of frameCount
1310    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1311        // In current design, AudioTrack client checks and ensures frame count validity before
1312        // passing it to AudioFlinger so AudioFlinger should not return a different value except
1313        // for fast track as it uses a special method of assigning frame count.
1314        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1315    }
1316    frameCount = temp;
1317
1318    mAwaitBoost = false;
1319    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1320        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1321            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1322            mAwaitBoost = true;
1323        } else {
1324            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1325            // once denied, do not request again if IAudioTrack is re-created
1326            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1327        }
1328    }
1329    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1330        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1331            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1332        } else {
1333            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1334            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1335            // FIXME This is a warning, not an error, so don't return error status
1336            //return NO_INIT;
1337        }
1338    }
1339    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1340        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
1341            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
1342        } else {
1343            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
1344            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
1345            // FIXME This is a warning, not an error, so don't return error status
1346            //return NO_INIT;
1347        }
1348    }
1349    // Make sure that application is notified with sufficient margin before underrun
1350    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1351        // Theoretically double-buffering is not required for fast tracks,
1352        // due to tighter scheduling.  But in practice, to accommodate kernels with
1353        // scheduling jitter, and apps with computation jitter, we use double-buffering
1354        // for fast tracks just like normal streaming tracks.
1355        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
1356            mNotificationFramesAct = frameCount / nBuffering;
1357        }
1358    }
1359
1360    // We retain a copy of the I/O handle, but don't own the reference
1361    mOutput = output;
1362    mRefreshRemaining = true;
1363
1364    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1365    // is the value of pointer() for the shared buffer, otherwise buffers points
1366    // immediately after the control block.  This address is for the mapping within client
1367    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1368    void* buffers;
1369    if (mSharedBuffer == 0) {
1370        buffers = cblk + 1;
1371    } else {
1372        buffers = mSharedBuffer->pointer();
1373        if (buffers == NULL) {
1374            ALOGE("Could not get buffer pointer");
1375            return NO_INIT;
1376        }
1377    }
1378
1379    mAudioTrack->attachAuxEffect(mAuxEffectId);
1380    // FIXME doesn't take into account speed or future sample rate changes (until restoreTrack)
1381    // FIXME don't believe this lie
1382    mLatency = mAfLatency + (1000*frameCount) / mSampleRate;
1383
1384    mFrameCount = frameCount;
1385    // If IAudioTrack is re-created, don't let the requested frameCount
1386    // decrease.  This can confuse clients that cache frameCount().
1387    if (frameCount > mReqFrameCount) {
1388        mReqFrameCount = frameCount;
1389    }
1390
1391    // reset server position to 0 as we have new cblk.
1392    mServer = 0;
1393
1394    // update proxy
1395    if (mSharedBuffer == 0) {
1396        mStaticProxy.clear();
1397        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1398    } else {
1399        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1400        mProxy = mStaticProxy;
1401    }
1402
1403    mProxy->setVolumeLR(gain_minifloat_pack(
1404            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1405            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1406
1407    mProxy->setSendLevel(mSendLevel);
1408    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
1409    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
1410    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
1411    mProxy->setSampleRate(effectiveSampleRate);
1412
1413    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
1414    playbackRateTemp.mSpeed = effectiveSpeed;
1415    playbackRateTemp.mPitch = effectivePitch;
1416    mProxy->setPlaybackRate(playbackRateTemp);
1417    mProxy->setMinimum(mNotificationFramesAct);
1418
1419    mDeathNotifier = new DeathNotifier(this);
1420    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1421
1422    if (mDeviceCallback != 0) {
1423        AudioSystem::addAudioDeviceCallback(mDeviceCallback, mOutput);
1424    }
1425
1426    return NO_ERROR;
1427    }
1428
1429release:
1430    AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
1431    if (status == NO_ERROR) {
1432        status = NO_INIT;
1433    }
1434    return status;
1435}
1436
1437status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1438{
1439    if (audioBuffer == NULL) {
1440        if (nonContig != NULL) {
1441            *nonContig = 0;
1442        }
1443        return BAD_VALUE;
1444    }
1445    if (mTransfer != TRANSFER_OBTAIN) {
1446        audioBuffer->frameCount = 0;
1447        audioBuffer->size = 0;
1448        audioBuffer->raw = NULL;
1449        if (nonContig != NULL) {
1450            *nonContig = 0;
1451        }
1452        return INVALID_OPERATION;
1453    }
1454
1455    const struct timespec *requested;
1456    struct timespec timeout;
1457    if (waitCount == -1) {
1458        requested = &ClientProxy::kForever;
1459    } else if (waitCount == 0) {
1460        requested = &ClientProxy::kNonBlocking;
1461    } else if (waitCount > 0) {
1462        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1463        timeout.tv_sec = ms / 1000;
1464        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1465        requested = &timeout;
1466    } else {
1467        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1468        requested = NULL;
1469    }
1470    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1471}
1472
1473status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1474        struct timespec *elapsed, size_t *nonContig)
1475{
1476    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1477    uint32_t oldSequence = 0;
1478    uint32_t newSequence;
1479
1480    Proxy::Buffer buffer;
1481    status_t status = NO_ERROR;
1482
1483    static const int32_t kMaxTries = 5;
1484    int32_t tryCounter = kMaxTries;
1485
1486    do {
1487        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1488        // keep them from going away if another thread re-creates the track during obtainBuffer()
1489        sp<AudioTrackClientProxy> proxy;
1490        sp<IMemory> iMem;
1491
1492        {   // start of lock scope
1493            AutoMutex lock(mLock);
1494
1495            newSequence = mSequence;
1496            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1497            if (status == DEAD_OBJECT) {
1498                // re-create track, unless someone else has already done so
1499                if (newSequence == oldSequence) {
1500                    status = restoreTrack_l("obtainBuffer");
1501                    if (status != NO_ERROR) {
1502                        buffer.mFrameCount = 0;
1503                        buffer.mRaw = NULL;
1504                        buffer.mNonContig = 0;
1505                        break;
1506                    }
1507                }
1508            }
1509            oldSequence = newSequence;
1510
1511            // Keep the extra references
1512            proxy = mProxy;
1513            iMem = mCblkMemory;
1514
1515            if (mState == STATE_STOPPING) {
1516                status = -EINTR;
1517                buffer.mFrameCount = 0;
1518                buffer.mRaw = NULL;
1519                buffer.mNonContig = 0;
1520                break;
1521            }
1522
1523            // Non-blocking if track is stopped or paused
1524            if (mState != STATE_ACTIVE) {
1525                requested = &ClientProxy::kNonBlocking;
1526            }
1527
1528        }   // end of lock scope
1529
1530        buffer.mFrameCount = audioBuffer->frameCount;
1531        // FIXME starts the requested timeout and elapsed over from scratch
1532        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1533
1534    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1535
1536    audioBuffer->frameCount = buffer.mFrameCount;
1537    audioBuffer->size = buffer.mFrameCount * mFrameSize;
1538    audioBuffer->raw = buffer.mRaw;
1539    if (nonContig != NULL) {
1540        *nonContig = buffer.mNonContig;
1541    }
1542    return status;
1543}
1544
1545void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1546{
1547    // FIXME add error checking on mode, by adding an internal version
1548    if (mTransfer == TRANSFER_SHARED) {
1549        return;
1550    }
1551
1552    size_t stepCount = audioBuffer->size / mFrameSize;
1553    if (stepCount == 0) {
1554        return;
1555    }
1556
1557    Proxy::Buffer buffer;
1558    buffer.mFrameCount = stepCount;
1559    buffer.mRaw = audioBuffer->raw;
1560
1561    AutoMutex lock(mLock);
1562    mReleased += stepCount;
1563    mInUnderrun = false;
1564    mProxy->releaseBuffer(&buffer);
1565
1566    // restart track if it was disabled by audioflinger due to previous underrun
1567    if (mState == STATE_ACTIVE) {
1568        audio_track_cblk_t* cblk = mCblk;
1569        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1570            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1571            // FIXME ignoring status
1572            mAudioTrack->start();
1573        }
1574    }
1575}
1576
1577// -------------------------------------------------------------------------
1578
1579ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1580{
1581    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1582        return INVALID_OPERATION;
1583    }
1584
1585    if (isDirect()) {
1586        AutoMutex lock(mLock);
1587        int32_t flags = android_atomic_and(
1588                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1589                            &mCblk->mFlags);
1590        if (flags & CBLK_INVALID) {
1591            return DEAD_OBJECT;
1592        }
1593    }
1594
1595    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1596        // Sanity-check: user is most-likely passing an error code, and it would
1597        // make the return value ambiguous (actualSize vs error).
1598        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
1599        return BAD_VALUE;
1600    }
1601
1602    size_t written = 0;
1603    Buffer audioBuffer;
1604
1605    while (userSize >= mFrameSize) {
1606        audioBuffer.frameCount = userSize / mFrameSize;
1607
1608        status_t err = obtainBuffer(&audioBuffer,
1609                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1610        if (err < 0) {
1611            if (written > 0) {
1612                break;
1613            }
1614            return ssize_t(err);
1615        }
1616
1617        size_t toWrite = audioBuffer.size;
1618        memcpy(audioBuffer.i8, buffer, toWrite);
1619        buffer = ((const char *) buffer) + toWrite;
1620        userSize -= toWrite;
1621        written += toWrite;
1622
1623        releaseBuffer(&audioBuffer);
1624    }
1625
1626    return written;
1627}
1628
1629// -------------------------------------------------------------------------
1630
1631TimedAudioTrack::TimedAudioTrack() {
1632    mIsTimed = true;
1633}
1634
1635status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1636{
1637    AutoMutex lock(mLock);
1638    status_t result = UNKNOWN_ERROR;
1639
1640#if 1
1641    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1642    // while we are accessing the cblk
1643    sp<IAudioTrack> audioTrack = mAudioTrack;
1644    sp<IMemory> iMem = mCblkMemory;
1645#endif
1646
1647    // If the track is not invalid already, try to allocate a buffer.  alloc
1648    // fails indicating that the server is dead, flag the track as invalid so
1649    // we can attempt to restore in just a bit.
1650    audio_track_cblk_t* cblk = mCblk;
1651    if (!(cblk->mFlags & CBLK_INVALID)) {
1652        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1653        if (result == DEAD_OBJECT) {
1654            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1655        }
1656    }
1657
1658    // If the track is invalid at this point, attempt to restore it. and try the
1659    // allocation one more time.
1660    if (cblk->mFlags & CBLK_INVALID) {
1661        result = restoreTrack_l("allocateTimedBuffer");
1662
1663        if (result == NO_ERROR) {
1664            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1665        }
1666    }
1667
1668    return result;
1669}
1670
1671status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1672                                           int64_t pts)
1673{
1674    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1675    {
1676        AutoMutex lock(mLock);
1677        audio_track_cblk_t* cblk = mCblk;
1678        // restart track if it was disabled by audioflinger due to previous underrun
1679        if (buffer->size() != 0 && status == NO_ERROR &&
1680                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1681            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1682            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1683            // FIXME ignoring status
1684            mAudioTrack->start();
1685        }
1686    }
1687    return status;
1688}
1689
1690status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1691                                                TargetTimeline target)
1692{
1693    return mAudioTrack->setMediaTimeTransform(xform, target);
1694}
1695
1696// -------------------------------------------------------------------------
1697
1698nsecs_t AudioTrack::processAudioBuffer()
1699{
1700    // Currently the AudioTrack thread is not created if there are no callbacks.
1701    // Would it ever make sense to run the thread, even without callbacks?
1702    // If so, then replace this by checks at each use for mCbf != NULL.
1703    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1704
1705    mLock.lock();
1706    if (mAwaitBoost) {
1707        mAwaitBoost = false;
1708        mLock.unlock();
1709        static const int32_t kMaxTries = 5;
1710        int32_t tryCounter = kMaxTries;
1711        uint32_t pollUs = 10000;
1712        do {
1713            int policy = sched_getscheduler(0);
1714            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1715                break;
1716            }
1717            usleep(pollUs);
1718            pollUs <<= 1;
1719        } while (tryCounter-- > 0);
1720        if (tryCounter < 0) {
1721            ALOGE("did not receive expected priority boost on time");
1722        }
1723        // Run again immediately
1724        return 0;
1725    }
1726
1727    // Can only reference mCblk while locked
1728    int32_t flags = android_atomic_and(
1729        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1730
1731    // Check for track invalidation
1732    if (flags & CBLK_INVALID) {
1733        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
1734        // AudioSystem cache. We should not exit here but after calling the callback so
1735        // that the upper layers can recreate the track
1736        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1737            status_t status __unused = restoreTrack_l("processAudioBuffer");
1738            // FIXME unused status
1739            // after restoration, continue below to make sure that the loop and buffer events
1740            // are notified because they have been cleared from mCblk->mFlags above.
1741        }
1742    }
1743
1744    bool waitStreamEnd = mState == STATE_STOPPING;
1745    bool active = mState == STATE_ACTIVE;
1746
1747    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1748    bool newUnderrun = false;
1749    if (flags & CBLK_UNDERRUN) {
1750#if 0
1751        // Currently in shared buffer mode, when the server reaches the end of buffer,
1752        // the track stays active in continuous underrun state.  It's up to the application
1753        // to pause or stop the track, or set the position to a new offset within buffer.
1754        // This was some experimental code to auto-pause on underrun.   Keeping it here
1755        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1756        if (mTransfer == TRANSFER_SHARED) {
1757            mState = STATE_PAUSED;
1758            active = false;
1759        }
1760#endif
1761        if (!mInUnderrun) {
1762            mInUnderrun = true;
1763            newUnderrun = true;
1764        }
1765    }
1766
1767    // Get current position of server
1768    size_t position = updateAndGetPosition_l();
1769
1770    // Manage marker callback
1771    bool markerReached = false;
1772    size_t markerPosition = mMarkerPosition;
1773    // FIXME fails for wraparound, need 64 bits
1774    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1775        mMarkerReached = markerReached = true;
1776    }
1777
1778    // Determine number of new position callback(s) that will be needed, while locked
1779    size_t newPosCount = 0;
1780    size_t newPosition = mNewPosition;
1781    size_t updatePeriod = mUpdatePeriod;
1782    // FIXME fails for wraparound, need 64 bits
1783    if (updatePeriod > 0 && position >= newPosition) {
1784        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1785        mNewPosition += updatePeriod * newPosCount;
1786    }
1787
1788    // Cache other fields that will be needed soon
1789    uint32_t sampleRate = mSampleRate;
1790    float speed = mPlaybackRate.mSpeed;
1791    const uint32_t notificationFrames = mNotificationFramesAct;
1792    if (mRefreshRemaining) {
1793        mRefreshRemaining = false;
1794        mRemainingFrames = notificationFrames;
1795        mRetryOnPartialBuffer = false;
1796    }
1797    size_t misalignment = mProxy->getMisalignment();
1798    uint32_t sequence = mSequence;
1799    sp<AudioTrackClientProxy> proxy = mProxy;
1800
1801    // Determine the number of new loop callback(s) that will be needed, while locked.
1802    int loopCountNotifications = 0;
1803    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1804
1805    if (mLoopCount > 0) {
1806        int loopCount;
1807        size_t bufferPosition;
1808        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1809        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1810        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1811        mLoopCountNotified = loopCount; // discard any excess notifications
1812    } else if (mLoopCount < 0) {
1813        // FIXME: We're not accurate with notification count and position with infinite looping
1814        // since loopCount from server side will always return -1 (we could decrement it).
1815        size_t bufferPosition = mStaticProxy->getBufferPosition();
1816        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1817        loopPeriod = mLoopEnd - bufferPosition;
1818    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1819        size_t bufferPosition = mStaticProxy->getBufferPosition();
1820        loopPeriod = mFrameCount - bufferPosition;
1821    }
1822
1823    // These fields don't need to be cached, because they are assigned only by set():
1824    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1825    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1826
1827    mLock.unlock();
1828
1829    // get anchor time to account for callbacks.
1830    const nsecs_t timeBeforeCallbacks = systemTime();
1831
1832    if (waitStreamEnd) {
1833        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
1834        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
1835        // (and make sure we don't callback for more data while we're stopping).
1836        // This helps with position, marker notifications, and track invalidation.
1837        struct timespec timeout;
1838        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1839        timeout.tv_nsec = 0;
1840
1841        status_t status = proxy->waitStreamEndDone(&timeout);
1842        switch (status) {
1843        case NO_ERROR:
1844        case DEAD_OBJECT:
1845        case TIMED_OUT:
1846            mCbf(EVENT_STREAM_END, mUserData, NULL);
1847            {
1848                AutoMutex lock(mLock);
1849                // The previously assigned value of waitStreamEnd is no longer valid,
1850                // since the mutex has been unlocked and either the callback handler
1851                // or another thread could have re-started the AudioTrack during that time.
1852                waitStreamEnd = mState == STATE_STOPPING;
1853                if (waitStreamEnd) {
1854                    mState = STATE_STOPPED;
1855                    mReleased = 0;
1856                }
1857            }
1858            if (waitStreamEnd && status != DEAD_OBJECT) {
1859               return NS_INACTIVE;
1860            }
1861            break;
1862        }
1863        return 0;
1864    }
1865
1866    // perform callbacks while unlocked
1867    if (newUnderrun) {
1868        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1869    }
1870    while (loopCountNotifications > 0) {
1871        mCbf(EVENT_LOOP_END, mUserData, NULL);
1872        --loopCountNotifications;
1873    }
1874    if (flags & CBLK_BUFFER_END) {
1875        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1876    }
1877    if (markerReached) {
1878        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1879    }
1880    while (newPosCount > 0) {
1881        size_t temp = newPosition;
1882        mCbf(EVENT_NEW_POS, mUserData, &temp);
1883        newPosition += updatePeriod;
1884        newPosCount--;
1885    }
1886
1887    if (mObservedSequence != sequence) {
1888        mObservedSequence = sequence;
1889        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1890        // for offloaded tracks, just wait for the upper layers to recreate the track
1891        if (isOffloadedOrDirect()) {
1892            return NS_INACTIVE;
1893        }
1894    }
1895
1896    // if inactive, then don't run me again until re-started
1897    if (!active) {
1898        return NS_INACTIVE;
1899    }
1900
1901    // Compute the estimated time until the next timed event (position, markers, loops)
1902    // FIXME only for non-compressed audio
1903    uint32_t minFrames = ~0;
1904    if (!markerReached && position < markerPosition) {
1905        minFrames = markerPosition - position;
1906    }
1907    if (loopPeriod > 0 && loopPeriod < minFrames) {
1908        // loopPeriod is already adjusted for actual position.
1909        minFrames = loopPeriod;
1910    }
1911    if (updatePeriod > 0) {
1912        minFrames = min(minFrames, uint32_t(newPosition - position));
1913    }
1914
1915    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1916    static const uint32_t kPoll = 0;
1917    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1918        minFrames = kPoll * notificationFrames;
1919    }
1920
1921    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1922    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
1923    const nsecs_t timeAfterCallbacks = systemTime();
1924
1925    // Convert frame units to time units
1926    nsecs_t ns = NS_WHENEVER;
1927    if (minFrames != (uint32_t) ~0) {
1928        ns = framesToNanoseconds(minFrames, sampleRate, speed) + kWaitPeriodNs;
1929        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
1930        // TODO: Should we warn if the callback time is too long?
1931        if (ns < 0) ns = 0;
1932    }
1933
1934    // If not supplying data by EVENT_MORE_DATA, then we're done
1935    if (mTransfer != TRANSFER_CALLBACK) {
1936        return ns;
1937    }
1938
1939    // EVENT_MORE_DATA callback handling.
1940    // Timing for linear pcm audio data formats can be derived directly from the
1941    // buffer fill level.
1942    // Timing for compressed data is not directly available from the buffer fill level,
1943    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
1944    // to return a certain fill level.
1945
1946    struct timespec timeout;
1947    const struct timespec *requested = &ClientProxy::kForever;
1948    if (ns != NS_WHENEVER) {
1949        timeout.tv_sec = ns / 1000000000LL;
1950        timeout.tv_nsec = ns % 1000000000LL;
1951        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1952        requested = &timeout;
1953    }
1954
1955    while (mRemainingFrames > 0) {
1956
1957        Buffer audioBuffer;
1958        audioBuffer.frameCount = mRemainingFrames;
1959        size_t nonContig;
1960        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1961        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1962                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1963        requested = &ClientProxy::kNonBlocking;
1964        size_t avail = audioBuffer.frameCount + nonContig;
1965        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1966                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1967        if (err != NO_ERROR) {
1968            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1969                    (isOffloaded() && (err == DEAD_OBJECT))) {
1970                return 0;
1971            }
1972            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1973            return NS_NEVER;
1974        }
1975
1976        if (mRetryOnPartialBuffer && audio_is_linear_pcm(mFormat)) {
1977            mRetryOnPartialBuffer = false;
1978            if (avail < mRemainingFrames) {
1979                if (ns > 0) { // account for obtain time
1980                    const nsecs_t timeNow = systemTime();
1981                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
1982                }
1983                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
1984                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
1985                    ns = myns;
1986                }
1987                return ns;
1988            }
1989        }
1990
1991        size_t reqSize = audioBuffer.size;
1992        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1993        size_t writtenSize = audioBuffer.size;
1994
1995        // Sanity check on returned size
1996        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1997            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1998                    reqSize, ssize_t(writtenSize));
1999            return NS_NEVER;
2000        }
2001
2002        if (writtenSize == 0) {
2003            // The callback is done filling buffers
2004            // Keep this thread going to handle timed events and
2005            // still try to get more data in intervals of WAIT_PERIOD_MS
2006            // but don't just loop and block the CPU, so wait
2007
2008            // mCbf(EVENT_MORE_DATA, ...) might either
2009            // (1) Block until it can fill the buffer, returning 0 size on EOS.
2010            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
2011            // (3) Return 0 size when no data is available, does not wait for more data.
2012            //
2013            // (1) and (2) occurs with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
2014            // We try to compute the wait time to avoid a tight sleep-wait cycle,
2015            // especially for case (3).
2016            //
2017            // The decision to support (1) and (2) affect the sizing of mRemainingFrames
2018            // and this loop; whereas for case (3) we could simply check once with the full
2019            // buffer size and skip the loop entirely.
2020
2021            nsecs_t myns;
2022            if (audio_is_linear_pcm(mFormat)) {
2023                // time to wait based on buffer occupancy
2024                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
2025                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
2026                // audio flinger thread buffer size (TODO: adjust for fast tracks)
2027                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
2028                // add a half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
2029                myns = datans + (afns / 2);
2030            } else {
2031                // FIXME: This could ping quite a bit if the buffer isn't full.
2032                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
2033                myns = kWaitPeriodNs;
2034            }
2035            if (ns > 0) { // account for obtain and callback time
2036                const nsecs_t timeNow = systemTime();
2037                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
2038            }
2039            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
2040                ns = myns;
2041            }
2042            return ns;
2043        }
2044
2045        size_t releasedFrames = writtenSize / mFrameSize;
2046        audioBuffer.frameCount = releasedFrames;
2047        mRemainingFrames -= releasedFrames;
2048        if (misalignment >= releasedFrames) {
2049            misalignment -= releasedFrames;
2050        } else {
2051            misalignment = 0;
2052        }
2053
2054        releaseBuffer(&audioBuffer);
2055
2056        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
2057        // if callback doesn't like to accept the full chunk
2058        if (writtenSize < reqSize) {
2059            continue;
2060        }
2061
2062        // There could be enough non-contiguous frames available to satisfy the remaining request
2063        if (mRemainingFrames <= nonContig) {
2064            continue;
2065        }
2066
2067#if 0
2068        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
2069        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
2070        // that total to a sum == notificationFrames.
2071        if (0 < misalignment && misalignment <= mRemainingFrames) {
2072            mRemainingFrames = misalignment;
2073            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
2074        }
2075#endif
2076
2077    }
2078    mRemainingFrames = notificationFrames;
2079    mRetryOnPartialBuffer = true;
2080
2081    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
2082    return 0;
2083}
2084
2085status_t AudioTrack::restoreTrack_l(const char *from)
2086{
2087    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
2088          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
2089    ++mSequence;
2090
2091    // refresh the audio configuration cache in this process to make sure we get new
2092    // output parameters and new IAudioFlinger in createTrack_l()
2093    AudioSystem::clearAudioConfigCache();
2094
2095    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
2096        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
2097        // reconsider enabling for linear PCM encodings when position can be preserved.
2098        return DEAD_OBJECT;
2099    }
2100
2101    // save the old static buffer position
2102    size_t bufferPosition = 0;
2103    int loopCount = 0;
2104    if (mStaticProxy != 0) {
2105        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
2106    }
2107
2108    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
2109    // following member variables: mAudioTrack, mCblkMemory and mCblk.
2110    // It will also delete the strong references on previous IAudioTrack and IMemory.
2111    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
2112    status_t result = createTrack_l();
2113
2114    if (result == NO_ERROR) {
2115        // take the frames that will be lost by track recreation into account in saved position
2116        // For streaming tracks, this is the amount we obtained from the user/client
2117        // (not the number actually consumed at the server - those are already lost).
2118        if (mStaticProxy == 0) {
2119            mPosition = mReleased;
2120        }
2121        // Continue playback from last known position and restore loop.
2122        if (mStaticProxy != 0) {
2123            if (loopCount != 0) {
2124                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
2125                        mLoopStart, mLoopEnd, loopCount);
2126            } else {
2127                mStaticProxy->setBufferPosition(bufferPosition);
2128                if (bufferPosition == mFrameCount) {
2129                    ALOGD("restoring track at end of static buffer");
2130                }
2131            }
2132        }
2133        if (mState == STATE_ACTIVE) {
2134            result = mAudioTrack->start();
2135        }
2136    }
2137    if (result != NO_ERROR) {
2138        ALOGW("restoreTrack_l() failed status %d", result);
2139        mState = STATE_STOPPED;
2140        mReleased = 0;
2141    }
2142
2143    return result;
2144}
2145
2146uint32_t AudioTrack::updateAndGetPosition_l()
2147{
2148    // This is the sole place to read server consumed frames
2149    uint32_t newServer = mProxy->getPosition();
2150    int32_t delta = newServer - mServer;
2151    mServer = newServer;
2152    // TODO There is controversy about whether there can be "negative jitter" in server position.
2153    //      This should be investigated further, and if possible, it should be addressed.
2154    //      A more definite failure mode is infrequent polling by client.
2155    //      One could call (void)getPosition_l() in releaseBuffer(),
2156    //      so mReleased and mPosition are always lock-step as best possible.
2157    //      That should ensure delta never goes negative for infrequent polling
2158    //      unless the server has more than 2^31 frames in its buffer,
2159    //      in which case the use of uint32_t for these counters has bigger issues.
2160    if (delta < 0) {
2161        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
2162        delta = 0;
2163    }
2164    return mPosition += (uint32_t) delta;
2165}
2166
2167bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed) const
2168{
2169    // applicable for mixing tracks only (not offloaded or direct)
2170    if (mStaticProxy != 0) {
2171        return true; // static tracks do not have issues with buffer sizing.
2172    }
2173    const size_t minFrameCount =
2174            calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed);
2175    ALOGV("isSampleRateSpeedAllowed_l mFrameCount %zu  minFrameCount %zu",
2176            mFrameCount, minFrameCount);
2177    return mFrameCount >= minFrameCount;
2178}
2179
2180status_t AudioTrack::setParameters(const String8& keyValuePairs)
2181{
2182    AutoMutex lock(mLock);
2183    return mAudioTrack->setParameters(keyValuePairs);
2184}
2185
2186status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2187{
2188    AutoMutex lock(mLock);
2189
2190    bool previousTimestampValid = mPreviousTimestampValid;
2191    // Set false here to cover all the error return cases.
2192    mPreviousTimestampValid = false;
2193
2194    // FIXME not implemented for fast tracks; should use proxy and SSQ
2195    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
2196        return INVALID_OPERATION;
2197    }
2198
2199    switch (mState) {
2200    case STATE_ACTIVE:
2201    case STATE_PAUSED:
2202        break; // handle below
2203    case STATE_FLUSHED:
2204    case STATE_STOPPED:
2205        return WOULD_BLOCK;
2206    case STATE_STOPPING:
2207    case STATE_PAUSED_STOPPING:
2208        if (!isOffloaded_l()) {
2209            return INVALID_OPERATION;
2210        }
2211        break; // offloaded tracks handled below
2212    default:
2213        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
2214        break;
2215    }
2216
2217    if (mCblk->mFlags & CBLK_INVALID) {
2218        const status_t status = restoreTrack_l("getTimestamp");
2219        if (status != OK) {
2220            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2221            // recommending that the track be recreated.
2222            return DEAD_OBJECT;
2223        }
2224    }
2225
2226    // The presented frame count must always lag behind the consumed frame count.
2227    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
2228    status_t status = mAudioTrack->getTimestamp(timestamp);
2229    if (status != NO_ERROR) {
2230        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
2231        return status;
2232    }
2233    if (isOffloadedOrDirect_l()) {
2234        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
2235            // use cached paused position in case another offloaded track is running.
2236            timestamp.mPosition = mPausedPosition;
2237            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
2238            return NO_ERROR;
2239        }
2240
2241        // Check whether a pending flush or stop has completed, as those commands may
2242        // be asynchronous or return near finish or exhibit glitchy behavior.
2243        //
2244        // Originally this showed up as the first timestamp being a continuation of
2245        // the previous song under gapless playback.
2246        // However, we sometimes see zero timestamps, then a glitch of
2247        // the previous song's position, and then correct timestamps afterwards.
2248        if (mStartUs != 0 && mSampleRate != 0) {
2249            static const int kTimeJitterUs = 100000; // 100 ms
2250            static const int k1SecUs = 1000000;
2251
2252            const int64_t timeNow = getNowUs();
2253
2254            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
2255                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
2256                if (timestampTimeUs < mStartUs) {
2257                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
2258                }
2259                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
2260                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
2261                        / ((double)mSampleRate * mPlaybackRate.mSpeed);
2262
2263                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
2264                    // Verify that the counter can't count faster than the sample rate
2265                    // since the start time.  If greater, then that means we may have failed
2266                    // to completely flush or stop the previous playing track.
2267                    ALOGW_IF(!mTimestampStartupGlitchReported,
2268                            "getTimestamp startup glitch detected"
2269                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
2270                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
2271                            timestamp.mPosition);
2272                    mTimestampStartupGlitchReported = true;
2273                    if (previousTimestampValid
2274                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
2275                        timestamp = mPreviousTimestamp;
2276                        mPreviousTimestampValid = true;
2277                        return NO_ERROR;
2278                    }
2279                    return WOULD_BLOCK;
2280                }
2281                if (deltaPositionByUs != 0) {
2282                    mStartUs = 0; // don't check again, we got valid nonzero position.
2283                }
2284            } else {
2285                mStartUs = 0; // don't check again, start time expired.
2286            }
2287            mTimestampStartupGlitchReported = false;
2288        }
2289    } else {
2290        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2291        (void) updateAndGetPosition_l();
2292        // Server consumed (mServer) and presented both use the same server time base,
2293        // and server consumed is always >= presented.
2294        // The delta between these represents the number of frames in the buffer pipeline.
2295        // If this delta between these is greater than the client position, it means that
2296        // actually presented is still stuck at the starting line (figuratively speaking),
2297        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2298        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
2299            return INVALID_OPERATION;
2300        }
2301        // Convert timestamp position from server time base to client time base.
2302        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2303        // But if we change it to 64-bit then this could fail.
2304        // If (mPosition - mServer) can be negative then should use:
2305        //   (int32_t)(mPosition - mServer)
2306        timestamp.mPosition += mPosition - mServer;
2307        // Immediately after a call to getPosition_l(), mPosition and
2308        // mServer both represent the same frame position.  mPosition is
2309        // in client's point of view, and mServer is in server's point of
2310        // view.  So the difference between them is the "fudge factor"
2311        // between client and server views due to stop() and/or new
2312        // IAudioTrack.  And timestamp.mPosition is initially in server's
2313        // point of view, so we need to apply the same fudge factor to it.
2314    }
2315
2316    // Prevent retrograde motion in timestamp.
2317    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
2318    if (status == NO_ERROR) {
2319        if (previousTimestampValid) {
2320#define TIME_TO_NANOS(time) ((uint64_t)time.tv_sec * 1000000000 + time.tv_nsec)
2321            const uint64_t previousTimeNanos = TIME_TO_NANOS(mPreviousTimestamp.mTime);
2322            const uint64_t currentTimeNanos = TIME_TO_NANOS(timestamp.mTime);
2323#undef TIME_TO_NANOS
2324            if (currentTimeNanos < previousTimeNanos) {
2325                ALOGW("retrograde timestamp time");
2326                // FIXME Consider blocking this from propagating upwards.
2327            }
2328
2329            // Looking at signed delta will work even when the timestamps
2330            // are wrapping around.
2331            int32_t deltaPosition = static_cast<int32_t>(timestamp.mPosition
2332                    - mPreviousTimestamp.mPosition);
2333            // position can bobble slightly as an artifact; this hides the bobble
2334            static const int32_t MINIMUM_POSITION_DELTA = 8;
2335            if (deltaPosition < 0) {
2336                // Only report once per position instead of spamming the log.
2337                if (!mRetrogradeMotionReported) {
2338                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
2339                            deltaPosition,
2340                            timestamp.mPosition,
2341                            mPreviousTimestamp.mPosition);
2342                    mRetrogradeMotionReported = true;
2343                }
2344            } else {
2345                mRetrogradeMotionReported = false;
2346            }
2347            if (deltaPosition < MINIMUM_POSITION_DELTA) {
2348                timestamp = mPreviousTimestamp;  // Use last valid timestamp.
2349            }
2350        }
2351        mPreviousTimestamp = timestamp;
2352        mPreviousTimestampValid = true;
2353    }
2354
2355    return status;
2356}
2357
2358String8 AudioTrack::getParameters(const String8& keys)
2359{
2360    audio_io_handle_t output = getOutput();
2361    if (output != AUDIO_IO_HANDLE_NONE) {
2362        return AudioSystem::getParameters(output, keys);
2363    } else {
2364        return String8::empty();
2365    }
2366}
2367
2368bool AudioTrack::isOffloaded() const
2369{
2370    AutoMutex lock(mLock);
2371    return isOffloaded_l();
2372}
2373
2374bool AudioTrack::isDirect() const
2375{
2376    AutoMutex lock(mLock);
2377    return isDirect_l();
2378}
2379
2380bool AudioTrack::isOffloadedOrDirect() const
2381{
2382    AutoMutex lock(mLock);
2383    return isOffloadedOrDirect_l();
2384}
2385
2386
2387status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2388{
2389
2390    const size_t SIZE = 256;
2391    char buffer[SIZE];
2392    String8 result;
2393
2394    result.append(" AudioTrack::dump\n");
2395    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2396            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2397    result.append(buffer);
2398    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2399            mChannelCount, mFrameCount);
2400    result.append(buffer);
2401    snprintf(buffer, 255, "  sample rate(%u), speed(%f), status(%d)\n",
2402            mSampleRate, mPlaybackRate.mSpeed, mStatus);
2403    result.append(buffer);
2404    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2405    result.append(buffer);
2406    ::write(fd, result.string(), result.size());
2407    return NO_ERROR;
2408}
2409
2410uint32_t AudioTrack::getUnderrunFrames() const
2411{
2412    AutoMutex lock(mLock);
2413    return mProxy->getUnderrunFrames();
2414}
2415
2416status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2417{
2418    if (callback == 0) {
2419        ALOGW("%s adding NULL callback!", __FUNCTION__);
2420        return BAD_VALUE;
2421    }
2422    AutoMutex lock(mLock);
2423    if (mDeviceCallback == callback) {
2424        ALOGW("%s adding same callback!", __FUNCTION__);
2425        return INVALID_OPERATION;
2426    }
2427    status_t status = NO_ERROR;
2428    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2429        if (mDeviceCallback != 0) {
2430            ALOGW("%s callback already present!", __FUNCTION__);
2431            AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2432        }
2433        status = AudioSystem::addAudioDeviceCallback(callback, mOutput);
2434    }
2435    mDeviceCallback = callback;
2436    return status;
2437}
2438
2439status_t AudioTrack::removeAudioDeviceCallback(
2440        const sp<AudioSystem::AudioDeviceCallback>& callback)
2441{
2442    if (callback == 0) {
2443        ALOGW("%s removing NULL callback!", __FUNCTION__);
2444        return BAD_VALUE;
2445    }
2446    AutoMutex lock(mLock);
2447    if (mDeviceCallback != callback) {
2448        ALOGW("%s removing different callback!", __FUNCTION__);
2449        return INVALID_OPERATION;
2450    }
2451    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2452        AudioSystem::removeAudioDeviceCallback(mDeviceCallback, mOutput);
2453    }
2454    mDeviceCallback = 0;
2455    return NO_ERROR;
2456}
2457
2458// =========================================================================
2459
2460void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2461{
2462    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2463    if (audioTrack != 0) {
2464        AutoMutex lock(audioTrack->mLock);
2465        audioTrack->mProxy->binderDied();
2466    }
2467}
2468
2469// =========================================================================
2470
2471AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2472    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2473      mIgnoreNextPausedInt(false)
2474{
2475}
2476
2477AudioTrack::AudioTrackThread::~AudioTrackThread()
2478{
2479}
2480
2481bool AudioTrack::AudioTrackThread::threadLoop()
2482{
2483    {
2484        AutoMutex _l(mMyLock);
2485        if (mPaused) {
2486            mMyCond.wait(mMyLock);
2487            // caller will check for exitPending()
2488            return true;
2489        }
2490        if (mIgnoreNextPausedInt) {
2491            mIgnoreNextPausedInt = false;
2492            mPausedInt = false;
2493        }
2494        if (mPausedInt) {
2495            if (mPausedNs > 0) {
2496                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2497            } else {
2498                mMyCond.wait(mMyLock);
2499            }
2500            mPausedInt = false;
2501            return true;
2502        }
2503    }
2504    if (exitPending()) {
2505        return false;
2506    }
2507    nsecs_t ns = mReceiver.processAudioBuffer();
2508    switch (ns) {
2509    case 0:
2510        return true;
2511    case NS_INACTIVE:
2512        pauseInternal();
2513        return true;
2514    case NS_NEVER:
2515        return false;
2516    case NS_WHENEVER:
2517        // Event driven: call wake() when callback notifications conditions change.
2518        ns = INT64_MAX;
2519        // fall through
2520    default:
2521        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2522        pauseInternal(ns);
2523        return true;
2524    }
2525}
2526
2527void AudioTrack::AudioTrackThread::requestExit()
2528{
2529    // must be in this order to avoid a race condition
2530    Thread::requestExit();
2531    resume();
2532}
2533
2534void AudioTrack::AudioTrackThread::pause()
2535{
2536    AutoMutex _l(mMyLock);
2537    mPaused = true;
2538}
2539
2540void AudioTrack::AudioTrackThread::resume()
2541{
2542    AutoMutex _l(mMyLock);
2543    mIgnoreNextPausedInt = true;
2544    if (mPaused || mPausedInt) {
2545        mPaused = false;
2546        mPausedInt = false;
2547        mMyCond.signal();
2548    }
2549}
2550
2551void AudioTrack::AudioTrackThread::wake()
2552{
2553    AutoMutex _l(mMyLock);
2554    if (!mPaused) {
2555        // wake() might be called while servicing a callback - ignore the next
2556        // pause time and call processAudioBuffer.
2557        mIgnoreNextPausedInt = true;
2558        if (mPausedInt && mPausedNs > 0) {
2559            // audio track is active and internally paused with timeout.
2560            mPausedInt = false;
2561            mMyCond.signal();
2562        }
2563    }
2564}
2565
2566void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2567{
2568    AutoMutex _l(mMyLock);
2569    mPausedInt = true;
2570    mPausedNs = ns;
2571}
2572
2573} // namespace android
2574