AudioTrack.cpp revision 551b5355d34aa42890811fc3606d3b63429296cd
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/primitives.h>
26#include <binder/IPCThreadState.h>
27#include <media/AudioTrack.h>
28#include <utils/Log.h>
29#include <private/media/AudioTrackShared.h>
30#include <media/IAudioFlinger.h>
31#include <media/AudioPolicyHelper.h>
32#include <media/AudioResamplerPublic.h>
33
34#define WAIT_PERIOD_MS                  10
35#define WAIT_STREAM_END_TIMEOUT_SEC     120
36static const int kMaxLoopCountNotifications = 32;
37
38namespace android {
39// ---------------------------------------------------------------------------
40
41template <typename T>
42const T &min(const T &x, const T &y) {
43    return x < y ? x : y;
44}
45
46static int64_t convertTimespecToUs(const struct timespec &tv)
47{
48    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
49}
50
51// current monotonic time in microseconds.
52static int64_t getNowUs()
53{
54    struct timespec tv;
55    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
56    return convertTimespecToUs(tv);
57}
58
59// static
60status_t AudioTrack::getMinFrameCount(
61        size_t* frameCount,
62        audio_stream_type_t streamType,
63        uint32_t sampleRate)
64{
65    if (frameCount == NULL) {
66        return BAD_VALUE;
67    }
68
69    // FIXME handle in server, like createTrack_l(), possible missing info:
70    //          audio_io_handle_t output
71    //          audio_format_t format
72    //          audio_channel_mask_t channelMask
73    //          audio_output_flags_t flags (FAST)
74    uint32_t afSampleRate;
75    status_t status;
76    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
77    if (status != NO_ERROR) {
78        ALOGE("Unable to query output sample rate for stream type %d; status %d",
79                streamType, status);
80        return status;
81    }
82    size_t afFrameCount;
83    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
84    if (status != NO_ERROR) {
85        ALOGE("Unable to query output frame count for stream type %d; status %d",
86                streamType, status);
87        return status;
88    }
89    uint32_t afLatency;
90    status = AudioSystem::getOutputLatency(&afLatency, streamType);
91    if (status != NO_ERROR) {
92        ALOGE("Unable to query output latency for stream type %d; status %d",
93                streamType, status);
94        return status;
95    }
96
97    // Ensure that buffer depth covers at least audio hardware latency
98    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
99    if (minBufCount < 2) {
100        minBufCount = 2;
101    }
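    // Editor's note (illustrative, not part of the original source): with assumed
    // mixer values afFrameCount = 960 and afSampleRate = 48000, the hardware period is
    // (1000 * 960) / 48000 = 20 ms, so an afLatency of 80 ms gives minBufCount = 4.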
102
103    *frameCount = minBufCount * sourceFramesNeeded(sampleRate, afFrameCount, afSampleRate);
104    // The formula above should always produce a non-zero value under normal circumstances:
105    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
106    // Return error in the unlikely event that it does not, as that's part of the API contract.
107    if (*frameCount == 0) {
108        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
109                streamType, sampleRate);
110        return BAD_VALUE;
111    }
112    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%u, afSampleRate=%u, afLatency=%u",
113            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
114    return NO_ERROR;
115}
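// Illustrative usage sketch (editor-added, not part of the original source): a client
// would typically query the minimum frame count before sizing its AudioTrack buffer,
// e.g. for a hypothetical 44.1 kHz music stream:
//
//     size_t minFrames = 0;
//     if (AudioTrack::getMinFrameCount(&minFrames, AUDIO_STREAM_MUSIC, 44100) == NO_ERROR) {
//         // request at least minFrames (or a multiple of it) when creating the track
//     }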
116
117// ---------------------------------------------------------------------------
118
119AudioTrack::AudioTrack()
120    : mStatus(NO_INIT),
121      mIsTimed(false),
122      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
123      mPreviousSchedulingGroup(SP_DEFAULT),
124      mPausedPosition(0)
125{
126    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
127    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
128    mAttributes.flags = 0x0;
129    strcpy(mAttributes.tags, "");
130}
131
132AudioTrack::AudioTrack(
133        audio_stream_type_t streamType,
134        uint32_t sampleRate,
135        audio_format_t format,
136        audio_channel_mask_t channelMask,
137        size_t frameCount,
138        audio_output_flags_t flags,
139        callback_t cbf,
140        void* user,
141        uint32_t notificationFrames,
142        int sessionId,
143        transfer_type transferType,
144        const audio_offload_info_t *offloadInfo,
145        int uid,
146        pid_t pid,
147        const audio_attributes_t* pAttributes)
148    : mStatus(NO_INIT),
149      mIsTimed(false),
150      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
151      mPreviousSchedulingGroup(SP_DEFAULT),
152      mPausedPosition(0)
153{
154    mStatus = set(streamType, sampleRate, format, channelMask,
155            frameCount, flags, cbf, user, notificationFrames,
156            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
157            offloadInfo, uid, pid, pAttributes);
158}
159
160AudioTrack::AudioTrack(
161        audio_stream_type_t streamType,
162        uint32_t sampleRate,
163        audio_format_t format,
164        audio_channel_mask_t channelMask,
165        const sp<IMemory>& sharedBuffer,
166        audio_output_flags_t flags,
167        callback_t cbf,
168        void* user,
169        uint32_t notificationFrames,
170        int sessionId,
171        transfer_type transferType,
172        const audio_offload_info_t *offloadInfo,
173        int uid,
174        pid_t pid,
175        const audio_attributes_t* pAttributes)
176    : mStatus(NO_INIT),
177      mIsTimed(false),
178      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
179      mPreviousSchedulingGroup(SP_DEFAULT),
180      mPausedPosition(0)
181{
182    mStatus = set(streamType, sampleRate, format, channelMask,
183            0 /*frameCount*/, flags, cbf, user, notificationFrames,
184            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
185            uid, pid, pAttributes);
186}
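// Illustrative usage sketch (editor-added, not part of the original source): a typical
// streaming client might construct and start a callback-driven track roughly as follows;
// myCallback and myCookie are hypothetical caller-provided names:
//
//     sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
//             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
//             0 /*frameCount: use default*/, AUDIO_OUTPUT_FLAG_NONE,
//             myCallback, myCookie);
//     if (track->initCheck() == NO_ERROR) {
//         track->start();
//     }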
187
188AudioTrack::~AudioTrack()
189{
190    if (mStatus == NO_ERROR) {
191        // Make sure that the callback function exits in the case where
192        // it is looping on a buffer-full condition in obtainBuffer().
193        // Otherwise the callback thread will never exit.
194        stop();
195        if (mAudioTrackThread != 0) {
196            mProxy->interrupt();
197            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
198            mAudioTrackThread->requestExitAndWait();
199            mAudioTrackThread.clear();
200        }
201        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
202        mAudioTrack.clear();
203        mCblkMemory.clear();
204        mSharedBuffer.clear();
205        IPCThreadState::self()->flushCommands();
206        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
207                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
208        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
209    }
210}
211
212status_t AudioTrack::set(
213        audio_stream_type_t streamType,
214        uint32_t sampleRate,
215        audio_format_t format,
216        audio_channel_mask_t channelMask,
217        size_t frameCount,
218        audio_output_flags_t flags,
219        callback_t cbf,
220        void* user,
221        uint32_t notificationFrames,
222        const sp<IMemory>& sharedBuffer,
223        bool threadCanCallJava,
224        int sessionId,
225        transfer_type transferType,
226        const audio_offload_info_t *offloadInfo,
227        int uid,
228        pid_t pid,
229        const audio_attributes_t* pAttributes)
230{
231    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
232          "flags #%x, notificationFrames %u, sessionId %d, transferType %d, uid %d, pid %d",
233          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
234          sessionId, transferType, uid, pid);
235
236    switch (transferType) {
237    case TRANSFER_DEFAULT:
238        if (sharedBuffer != 0) {
239            transferType = TRANSFER_SHARED;
240        } else if (cbf == NULL || threadCanCallJava) {
241            transferType = TRANSFER_SYNC;
242        } else {
243            transferType = TRANSFER_CALLBACK;
244        }
245        break;
246    case TRANSFER_CALLBACK:
247        if (cbf == NULL || sharedBuffer != 0) {
248            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
249            return BAD_VALUE;
250        }
251        break;
252    case TRANSFER_OBTAIN:
253    case TRANSFER_SYNC:
254        if (sharedBuffer != 0) {
255            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
256            return BAD_VALUE;
257        }
258        break;
259    case TRANSFER_SHARED:
260        if (sharedBuffer == 0) {
261            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
262            return BAD_VALUE;
263        }
264        break;
265    default:
266        ALOGE("Invalid transfer type %d", transferType);
267        return BAD_VALUE;
268    }
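    // Editor's note (illustrative): for example, a caller that passes TRANSFER_DEFAULT
    // together with a non-NULL cbf, no sharedBuffer and threadCanCallJava == false ends
    // up in TRANSFER_CALLBACK mode per the resolution above.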
269    mSharedBuffer = sharedBuffer;
270    mTransfer = transferType;
271
272    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
273            sharedBuffer->size());
274
275    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
276
277    // invariant that mAudioTrack != 0 is true only after set() returns successfully
278    if (mAudioTrack != 0) {
279        ALOGE("Track already in use");
280        return INVALID_OPERATION;
281    }
282
283    // handle default values first.
284    if (streamType == AUDIO_STREAM_DEFAULT) {
285        streamType = AUDIO_STREAM_MUSIC;
286    }
287    if (pAttributes == NULL) {
288        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
289            ALOGE("Invalid stream type %d", streamType);
290            return BAD_VALUE;
291        }
292        mStreamType = streamType;
293
294    } else {
295        // stream type shouldn't be looked at, this track has audio attributes
296        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
297        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
298                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
299        mStreamType = AUDIO_STREAM_DEFAULT;
300        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
301            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
302        }
303    }
304
305    // these below should probably come from the audioFlinger too...
306    if (format == AUDIO_FORMAT_DEFAULT) {
307        format = AUDIO_FORMAT_PCM_16_BIT;
308    }
309
310    // validate parameters
311    if (!audio_is_valid_format(format)) {
312        ALOGE("Invalid format %#x", format);
313        return BAD_VALUE;
314    }
315    mFormat = format;
316
317    if (!audio_is_output_channel(channelMask)) {
318        ALOGE("Invalid channel mask %#x", channelMask);
319        return BAD_VALUE;
320    }
321    mChannelMask = channelMask;
322    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
323    mChannelCount = channelCount;
324
325    // force direct flag if format is not linear PCM
326    // or offload was requested
327    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
328            || !audio_is_linear_pcm(format)) {
329        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
330                    ? "Offload request, forcing to Direct Output"
331                    : "Not linear PCM, forcing to Direct Output");
332        flags = (audio_output_flags_t)
333                // FIXME why can't we allow direct AND fast?
334                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
335    }
336
337    // force direct flag if HW A/V sync requested
338    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
339        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
340    }
341
342    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
343        if (audio_is_linear_pcm(format)) {
344            mFrameSize = channelCount * audio_bytes_per_sample(format);
345        } else {
346            mFrameSize = sizeof(uint8_t);
347        }
348    } else {
349        ALOG_ASSERT(audio_is_linear_pcm(format));
350        mFrameSize = channelCount * audio_bytes_per_sample(format);
351        // createTrack will return an error if PCM format is not supported by server,
352        // so no need to check for specific PCM formats here
353    }
354
355    // sampling rate must be specified for direct outputs
356    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
357        return BAD_VALUE;
358    }
359    mSampleRate = sampleRate;
360
361    // Make copy of input parameter offloadInfo so that in the future:
362    //  (a) createTrack_l doesn't need it as an input parameter
363    //  (b) we can support re-creation of offloaded tracks
364    if (offloadInfo != NULL) {
365        mOffloadInfoCopy = *offloadInfo;
366        mOffloadInfo = &mOffloadInfoCopy;
367    } else {
368        mOffloadInfo = NULL;
369    }
370
371    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
372    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
373    mSendLevel = 0.0f;
374    // mFrameCount is initialized in createTrack_l
375    mReqFrameCount = frameCount;
376    mNotificationFramesReq = notificationFrames;
377    mNotificationFramesAct = 0;
378    if (sessionId == AUDIO_SESSION_ALLOCATE) {
379        mSessionId = AudioSystem::newAudioUniqueId();
380    } else {
381        mSessionId = sessionId;
382    }
383    int callingpid = IPCThreadState::self()->getCallingPid();
384    int mypid = getpid();
385    if (uid == -1 || (callingpid != mypid)) {
386        mClientUid = IPCThreadState::self()->getCallingUid();
387    } else {
388        mClientUid = uid;
389    }
390    if (pid == -1 || (callingpid != mypid)) {
391        mClientPid = callingpid;
392    } else {
393        mClientPid = pid;
394    }
395    mAuxEffectId = 0;
396    mFlags = flags;
397    mCbf = cbf;
398
399    if (cbf != NULL) {
400        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
401        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
402        // thread begins in paused state, and will not reference us until start()
403    }
404
405    // create the IAudioTrack
406    status_t status = createTrack_l();
407
408    if (status != NO_ERROR) {
409        if (mAudioTrackThread != 0) {
410            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
411            mAudioTrackThread->requestExitAndWait();
412            mAudioTrackThread.clear();
413        }
414        return status;
415    }
416
417    mStatus = NO_ERROR;
418    mState = STATE_STOPPED;
419    mUserData = user;
420    mLoopCount = 0;
421    mLoopStart = 0;
422    mLoopEnd = 0;
423    mLoopCountNotified = 0;
424    mMarkerPosition = 0;
425    mMarkerReached = false;
426    mNewPosition = 0;
427    mUpdatePeriod = 0;
428    mServer = 0;
429    mPosition = 0;
430    mReleased = 0;
431    mStartUs = 0;
432    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
433    mSequence = 1;
434    mObservedSequence = mSequence;
435    mInUnderrun = false;
436
437    return NO_ERROR;
438}
439
440// -------------------------------------------------------------------------
441
442status_t AudioTrack::start()
443{
444    AutoMutex lock(mLock);
445
446    if (mState == STATE_ACTIVE) {
447        return INVALID_OPERATION;
448    }
449
450    mInUnderrun = true;
451
452    State previousState = mState;
453    if (previousState == STATE_PAUSED_STOPPING) {
454        mState = STATE_STOPPING;
455    } else {
456        mState = STATE_ACTIVE;
457    }
458    (void) updateAndGetPosition_l();
459    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
460        // reset current position as seen by client to 0
461        mPosition = 0;
462        // For offloaded tracks, we don't know if the hardware counters are really zero here,
463        // since the flush is asynchronous and stop may not fully drain.
464        // We save the time when the track is started to later verify whether
465        // the counters are realistic (i.e. start from zero after this time).
466        mStartUs = getNowUs();
467
468        // force refresh of remaining frames by processAudioBuffer() as last
469        // write before stop could be partial.
470        mRefreshRemaining = true;
471    }
472    mNewPosition = mPosition + mUpdatePeriod;
473    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
474
475    sp<AudioTrackThread> t = mAudioTrackThread;
476    if (t != 0) {
477        if (previousState == STATE_STOPPING) {
478            mProxy->interrupt();
479        } else {
480            t->resume();
481        }
482    } else {
483        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
484        get_sched_policy(0, &mPreviousSchedulingGroup);
485        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
486    }
487
488    status_t status = NO_ERROR;
489    if (!(flags & CBLK_INVALID)) {
490        status = mAudioTrack->start();
491        if (status == DEAD_OBJECT) {
492            flags |= CBLK_INVALID;
493        }
494    }
495    if (flags & CBLK_INVALID) {
496        status = restoreTrack_l("start");
497    }
498
499    if (status != NO_ERROR) {
500        ALOGE("start() status %d", status);
501        mState = previousState;
502        if (t != 0) {
503            if (previousState != STATE_STOPPING) {
504                t->pause();
505            }
506        } else {
507            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
508            set_sched_policy(0, mPreviousSchedulingGroup);
509        }
510    }
511
512    return status;
513}
514
515void AudioTrack::stop()
516{
517    AutoMutex lock(mLock);
518    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
519        return;
520    }
521
522    if (isOffloaded_l()) {
523        mState = STATE_STOPPING;
524    } else {
525        mState = STATE_STOPPED;
526        mReleased = 0;
527    }
528
529    mProxy->interrupt();
530    mAudioTrack->stop();
531    // the playback head position will reset to 0, so if a marker is set, we need
532    // to activate it again
533    mMarkerReached = false;
534
535    if (mSharedBuffer != 0) {
536        // clear buffer position and loop count.
537        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
538                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
539    }
540
541    sp<AudioTrackThread> t = mAudioTrackThread;
542    if (t != 0) {
543        if (!isOffloaded_l()) {
544            t->pause();
545        }
546    } else {
547        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
548        set_sched_policy(0, mPreviousSchedulingGroup);
549    }
550}
551
552bool AudioTrack::stopped() const
553{
554    AutoMutex lock(mLock);
555    return mState != STATE_ACTIVE;
556}
557
558void AudioTrack::flush()
559{
560    if (mSharedBuffer != 0) {
561        return;
562    }
563    AutoMutex lock(mLock);
564    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
565        return;
566    }
567    flush_l();
568}
569
570void AudioTrack::flush_l()
571{
572    ALOG_ASSERT(mState != STATE_ACTIVE);
573
574    // clear playback marker and periodic update counter
575    mMarkerPosition = 0;
576    mMarkerReached = false;
577    mUpdatePeriod = 0;
578    mRefreshRemaining = true;
579
580    mState = STATE_FLUSHED;
581    mReleased = 0;
582    if (isOffloaded_l()) {
583        mProxy->interrupt();
584    }
585    mProxy->flush();
586    mAudioTrack->flush();
587}
588
589void AudioTrack::pause()
590{
591    AutoMutex lock(mLock);
592    if (mState == STATE_ACTIVE) {
593        mState = STATE_PAUSED;
594    } else if (mState == STATE_STOPPING) {
595        mState = STATE_PAUSED_STOPPING;
596    } else {
597        return;
598    }
599    mProxy->interrupt();
600    mAudioTrack->pause();
601
602    if (isOffloaded_l()) {
603        if (mOutput != AUDIO_IO_HANDLE_NONE) {
604            // An offload output can be re-used between two audio tracks having
605            // the same configuration. A timestamp query for a paused track
606            // while the other is running would return an incorrect time.
607            // To fix this, cache the playback position on a pause() and return
608            // this time when requested until the track is resumed.
609
610            // OffloadThread sends HAL pause in its threadLoop. Time saved
611            // here can be slightly off.
612
613            // TODO: check return code for getRenderPosition.
614
615            uint32_t halFrames;
616            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
617            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
618        }
619    }
620}
621
622status_t AudioTrack::setVolume(float left, float right)
623{
624    // This duplicates a test by AudioTrack JNI, but that is not the only caller
625    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
626            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
627        return BAD_VALUE;
628    }
629
630    AutoMutex lock(mLock);
631    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
632    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
633
634    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
635
636    if (isOffloaded_l()) {
637        mAudioTrack->signal();
638    }
639    return NO_ERROR;
640}
641
642status_t AudioTrack::setVolume(float volume)
643{
644    return setVolume(volume, volume);
645}
646
647status_t AudioTrack::setAuxEffectSendLevel(float level)
648{
649    // This duplicates a test by AudioTrack JNI, but that is not the only caller
650    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
651        return BAD_VALUE;
652    }
653
654    AutoMutex lock(mLock);
655    mSendLevel = level;
656    mProxy->setSendLevel(level);
657
658    return NO_ERROR;
659}
660
661void AudioTrack::getAuxEffectSendLevel(float* level) const
662{
663    if (level != NULL) {
664        *level = mSendLevel;
665    }
666}
667
668status_t AudioTrack::setSampleRate(uint32_t rate)
669{
670    AutoMutex lock(mLock);
671    if (rate == mSampleRate) {
672        return NO_ERROR;
673    }
674    if (mIsTimed || isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
675        return INVALID_OPERATION;
676    }
677    if (mOutput == AUDIO_IO_HANDLE_NONE) {
678        return NO_INIT;
679    }
680    // NOTE: it is theoretically possible, but highly unlikely, that a device change
681    // could mean a previously allowed sampling rate is no longer allowed.
682    uint32_t afSamplingRate;
683    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
684        return NO_INIT;
685    }
686    if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
687        return BAD_VALUE;
688    }
689
690    mSampleRate = rate;
691    mProxy->setSampleRate(rate);
692
693    return NO_ERROR;
694}
695
696uint32_t AudioTrack::getSampleRate() const
697{
698    if (mIsTimed) {
699        return 0;
700    }
701
702    AutoMutex lock(mLock);
703
704    // sample rate can be updated during playback by the offloaded decoder so we need to
705    // query the HAL and update if needed.
706// FIXME use Proxy return channel to update the rate from server and avoid polling here
707    if (isOffloadedOrDirect_l()) {
708        if (mOutput != AUDIO_IO_HANDLE_NONE) {
709            uint32_t sampleRate = 0;
710            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
711            if (status == NO_ERROR) {
712                mSampleRate = sampleRate;
713            }
714        }
715    }
716    return mSampleRate;
717}
718
719status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
720{
721    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
722        return INVALID_OPERATION;
723    }
724
725    if (loopCount == 0) {
726        ;
727    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
728            loopEnd - loopStart >= MIN_LOOP) {
729        ;
730    } else {
731        return BAD_VALUE;
732    }
733
734    AutoMutex lock(mLock);
735    // See setPosition() regarding setting parameters such as loop points or position while active
736    if (mState == STATE_ACTIVE) {
737        return INVALID_OPERATION;
738    }
739    setLoop_l(loopStart, loopEnd, loopCount);
740    return NO_ERROR;
741}
742
743void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
744{
745    // We do not update the periodic notification point.
746    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
747    mLoopCount = loopCount;
748    mLoopEnd = loopEnd;
749    mLoopStart = loopStart;
750    mLoopCountNotified = loopCount;
751    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
752
753    // Waking the AudioTrackThread is not needed as this cannot be called when active.
754}
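// Editor's note (illustrative, not part of the original source): for a static track a
// caller could loop the whole shared buffer indefinitely with something like
//
//     track->setLoop(0 /*loopStart*/, track->frameCount() /*loopEnd*/, -1 /*infinite*/);
//
// subject to the MIN_LOOP and inactive-state checks enforced above.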
755
756status_t AudioTrack::setMarkerPosition(uint32_t marker)
757{
758    // The only purpose of setting marker position is to get a callback
759    if (mCbf == NULL || isOffloadedOrDirect()) {
760        return INVALID_OPERATION;
761    }
762
763    AutoMutex lock(mLock);
764    mMarkerPosition = marker;
765    mMarkerReached = false;
766
767    sp<AudioTrackThread> t = mAudioTrackThread;
768    if (t != 0) {
769        t->wake();
770    }
771    return NO_ERROR;
772}
773
774status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
775{
776    if (isOffloadedOrDirect()) {
777        return INVALID_OPERATION;
778    }
779    if (marker == NULL) {
780        return BAD_VALUE;
781    }
782
783    AutoMutex lock(mLock);
784    *marker = mMarkerPosition;
785
786    return NO_ERROR;
787}
788
789status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
790{
791    // The only purpose of setting position update period is to get a callback
792    if (mCbf == NULL || isOffloadedOrDirect()) {
793        return INVALID_OPERATION;
794    }
795
796    AutoMutex lock(mLock);
797    mNewPosition = updateAndGetPosition_l() + updatePeriod;
798    mUpdatePeriod = updatePeriod;
799
800    sp<AudioTrackThread> t = mAudioTrackThread;
801    if (t != 0) {
802        t->wake();
803    }
804    return NO_ERROR;
805}
806
807status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
808{
809    if (isOffloadedOrDirect()) {
810        return INVALID_OPERATION;
811    }
812    if (updatePeriod == NULL) {
813        return BAD_VALUE;
814    }
815
816    AutoMutex lock(mLock);
817    *updatePeriod = mUpdatePeriod;
818
819    return NO_ERROR;
820}
821
822status_t AudioTrack::setPosition(uint32_t position)
823{
824    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
825        return INVALID_OPERATION;
826    }
827    if (position > mFrameCount) {
828        return BAD_VALUE;
829    }
830
831    AutoMutex lock(mLock);
832    // Currently we require that the player is inactive before setting parameters such as position
833    // or loop points.  Otherwise, there could be a race condition: the application could read the
834    // current position, compute a new position or loop parameters, and then set that position or
835    // loop parameters but it would do the "wrong" thing since the position has continued to advance
836    // in the meantime.  If we ever provide a sequencer in server, we could allow a way for the app
837    // to specify how it wants to handle such scenarios.
838    if (mState == STATE_ACTIVE) {
839        return INVALID_OPERATION;
840    }
841    // After setting the position, use full update period before notification.
842    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
843    mStaticProxy->setBufferPosition(position);
844
845    // Waking the AudioTrackThread is not needed as this cannot be called when active.
846    return NO_ERROR;
847}
848
849status_t AudioTrack::getPosition(uint32_t *position)
850{
851    if (position == NULL) {
852        return BAD_VALUE;
853    }
854
855    AutoMutex lock(mLock);
856    if (isOffloadedOrDirect_l()) {
857        uint32_t dspFrames = 0;
858
859        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
860            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
861            *position = mPausedPosition;
862            return NO_ERROR;
863        }
864
865        if (mOutput != AUDIO_IO_HANDLE_NONE) {
866            uint32_t halFrames;
867            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
868        }
869        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
870        // due to hardware latency. We leave this behavior for now.
871        *position = dspFrames;
872    } else {
873        if (mCblk->mFlags & CBLK_INVALID) {
874            restoreTrack_l("getPosition");
875        }
876
877        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
878        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
879                0 : updateAndGetPosition_l();
880    }
881    return NO_ERROR;
882}
883
884status_t AudioTrack::getBufferPosition(uint32_t *position)
885{
886    if (mSharedBuffer == 0 || mIsTimed) {
887        return INVALID_OPERATION;
888    }
889    if (position == NULL) {
890        return BAD_VALUE;
891    }
892
893    AutoMutex lock(mLock);
894    *position = mStaticProxy->getBufferPosition();
895    return NO_ERROR;
896}
897
898status_t AudioTrack::reload()
899{
900    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
901        return INVALID_OPERATION;
902    }
903
904    AutoMutex lock(mLock);
905    // See setPosition() regarding setting parameters such as loop points or position while active
906    if (mState == STATE_ACTIVE) {
907        return INVALID_OPERATION;
908    }
909    mNewPosition = mUpdatePeriod;
910    (void) updateAndGetPosition_l();
911    mPosition = 0;
912#if 0
913    // The documentation is not clear on the behavior of reload() and the restoration
914    // of loop count. Historically we have not restored loop count, start, end,
915    // but it makes sense if one desires to repeat playing a particular sound.
916    if (mLoopCount != 0) {
917        mLoopCountNotified = mLoopCount;
918        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
919    }
920#endif
921    mStaticProxy->setBufferPosition(0);
922    return NO_ERROR;
923}
924
925audio_io_handle_t AudioTrack::getOutput() const
926{
927    AutoMutex lock(mLock);
928    return mOutput;
929}
930
931status_t AudioTrack::attachAuxEffect(int effectId)
932{
933    AutoMutex lock(mLock);
934    status_t status = mAudioTrack->attachAuxEffect(effectId);
935    if (status == NO_ERROR) {
936        mAuxEffectId = effectId;
937    }
938    return status;
939}
940
941audio_stream_type_t AudioTrack::streamType() const
942{
943    if (mStreamType == AUDIO_STREAM_DEFAULT) {
944        return audio_attributes_to_stream_type(&mAttributes);
945    }
946    return mStreamType;
947}
948
949// -------------------------------------------------------------------------
950
951// must be called with mLock held
952status_t AudioTrack::createTrack_l()
953{
954    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
955    if (audioFlinger == 0) {
956        ALOGE("Could not get audioflinger");
957        return NO_INIT;
958    }
959
960    audio_io_handle_t output;
961    audio_stream_type_t streamType = mStreamType;
962    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
963    status_t status = AudioSystem::getOutputForAttr(attr, &output,
964                                                    (audio_session_t)mSessionId, &streamType,
965                                                    mSampleRate, mFormat, mChannelMask,
966                                                    mFlags, mOffloadInfo);
967
968
969    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
970        ALOGE("Could not get audio output for session %d, stream type %d, usage %d, sample rate %u, format %#x,"
971              " channel mask %#x, flags %#x",
972              mSessionId, streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
973        return BAD_VALUE;
974    }
975    {
976    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
977    // we must release it ourselves if anything goes wrong.
978
979    // Not all of these values are needed under all conditions, but it is easier to get them all
980
981    uint32_t afLatency;
982    status = AudioSystem::getLatency(output, &afLatency);
983    if (status != NO_ERROR) {
984        ALOGE("getLatency(%d) failed status %d", output, status);
985        goto release;
986    }
987    ALOGV("createTrack_l() output %d afLatency %u", output, afLatency);
988
989    size_t afFrameCount;
990    status = AudioSystem::getFrameCount(output, &afFrameCount);
991    if (status != NO_ERROR) {
992        ALOGE("getFrameCount(output=%d) status %d", output, status);
993        goto release;
994    }
995
996    uint32_t afSampleRate;
997    status = AudioSystem::getSamplingRate(output, &afSampleRate);
998    if (status != NO_ERROR) {
999        ALOGE("getSamplingRate(output=%d) status %d", output, status);
1000        goto release;
1001    }
1002    if (mSampleRate == 0) {
1003        mSampleRate = afSampleRate;
1004    }
1005    // Client decides whether the track is TIMED (see below), but can only express a preference
1006    // for FAST.  Server will perform additional tests.
1007    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
1008            // either of these use cases:
1009            // use case 1: shared buffer
1010            (mSharedBuffer != 0) ||
1011            // use case 2: callback transfer mode
1012            (mTransfer == TRANSFER_CALLBACK) ||
1013            // use case 3: obtain/release mode
1014            (mTransfer == TRANSFER_OBTAIN)) &&
1015            // matching sample rate
1016            (mSampleRate == afSampleRate))) {
1017        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client; transfer %d, track %u Hz, output %u Hz",
1018                mTransfer, mSampleRate, afSampleRate);
1019        // once denied, do not request again if IAudioTrack is re-created
1020        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1021    }
1022
1023    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
1024    //  n = 1   fast track with single buffering; nBuffering is ignored
1025    //  n = 2   fast track with double buffering
1026    //  n = 2   normal track, (including those with sample rate conversion)
1027    //  n >= 3  very high latency or very small notification interval (unused).
1028    const uint32_t nBuffering = 2;
1029
1030    mNotificationFramesAct = mNotificationFramesReq;
1031
1032    size_t frameCount = mReqFrameCount;
1033    if (!audio_is_linear_pcm(mFormat)) {
1034
1035        if (mSharedBuffer != 0) {
1036            // Same comment as below about ignoring frameCount parameter for set()
1037            frameCount = mSharedBuffer->size();
1038        } else if (frameCount == 0) {
1039            frameCount = afFrameCount;
1040        }
1041        if (mNotificationFramesAct != frameCount) {
1042            mNotificationFramesAct = frameCount;
1043        }
1044    } else if (mSharedBuffer != 0) {
1045        // FIXME: Ensure client side memory buffers need
1046        // not have additional alignment beyond sample
1047        // (e.g. 16 bit stereo accessed as 32 bit frame).
1048        size_t alignment = audio_bytes_per_sample(mFormat);
1049        if (alignment & 1) {
1050            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
1051            alignment = 1;
1052        }
1053        if (mChannelCount > 1) {
1054            // More than 2 channels does not require stronger alignment than stereo
1055            alignment <<= 1;
1056        }
1057        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1058            ALOGE("Invalid buffer alignment: address %p, channel count %u",
1059                    mSharedBuffer->pointer(), mChannelCount);
1060            status = BAD_VALUE;
1061            goto release;
1062        }
1063
1064        // When initializing a shared buffer AudioTrack via constructors,
1065        // there's no frameCount parameter.
1066        // But when initializing a shared buffer AudioTrack via set(),
1067        // there _is_ a frameCount parameter.  We silently ignore it.
1068        frameCount = mSharedBuffer->size() / mFrameSize;
1069    } else {
1070        // For fast and normal streaming tracks,
1071        // the frame count calculations and checks are done by server
1072    }
1073
1074    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1075    if (mIsTimed) {
1076        trackFlags |= IAudioFlinger::TRACK_TIMED;
1077    }
1078
1079    pid_t tid = -1;
1080    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1081        trackFlags |= IAudioFlinger::TRACK_FAST;
1082        if (mAudioTrackThread != 0) {
1083            tid = mAudioTrackThread->getTid();
1084        }
1085    }
1086
1087    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1088        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1089    }
1090
1091    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1092        trackFlags |= IAudioFlinger::TRACK_DIRECT;
1093    }
1094
1095    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1096                                // but we will still need the original value also
1097    int originalSessionId = mSessionId;
1098    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1099                                                      mSampleRate,
1100                                                      mFormat,
1101                                                      mChannelMask,
1102                                                      &temp,
1103                                                      &trackFlags,
1104                                                      mSharedBuffer,
1105                                                      output,
1106                                                      tid,
1107                                                      &mSessionId,
1108                                                      mClientUid,
1109                                                      &status);
1110    ALOGE_IF(originalSessionId != AUDIO_SESSION_ALLOCATE && mSessionId != originalSessionId,
1111            "session ID changed from %d to %d", originalSessionId, mSessionId);
1112
1113    if (status != NO_ERROR) {
1114        ALOGE("AudioFlinger could not create track, status: %d", status);
1115        goto release;
1116    }
1117    ALOG_ASSERT(track != 0);
1118
1119    // AudioFlinger now owns the reference to the I/O handle,
1120    // so we are no longer responsible for releasing it.
1121
1122    sp<IMemory> iMem = track->getCblk();
1123    if (iMem == 0) {
1124        ALOGE("Could not get control block");
1125        return NO_INIT;
1126    }
1127    void *iMemPointer = iMem->pointer();
1128    if (iMemPointer == NULL) {
1129        ALOGE("Could not get control block pointer");
1130        return NO_INIT;
1131    }
1132    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1133    if (mAudioTrack != 0) {
1134        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
1135        mDeathNotifier.clear();
1136    }
1137    mAudioTrack = track;
1138    mCblkMemory = iMem;
1139    IPCThreadState::self()->flushCommands();
1140
1141    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1142    mCblk = cblk;
1143    // note that temp is the (possibly revised) value of frameCount
1144    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1145        // In current design, AudioTrack client checks and ensures frame count validity before
1146        // passing it to AudioFlinger so AudioFlinger should not return a different value except
1147        // for fast track as it uses a special method of assigning frame count.
1148        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1149    }
1150    frameCount = temp;
1151
1152    mAwaitBoost = false;
1153    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1154        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1155            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1156            mAwaitBoost = true;
1157        } else {
1158            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1159            // once denied, do not request again if IAudioTrack is re-created
1160            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1161        }
1162    }
1163    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1164        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1165            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1166        } else {
1167            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1168            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1169            // FIXME This is a warning, not an error, so don't return error status
1170            //return NO_INIT;
1171        }
1172    }
1173    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1174        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
1175            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
1176        } else {
1177            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
1178            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
1179            // FIXME This is a warning, not an error, so don't return error status
1180            //return NO_INIT;
1181        }
1182    }
1183    // Make sure that application is notified with sufficient margin before underrun
1184    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
1185        // Theoretically double-buffering is not required for fast tracks,
1186        // due to tighter scheduling.  But in practice, to accommodate kernels with
1187        // scheduling jitter, and apps with computation jitter, we use double-buffering
1188        // for fast tracks just like normal streaming tracks.
1189        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
1190            mNotificationFramesAct = frameCount / nBuffering;
1191        }
1192    }
1193
1194    // We retain a copy of the I/O handle, but don't own the reference
1195    mOutput = output;
1196    mRefreshRemaining = true;
1197
1198    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1199    // is the value of pointer() for the shared buffer, otherwise buffers points
1200    // immediately after the control block.  This address is for the mapping within client
1201    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1202    void* buffers;
1203    if (mSharedBuffer == 0) {
1204        buffers = cblk + 1;
1205    } else {
1206        buffers = mSharedBuffer->pointer();
1207        if (buffers == NULL) {
1208            ALOGE("Could not get buffer pointer");
1209            return NO_INIT;
1210        }
1211    }
1212
1213    mAudioTrack->attachAuxEffect(mAuxEffectId);
1214    // FIXME don't believe this lie
1215    mLatency = afLatency + (1000*frameCount) / mSampleRate;
1216
1217    mFrameCount = frameCount;
1218    // If IAudioTrack is re-created, don't let the requested frameCount
1219    // decrease.  This can confuse clients that cache frameCount().
1220    if (frameCount > mReqFrameCount) {
1221        mReqFrameCount = frameCount;
1222    }
1223
1224    // update proxy
1225    if (mSharedBuffer == 0) {
1226        mStaticProxy.clear();
1227        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1228    } else {
1229        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
1230        mProxy = mStaticProxy;
1231    }
1232
1233    mProxy->setVolumeLR(gain_minifloat_pack(
1234            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1235            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1236
1237    mProxy->setSendLevel(mSendLevel);
1238    mProxy->setSampleRate(mSampleRate);
1239    mProxy->setMinimum(mNotificationFramesAct);
1240
1241    mDeathNotifier = new DeathNotifier(this);
1242    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);
1243
1244    return NO_ERROR;
1245    }
1246
1247release:
1248    AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
1249    if (status == NO_ERROR) {
1250        status = NO_INIT;
1251    }
1252    return status;
1253}
1254
1255status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1256{
1257    if (audioBuffer == NULL) {
1258        if (nonContig != NULL) {
1259            *nonContig = 0;
1260        }
1261        return BAD_VALUE;
1262    }
1263    if (mTransfer != TRANSFER_OBTAIN) {
1264        audioBuffer->frameCount = 0;
1265        audioBuffer->size = 0;
1266        audioBuffer->raw = NULL;
1267        if (nonContig != NULL) {
1268            *nonContig = 0;
1269        }
1270        return INVALID_OPERATION;
1271    }
1272
1273    const struct timespec *requested;
1274    struct timespec timeout;
1275    if (waitCount == -1) {
1276        requested = &ClientProxy::kForever;
1277    } else if (waitCount == 0) {
1278        requested = &ClientProxy::kNonBlocking;
1279    } else if (waitCount > 0) {
1280        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1281        timeout.tv_sec = ms / 1000;
1282        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1283        requested = &timeout;
1284    } else {
1285        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1286        requested = NULL;
1287    }
1288    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1289}
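// Editor's note (illustrative): with WAIT_PERIOD_MS == 10, a waitCount of 3 converts to
// a 30 ms timeout above (timeout.tv_sec = 0, timeout.tv_nsec = 30000000), while -1 and 0
// select kForever and kNonBlocking respectively.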
1290
1291status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1292        struct timespec *elapsed, size_t *nonContig)
1293{
1294    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1295    uint32_t oldSequence = 0;
1296    uint32_t newSequence;
1297
1298    Proxy::Buffer buffer;
1299    status_t status = NO_ERROR;
1300
1301    static const int32_t kMaxTries = 5;
1302    int32_t tryCounter = kMaxTries;
1303
1304    do {
1305        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1306        // keep them from going away if another thread re-creates the track during obtainBuffer()
1307        sp<AudioTrackClientProxy> proxy;
1308        sp<IMemory> iMem;
1309
1310        {   // start of lock scope
1311            AutoMutex lock(mLock);
1312
1313            newSequence = mSequence;
1314            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1315            if (status == DEAD_OBJECT) {
1316                // re-create track, unless someone else has already done so
1317                if (newSequence == oldSequence) {
1318                    status = restoreTrack_l("obtainBuffer");
1319                    if (status != NO_ERROR) {
1320                        buffer.mFrameCount = 0;
1321                        buffer.mRaw = NULL;
1322                        buffer.mNonContig = 0;
1323                        break;
1324                    }
1325                }
1326            }
1327            oldSequence = newSequence;
1328
1329            // Keep the extra references
1330            proxy = mProxy;
1331            iMem = mCblkMemory;
1332
1333            if (mState == STATE_STOPPING) {
1334                status = -EINTR;
1335                buffer.mFrameCount = 0;
1336                buffer.mRaw = NULL;
1337                buffer.mNonContig = 0;
1338                break;
1339            }
1340
1341            // Non-blocking if track is stopped or paused
1342            if (mState != STATE_ACTIVE) {
1343                requested = &ClientProxy::kNonBlocking;
1344            }
1345
1346        }   // end of lock scope
1347
1348        buffer.mFrameCount = audioBuffer->frameCount;
1349        // FIXME starts the requested timeout and elapsed over from scratch
1350        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1351
1352    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1353
1354    audioBuffer->frameCount = buffer.mFrameCount;
1355    audioBuffer->size = buffer.mFrameCount * mFrameSize;
1356    audioBuffer->raw = buffer.mRaw;
1357    if (nonContig != NULL) {
1358        *nonContig = buffer.mNonContig;
1359    }
1360    return status;
1361}
1362
1363void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1364{
1365    // FIXME add error checking on mode, by adding an internal version
1366    if (mTransfer == TRANSFER_SHARED) {
1367        return;
1368    }
1369
1370    size_t stepCount = audioBuffer->size / mFrameSize;
1371    if (stepCount == 0) {
1372        return;
1373    }
1374
1375    Proxy::Buffer buffer;
1376    buffer.mFrameCount = stepCount;
1377    buffer.mRaw = audioBuffer->raw;
1378
1379    AutoMutex lock(mLock);
1380    mReleased += stepCount;
1381    mInUnderrun = false;
1382    mProxy->releaseBuffer(&buffer);
1383
1384    // restart track if it was disabled by audioflinger due to previous underrun
1385    if (mState == STATE_ACTIVE) {
1386        audio_track_cblk_t* cblk = mCblk;
1387        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1388            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1389            // FIXME ignoring status
1390            mAudioTrack->start();
1391        }
1392    }
1393}
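// Illustrative sketch (editor-added, not part of the original source): in TRANSFER_OBTAIN
// mode the client drives the buffer cycle itself; framesToWrite and source are hypothetical:
//
//     AudioTrack::Buffer buf;
//     buf.frameCount = framesToWrite;
//     if (track->obtainBuffer(&buf, -1 /*wait forever*/) == NO_ERROR) {
//         memcpy(buf.raw, source, buf.size);       // buf.size may be less than requested
//         track->releaseBuffer(&buf);
//     }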
1394
1395// -------------------------------------------------------------------------
1396
1397ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1398{
1399    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1400        return INVALID_OPERATION;
1401    }
1402
1403    if (isDirect()) {
1404        AutoMutex lock(mLock);
1405        int32_t flags = android_atomic_and(
1406                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1407                            &mCblk->mFlags);
1408        if (flags & CBLK_INVALID) {
1409            return DEAD_OBJECT;
1410        }
1411    }
1412
1413    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1414        // Sanity-check: the user is most likely passing an error code, and it would
1415        // make the return value ambiguous (actualSize vs error).
1416        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1417        return BAD_VALUE;
1418    }
1419
1420    size_t written = 0;
1421    Buffer audioBuffer;
1422
1423    while (userSize >= mFrameSize) {
1424        audioBuffer.frameCount = userSize / mFrameSize;
1425
1426        status_t err = obtainBuffer(&audioBuffer,
1427                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1428        if (err < 0) {
1429            if (written > 0) {
1430                break;
1431            }
1432            return ssize_t(err);
1433        }
1434
1435        size_t toWrite = audioBuffer.size;
1436        memcpy(audioBuffer.i8, buffer, toWrite);
1437        buffer = ((const char *) buffer) + toWrite;
1438        userSize -= toWrite;
1439        written += toWrite;
1440
1441        releaseBuffer(&audioBuffer);
1442    }
1443
1444    return written;
1445}
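// Illustrative usage sketch (editor-added, not part of the original source): in
// TRANSFER_SYNC mode a client pushes PCM with blocking writes after start();
// pcmData and numBytes are hypothetical:
//
//     ssize_t written = track->write(pcmData, numBytes, true /*blocking*/);
//     if (written < 0) {
//         // error, e.g. DEAD_OBJECT after a mediaserver restart
//     }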
1446
1447// -------------------------------------------------------------------------
1448
1449TimedAudioTrack::TimedAudioTrack() {
1450    mIsTimed = true;
1451}
1452
1453status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1454{
1455    AutoMutex lock(mLock);
1456    status_t result = UNKNOWN_ERROR;
1457
1458#if 1
1459    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1460    // while we are accessing the cblk
1461    sp<IAudioTrack> audioTrack = mAudioTrack;
1462    sp<IMemory> iMem = mCblkMemory;
1463#endif
1464
1465    // If the track is not invalid already, try to allocate a buffer.  If the
1466    // allocation fails, indicating that the server is dead, flag the track as
1467    // invalid so we can attempt to restore it in just a bit.
1468    audio_track_cblk_t* cblk = mCblk;
1469    if (!(cblk->mFlags & CBLK_INVALID)) {
1470        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1471        if (result == DEAD_OBJECT) {
1472            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1473        }
1474    }
1475
1476    // If the track is invalid at this point, attempt to restore it and try the
1477    // allocation one more time.
1478    if (cblk->mFlags & CBLK_INVALID) {
1479        result = restoreTrack_l("allocateTimedBuffer");
1480
1481        if (result == NO_ERROR) {
1482            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1483        }
1484    }
1485
1486    return result;
1487}
1488
1489status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1490                                           int64_t pts)
1491{
1492    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1493    {
1494        AutoMutex lock(mLock);
1495        audio_track_cblk_t* cblk = mCblk;
1496        // restart track if it was disabled by audioflinger due to previous underrun
1497        if (buffer->size() != 0 && status == NO_ERROR &&
1498                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1499            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1500            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1501            // FIXME ignoring status
1502            mAudioTrack->start();
1503        }
1504    }
1505    return status;
1506}
1507
1508status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1509                                                TargetTimeline target)
1510{
1511    return mAudioTrack->setMediaTimeTransform(xform, target);
1512}
1513
1514// -------------------------------------------------------------------------
1515
1516nsecs_t AudioTrack::processAudioBuffer()
1517{
1518    // Currently the AudioTrack thread is not created if there are no callbacks.
1519    // Would it ever make sense to run the thread, even without callbacks?
1520    // If so, then replace this by checks at each use for mCbf != NULL.
1521    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1522
1523    mLock.lock();
1524    if (mAwaitBoost) {
1525        mAwaitBoost = false;
1526        mLock.unlock();
1527        static const int32_t kMaxTries = 5;
1528        int32_t tryCounter = kMaxTries;
1529        uint32_t pollUs = 10000;
1530        do {
1531            int policy = sched_getscheduler(0);
1532            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1533                break;
1534            }
1535            usleep(pollUs);
1536            pollUs <<= 1;
1537        } while (tryCounter-- > 0);
1538        if (tryCounter < 0) {
1539            ALOGE("did not receive expected priority boost on time");
1540        }
1541        // Run again immediately
1542        return 0;
1543    }
1544
1545    // Can only reference mCblk while locked
1546    int32_t flags = android_atomic_and(
1547        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
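    // android_atomic_and() returns the previous value of mFlags, so 'flags' is a snapshot
    // of the pending events, and those event bits are cleared atomically so that each
    // event is reported at most once per pass.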
1548
1549    // Check for track invalidation
1550    if (flags & CBLK_INVALID) {
1551        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear the
1552        // AudioSystem cache.  We should not exit here but only after calling the callback, so
1553        // that the upper layers can recreate the track.
1554        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1555            status_t status __unused = restoreTrack_l("processAudioBuffer");
1556            // after restoration, continue below to make sure that the loop and buffer events
1557            // are notified because they have been cleared from mCblk->mFlags above.
1558        }
1559    }
1560
1561    bool waitStreamEnd = mState == STATE_STOPPING;
1562    bool active = mState == STATE_ACTIVE;
1563
1564    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1565    bool newUnderrun = false;
1566    if (flags & CBLK_UNDERRUN) {
1567#if 0
1568        // Currently in shared buffer mode, when the server reaches the end of the buffer,
1569        // the track stays active in a continuous underrun state.  It's up to the application
1570        // to pause or stop the track, or to set the position to a new offset within the buffer.
1571        // This was some experimental code to auto-pause on underrun.  Keeping it here
1572        // in "if 0" so we can revisit it if we add a real sequencer for shared memory content.
1573        if (mTransfer == TRANSFER_SHARED) {
1574            mState = STATE_PAUSED;
1575            active = false;
1576        }
1577#endif
1578        if (!mInUnderrun) {
1579            mInUnderrun = true;
1580            newUnderrun = true;
1581        }
1582    }
1583
1584    // Get current position of server
1585    size_t position = updateAndGetPosition_l();
1586
1587    // Manage marker callback
1588    bool markerReached = false;
1589    size_t markerPosition = mMarkerPosition;
1590    // FIXME fails for wraparound, need 64 bits
1591    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1592        mMarkerReached = markerReached = true;
1593    }
1594
1595    // Determine number of new position callback(s) that will be needed, while locked
1596    size_t newPosCount = 0;
1597    size_t newPosition = mNewPosition;
1598    size_t updatePeriod = mUpdatePeriod;
1599    // FIXME fails for wraparound, need 64 bits
1600    if (updatePeriod > 0 && position >= newPosition) {
1601        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1602        mNewPosition += updatePeriod * newPosCount;
1603    }
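    // For example, with updatePeriod = 480 and newPosition = 960, a server position of 2000
    // yields newPosCount = 3 pending EVENT_NEW_POS callbacks and advances mNewPosition to 2400.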
1604
1605    // Cache other fields that will be needed soon
1606    uint32_t sampleRate = mSampleRate;
1607    uint32_t notificationFrames = mNotificationFramesAct;
1608    if (mRefreshRemaining) {
1609        mRefreshRemaining = false;
1610        mRemainingFrames = notificationFrames;
1611        mRetryOnPartialBuffer = false;
1612    }
1613    size_t misalignment = mProxy->getMisalignment();
1614    uint32_t sequence = mSequence;
1615    sp<AudioTrackClientProxy> proxy = mProxy;
1616
1617    // Determine the number of new loop callback(s) that will be needed, while locked.
1618    int loopCountNotifications = 0;
1619    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1620
1621    if (mLoopCount > 0) {
1622        int loopCount;
1623        size_t bufferPosition;
1624        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1625        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1626        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1627        mLoopCountNotified = loopCount; // discard any excess notifications
1628    } else if (mLoopCount < 0) {
1629        // FIXME: We're not accurate with notification count and position with infinite looping
1630        // since loopCount from server side will always return -1 (we could decrement it).
1631        size_t bufferPosition = mStaticProxy->getBufferPosition();
1632        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1633        loopPeriod = mLoopEnd - bufferPosition;
1634    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1635        size_t bufferPosition = mStaticProxy->getBufferPosition();
1636        loopPeriod = mFrameCount - bufferPosition;
1637    }
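    // At this point loopCountNotifications is the number of EVENT_LOOP_END callbacks due,
    // and loopPeriod is the number of frames until the next loop wrap (or until the end of
    // a non-looping static buffer); both are consumed below after the lock is dropped.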
1638
1639    // These fields don't need to be cached, because they are assigned only by set():
1640    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1641    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1642
1643    mLock.unlock();
1644
1645    if (waitStreamEnd) {
1646        struct timespec timeout;
1647        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1648        timeout.tv_nsec = 0;
1649
1650        status_t status = proxy->waitStreamEndDone(&timeout);
1651        switch (status) {
1652        case NO_ERROR:
1653        case DEAD_OBJECT:
1654        case TIMED_OUT:
1655            mCbf(EVENT_STREAM_END, mUserData, NULL);
1656            {
1657                AutoMutex lock(mLock);
1658                // The previously assigned value of waitStreamEnd is no longer valid,
1659                // since the mutex has been unlocked and either the callback handler
1660                // or another thread could have re-started the AudioTrack during that time.
1661                waitStreamEnd = mState == STATE_STOPPING;
1662                if (waitStreamEnd) {
1663                    mState = STATE_STOPPED;
1664                    mReleased = 0;
1665                }
1666            }
1667            if (waitStreamEnd && status != DEAD_OBJECT) {
1668               return NS_INACTIVE;
1669            }
1670            break;
1671        }
1672        return 0;
1673    }
1674
1675    // perform callbacks while unlocked
1676    if (newUnderrun) {
1677        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1678    }
1679    while (loopCountNotifications > 0) {
1680        mCbf(EVENT_LOOP_END, mUserData, NULL);
1681        --loopCountNotifications;
1682    }
1683    if (flags & CBLK_BUFFER_END) {
1684        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1685    }
1686    if (markerReached) {
1687        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1688    }
1689    while (newPosCount > 0) {
1690        size_t temp = newPosition;
1691        mCbf(EVENT_NEW_POS, mUserData, &temp);
1692        newPosition += updatePeriod;
1693        newPosCount--;
1694    }
1695
1696    if (mObservedSequence != sequence) {
1697        mObservedSequence = sequence;
1698        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1699        // for offloaded tracks, just wait for the upper layers to recreate the track
1700        if (isOffloadedOrDirect()) {
1701            return NS_INACTIVE;
1702        }
1703    }
1704
1705    // if inactive, then don't run me again until re-started
1706    if (!active) {
1707        return NS_INACTIVE;
1708    }
1709
1710    // Compute the estimated time until the next timed event (position, markers, loops)
1711    // FIXME only for non-compressed audio
1712    uint32_t minFrames = ~0;
1713    if (!markerReached && position < markerPosition) {
1714        minFrames = markerPosition - position;
1715    }
1716    if (loopPeriod > 0 && loopPeriod < minFrames) {
1717        // loopPeriod is already adjusted for actual position.
1718        minFrames = loopPeriod;
1719    }
1720    if (updatePeriod > 0) {
1721        minFrames = min(minFrames, uint32_t(newPosition - position));
1722    }
1723
1724    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1725    static const uint32_t kPoll = 0;
1726    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1727        minFrames = kPoll * notificationFrames;
1728    }
1729
1730    // Convert frame units to time units
1731    nsecs_t ns = NS_WHENEVER;
1732    if (minFrames != (uint32_t) ~0) {
1733        // This "fudge factor" avoids soaking CPU, and compensates for late progress by the server
1734        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1735        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1736    }
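    // For example, minFrames = 441 at a 44100 Hz sample rate yields 10 ms plus the
    // 10 ms fudge factor, i.e. ns = 20 ms.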
1737
1738    // If not supplying data by EVENT_MORE_DATA, then we're done
1739    if (mTransfer != TRANSFER_CALLBACK) {
1740        return ns;
1741    }
1742
1743    struct timespec timeout;
1744    const struct timespec *requested = &ClientProxy::kForever;
1745    if (ns != NS_WHENEVER) {
1746        timeout.tv_sec = ns / 1000000000LL;
1747        timeout.tv_nsec = ns % 1000000000LL;
1748        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1749        requested = &timeout;
1750    }
1751
1752    while (mRemainingFrames > 0) {
1753
1754        Buffer audioBuffer;
1755        audioBuffer.frameCount = mRemainingFrames;
1756        size_t nonContig;
1757        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1758        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1759                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1760        requested = &ClientProxy::kNonBlocking;
1761        size_t avail = audioBuffer.frameCount + nonContig;
1762        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1763                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1764        if (err != NO_ERROR) {
1765            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1766                    (isOffloaded() && (err == DEAD_OBJECT))) {
1767                return 0;
1768            }
1769            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1770            return NS_NEVER;
1771        }
1772
1773        if (mRetryOnPartialBuffer && !isOffloaded()) {
1774            mRetryOnPartialBuffer = false;
1775            if (avail < mRemainingFrames) {
1776                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1777                if (ns < 0 || myns < ns) {
1778                    ns = myns;
1779                }
1780                return ns;
1781            }
1782        }
1783
1784        size_t reqSize = audioBuffer.size;
1785        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1786        size_t writtenSize = audioBuffer.size;
1787
1788        // Sanity check on returned size
1789        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1790            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1791                    reqSize, ssize_t(writtenSize));
1792            return NS_NEVER;
1793        }
1794
1795        if (writtenSize == 0) {
1796            // The callback is done filling buffers
1797            // Keep this thread going to handle timed events and to
1798            // keep trying to get more data at intervals of WAIT_PERIOD_MS,
1799            // but don't busy-loop and burn CPU, so wait
1800            return WAIT_PERIOD_MS * 1000000LL;
1801        }
1802
1803        size_t releasedFrames = writtenSize / mFrameSize;
1804        audioBuffer.frameCount = releasedFrames;
1805        mRemainingFrames -= releasedFrames;
1806        if (misalignment >= releasedFrames) {
1807            misalignment -= releasedFrames;
1808        } else {
1809            misalignment = 0;
1810        }
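        // Note: the local misalignment count is only consumed by the disabled (#if 0)
        // notification-coalescing heuristic further below.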
1811
1812        releaseBuffer(&audioBuffer);
1813
1814        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1815        // if the callback doesn't accept the full chunk
1816        if (writtenSize < reqSize) {
1817            continue;
1818        }
1819
1820        // There could be enough non-contiguous frames available to satisfy the remaining request
1821        if (mRemainingFrames <= nonContig) {
1822            continue;
1823        }
1824
1825#if 0
1826        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1827        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1828        // that total to a sum == notificationFrames.
1829        if (0 < misalignment && misalignment <= mRemainingFrames) {
1830            mRemainingFrames = misalignment;
1831            return (mRemainingFrames * 1100000000LL) / sampleRate;
1832        }
1833#endif
1834
1835    }
1836    mRemainingFrames = notificationFrames;
1837    mRetryOnPartialBuffer = true;
1838
1839    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1840    return 0;
1841}
1842
1843status_t AudioTrack::restoreTrack_l(const char *from)
1844{
1845    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1846          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
1847    ++mSequence;
1848
1849    // refresh the audio configuration cache in this process to make sure we get new
1850    // output parameters and new IAudioFlinger in createTrack_l()
1851    AudioSystem::clearAudioConfigCache();
1852
1853    if (isOffloadedOrDirect_l()) {
1854        // FIXME re-creation of offloaded tracks is not yet implemented
1855        return DEAD_OBJECT;
1856    }
1857
1858    // save the old static buffer position
1859    size_t bufferPosition = 0;
1860    int loopCount = 0;
1861    if (mStaticProxy != 0) {
1862        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1863    }
1864
1865    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
1866    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1867    // It will also delete the strong references on previous IAudioTrack and IMemory.
1868    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
1869    status_t result = createTrack_l();
1870
1871    // Take the frames that will be lost by track re-creation into account in the saved position.
1872    // For streaming tracks, this is the amount we obtained from the user/client
1873    // (not the number actually consumed at the server - those are already lost).
1874    (void) updateAndGetPosition_l();
1875    if (mStaticProxy == 0) {
1876        mPosition = mReleased;
1877    }
1878
1879    if (result == NO_ERROR) {
1880        // Continue playback from last known position and restore loop.
1881        if (mStaticProxy != 0) {
1882            if (loopCount != 0) {
1883                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
1884                        mLoopStart, mLoopEnd, loopCount);
1885            } else {
1886                mStaticProxy->setBufferPosition(bufferPosition);
1887                if (bufferPosition == mFrameCount) {
1888                    ALOGD("restoring track at end of static buffer");
1889                }
1890            }
1891        }
1892        if (mState == STATE_ACTIVE) {
1893            result = mAudioTrack->start();
1894        }
1895    }
1896    if (result != NO_ERROR) {
1897        ALOGW("restoreTrack_l() failed status %d", result);
1898        mState = STATE_STOPPED;
1899        mReleased = 0;
1900    }
1901
1902    return result;
1903}
1904
1905uint32_t AudioTrack::updateAndGetPosition_l()
1906{
1907    // This is the sole place to read server consumed frames
1908    uint32_t newServer = mProxy->getPosition();
1909    int32_t delta = newServer - mServer;
1910    mServer = newServer;
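    // The subtraction above is performed on uint32_t values and the result is then
    // interpreted as int32_t, so a server position that has wrapped around 2^32
    // still produces the correct small positive delta.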
1911    // TODO There is controversy about whether there can be "negative jitter" in server position.
1912    //      This should be investigated further, and if possible, it should be addressed.
1913    //      A more definite failure mode is infrequent polling by client.
1914    //      One could call (void)getPosition_l() in releaseBuffer(),
1915    //      so mReleased and mPosition are always lock-step as best possible.
1916    //      That should ensure delta never goes negative for infrequent polling
1917    //      unless the server has more than 2^31 frames in its buffer,
1918    //      in which case the use of uint32_t for these counters has bigger issues.
1919    if (delta < 0) {
1920        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
1921        delta = 0;
1922    }
1923    return mPosition += (uint32_t) delta;
1924}
1925
1926status_t AudioTrack::setParameters(const String8& keyValuePairs)
1927{
1928    AutoMutex lock(mLock);
1929    return mAudioTrack->setParameters(keyValuePairs);
1930}
1931
1932status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1933{
1934    AutoMutex lock(mLock);
1935    // FIXME not implemented for fast tracks; should use proxy and SSQ
1936    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1937        return INVALID_OPERATION;
1938    }
1939
1940    switch (mState) {
1941    case STATE_ACTIVE:
1942    case STATE_PAUSED:
1943        break; // handle below
1944    case STATE_FLUSHED:
1945    case STATE_STOPPED:
1946        return WOULD_BLOCK;
1947    case STATE_STOPPING:
1948    case STATE_PAUSED_STOPPING:
1949        if (!isOffloaded_l()) {
1950            return INVALID_OPERATION;
1951        }
1952        break; // offloaded tracks handled below
1953    default:
1954        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
1955        break;
1956    }
1957
1958    if (mCblk->mFlags & CBLK_INVALID) {
1959        restoreTrack_l("getTimestamp");
1960    }
1961
1962    // The presented frame count must always lag behind the consumed frame count.
1963    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
1964    status_t status = mAudioTrack->getTimestamp(timestamp);
1965    if (status != NO_ERROR) {
1966        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
1967        return status;
1968    }
1969    if (isOffloadedOrDirect_l()) {
1970        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
1971            // use cached paused position in case another offloaded track is running.
1972            timestamp.mPosition = mPausedPosition;
1973            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
1974            return NO_ERROR;
1975        }
1976
1977        // Check whether a pending flush or stop has completed, as those commands may
1978        // be asynchronous or may return slightly before they have fully taken effect.
1979        if (mStartUs != 0 && mSampleRate != 0) {
1980            static const int kTimeJitterUs = 100000; // 100 ms
1981            static const int k1SecUs = 1000000;
1982
1983            const int64_t timeNow = getNowUs();
1984
1985            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
1986                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
1987                if (timestampTimeUs < mStartUs) {
1988                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
1989                }
1990                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
1991                const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
1992
1993                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
1994                    // Verify that the counter can't count faster than the sample rate
1995                    // since the start time.  If greater, then that means we have failed
1996                    // to completely flush or stop the previous playing track.
1997                    ALOGW("incomplete flush or stop:"
1998                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
1999                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
2000                            timestamp.mPosition);
2001                    return WOULD_BLOCK;
2002                }
2003            }
2004            mStartUs = 0; // no need to check again; the start timestamp has either expired or is no longer needed.
2005        }
2006    } else {
2007        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2008        (void) updateAndGetPosition_l();
2009        // Server consumed (mServer) and presented both use the same server time base,
2010        // and server consumed is always >= presented.
2011        // The delta between these represents the number of frames in the buffer pipeline.
2012        // If this delta is greater than the client position, it means that the
2013        // presented position is still stuck at the starting line (figuratively speaking),
2014        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2015        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
2016            return INVALID_OPERATION;
2017        }
2018        // Convert timestamp position from server time base to client time base.
2019        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2020        // But if we change it to 64-bit then this could fail.
2021        // If (mPosition - mServer) can be negative then should use:
2022        //   (int32_t)(mPosition - mServer)
2023        timestamp.mPosition += mPosition - mServer;
2024        // Immediately after a call to getPosition_l(), mPosition and
2025        // mServer both represent the same frame position.  mPosition is
2026        // in client's point of view, and mServer is in server's point of
2027        // view.  So the difference between them is the "fudge factor"
2028        // between client and server views due to stop() and/or new
2029        // IAudioTrack.  And timestamp.mPosition is initially in server's
2030        // point of view, so we need to apply the same fudge factor to it.
2031    }
2032    return status;
2033}
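
// Illustrative only (not part of this class): a client typically extrapolates the current
// playback position from the returned timestamp, along these lines, where sampleRate
// stands for the track's sample rate:
//
//     AudioTimestamp ts;
//     if (track->getTimestamp(ts) == NO_ERROR) {
//         int64_t nowNs = systemTime(SYSTEM_TIME_MONOTONIC);
//         int64_t tsNs = int64_t(ts.mTime.tv_sec) * 1000000000LL + ts.mTime.tv_nsec;
//         int64_t frames = int64_t(ts.mPosition) + ((nowNs - tsNs) * sampleRate) / 1000000000LL;
//     }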
2034
2035String8 AudioTrack::getParameters(const String8& keys)
2036{
2037    audio_io_handle_t output = getOutput();
2038    if (output != AUDIO_IO_HANDLE_NONE) {
2039        return AudioSystem::getParameters(output, keys);
2040    } else {
2041        return String8::empty();
2042    }
2043}
2044
2045bool AudioTrack::isOffloaded() const
2046{
2047    AutoMutex lock(mLock);
2048    return isOffloaded_l();
2049}
2050
2051bool AudioTrack::isDirect() const
2052{
2053    AutoMutex lock(mLock);
2054    return isDirect_l();
2055}
2056
2057bool AudioTrack::isOffloadedOrDirect() const
2058{
2059    AutoMutex lock(mLock);
2060    return isOffloadedOrDirect_l();
2061}
2062
2063
2064status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2065{
2066
2067    const size_t SIZE = 256;
2068    char buffer[SIZE];
2069    String8 result;
2070
2071    result.append(" AudioTrack::dump\n");
2072    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2073            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2074    result.append(buffer);
2075    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2076            mChannelCount, mFrameCount);
2077    result.append(buffer);
2078    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
2079    result.append(buffer);
2080    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2081    result.append(buffer);
2082    ::write(fd, result.string(), result.size());
2083    return NO_ERROR;
2084}
2085
2086uint32_t AudioTrack::getUnderrunFrames() const
2087{
2088    AutoMutex lock(mLock);
2089    return mProxy->getUnderrunFrames();
2090}
2091
2092// =========================================================================
2093
2094void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2095{
2096    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2097    if (audioTrack != 0) {
2098        AutoMutex lock(audioTrack->mLock);
2099        audioTrack->mProxy->binderDied();
2100    }
2101}
2102
2103// =========================================================================
2104
2105AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2106    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2107      mIgnoreNextPausedInt(false)
2108{
2109}
2110
2111AudioTrack::AudioTrackThread::~AudioTrackThread()
2112{
2113}
2114
2115bool AudioTrack::AudioTrackThread::threadLoop()
2116{
2117    {
2118        AutoMutex _l(mMyLock);
2119        if (mPaused) {
2120            mMyCond.wait(mMyLock);
2121            // caller will check for exitPending()
2122            return true;
2123        }
2124        if (mIgnoreNextPausedInt) {
2125            mIgnoreNextPausedInt = false;
2126            mPausedInt = false;
2127        }
2128        if (mPausedInt) {
2129            if (mPausedNs > 0) {
2130                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2131            } else {
2132                mMyCond.wait(mMyLock);
2133            }
2134            mPausedInt = false;
2135            return true;
2136        }
2137    }
2138    if (exitPending()) {
2139        return false;
2140    }
2141    nsecs_t ns = mReceiver.processAudioBuffer();
2142    switch (ns) {
2143    case 0:
2144        return true;
2145    case NS_INACTIVE:
2146        pauseInternal();
2147        return true;
2148    case NS_NEVER:
2149        return false;
2150    case NS_WHENEVER:
2151        // Event driven: call wake() when callback notification conditions change.
2152        ns = INT64_MAX;
2153        // fall through
2154    default:
2155        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2156        pauseInternal(ns);
2157        return true;
2158    }
2159}
2160
2161void AudioTrack::AudioTrackThread::requestExit()
2162{
2163    // must be in this order to avoid a race condition
2164    Thread::requestExit();
2165    resume();
2166}
2167
2168void AudioTrack::AudioTrackThread::pause()
2169{
2170    AutoMutex _l(mMyLock);
2171    mPaused = true;
2172}
2173
2174void AudioTrack::AudioTrackThread::resume()
2175{
2176    AutoMutex _l(mMyLock);
2177    mIgnoreNextPausedInt = true;
2178    if (mPaused || mPausedInt) {
2179        mPaused = false;
2180        mPausedInt = false;
2181        mMyCond.signal();
2182    }
2183}
2184
2185void AudioTrack::AudioTrackThread::wake()
2186{
2187    AutoMutex _l(mMyLock);
2188    if (!mPaused && mPausedInt && mPausedNs > 0) {
2189        // audio track is active and internally paused with timeout.
2190        mIgnoreNextPausedInt = true;
2191        mPausedInt = false;
2192        mMyCond.signal();
2193    }
2194}
2195
2196void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2197{
2198    AutoMutex _l(mMyLock);
2199    mPausedInt = true;
2200    mPausedNs = ns;
2201}
2202
2203} // namespace android
2204