AudioTrack.cpp revision 275e8e9de2e11b4b344f5a201f1f0e51fda02d9c
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/primitives.h>
26#include <binder/IPCThreadState.h>
27#include <media/AudioTrack.h>
28#include <utils/Log.h>
29#include <private/media/AudioTrackShared.h>
30#include <media/IAudioFlinger.h>
31#include <media/AudioPolicyHelper.h>
32#include <media/AudioResamplerPublic.h>
33
34#define WAIT_PERIOD_MS                  10
35#define WAIT_STREAM_END_TIMEOUT_SEC     120
36
37
38namespace android {
39// ---------------------------------------------------------------------------
40
41static int64_t convertTimespecToUs(const struct timespec &tv)
42{
43    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
44}
45
46// current monotonic time in microseconds.
47static int64_t getNowUs()
48{
49    struct timespec tv;
50    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
51    return convertTimespecToUs(tv);
52}
53
54// static
55status_t AudioTrack::getMinFrameCount(
56        size_t* frameCount,
57        audio_stream_type_t streamType,
58        uint32_t sampleRate)
59{
60    if (frameCount == NULL) {
61        return BAD_VALUE;
62    }
63
64    // FIXME merge with similar code in createTrack_l(), except we're missing
65    //       some information here that is available in createTrack_l():
66    //          audio_io_handle_t output
67    //          audio_format_t format
68    //          audio_channel_mask_t channelMask
69    //          audio_output_flags_t flags
70    uint32_t afSampleRate;
71    status_t status;
72    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
73    if (status != NO_ERROR) {
74        ALOGE("Unable to query output sample rate for stream type %d; status %d",
75                streamType, status);
76        return status;
77    }
78    size_t afFrameCount;
79    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
80    if (status != NO_ERROR) {
81        ALOGE("Unable to query output frame count for stream type %d; status %d",
82                streamType, status);
83        return status;
84    }
85    uint32_t afLatency;
86    status = AudioSystem::getOutputLatency(&afLatency, streamType);
87    if (status != NO_ERROR) {
88        ALOGE("Unable to query output latency for stream type %d; status %d",
89                streamType, status);
90        return status;
91    }
92
93    // Ensure that buffer depth covers at least audio hardware latency
94    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
95    if (minBufCount < 2) {
96        minBufCount = 2;
97    }
98
99    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
100            afFrameCount * minBufCount * uint64_t(sampleRate) / afSampleRate;
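    // Worked example (illustrative numbers only): with afFrameCount = 960, afSampleRate = 48000,
    // and afLatency = 160 ms, one hardware buffer covers (1000 * 960) / 48000 = 20 ms, so
    // minBufCount = 160 / 20 = 8 and the minimum is 960 * 8 = 7680 frames (scaled by
    // sampleRate / afSampleRate when the client supplies its own rate).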
101    // The formula above should always produce a non-zero value, but return an error
102    // in the unlikely event that it does not, as that's part of the API contract.
103    if (*frameCount == 0) {
104        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %d",
105                streamType, sampleRate);
106        return BAD_VALUE;
107    }
108    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%d, afSampleRate=%d, afLatency=%d",
109            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
110    return NO_ERROR;
111}
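
// Illustrative only: a minimal client-side sketch of how getMinFrameCount() is typically used
// to size a streaming track.  The stream type, sample rate, and multiplier below are
// hypothetical, not taken from this file.
#if 0
static size_t exampleChooseFrameCount()
{
    size_t minFrameCount = 0;
    if (AudioTrack::getMinFrameCount(&minFrameCount, AUDIO_STREAM_MUSIC, 48000) != NO_ERROR) {
        return 0;
    }
    // Use a multiple of the minimum to trade extra latency for underrun margin.
    return minFrameCount * 2;
}
#endif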
112
113// ---------------------------------------------------------------------------
114
115AudioTrack::AudioTrack()
116    : mStatus(NO_INIT),
117      mIsTimed(false),
118      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
119      mPreviousSchedulingGroup(SP_DEFAULT),
120      mPausedPosition(0)
121{
122    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
123    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
124    mAttributes.flags = 0x0;
125    strcpy(mAttributes.tags, "");
126}
127
128AudioTrack::AudioTrack(
129        audio_stream_type_t streamType,
130        uint32_t sampleRate,
131        audio_format_t format,
132        audio_channel_mask_t channelMask,
133        size_t frameCount,
134        audio_output_flags_t flags,
135        callback_t cbf,
136        void* user,
137        uint32_t notificationFrames,
138        int sessionId,
139        transfer_type transferType,
140        const audio_offload_info_t *offloadInfo,
141        int uid,
142        pid_t pid,
143        const audio_attributes_t* pAttributes)
144    : mStatus(NO_INIT),
145      mIsTimed(false),
146      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
147      mPreviousSchedulingGroup(SP_DEFAULT),
148      mPausedPosition(0)
149{
150    mStatus = set(streamType, sampleRate, format, channelMask,
151            frameCount, flags, cbf, user, notificationFrames,
152            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
153            offloadInfo, uid, pid, pAttributes);
154}
155
156AudioTrack::AudioTrack(
157        audio_stream_type_t streamType,
158        uint32_t sampleRate,
159        audio_format_t format,
160        audio_channel_mask_t channelMask,
161        const sp<IMemory>& sharedBuffer,
162        audio_output_flags_t flags,
163        callback_t cbf,
164        void* user,
165        uint32_t notificationFrames,
166        int sessionId,
167        transfer_type transferType,
168        const audio_offload_info_t *offloadInfo,
169        int uid,
170        pid_t pid,
171        const audio_attributes_t* pAttributes)
172    : mStatus(NO_INIT),
173      mIsTimed(false),
174      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
175      mPreviousSchedulingGroup(SP_DEFAULT),
176      mPausedPosition(0)
177{
178    mStatus = set(streamType, sampleRate, format, channelMask,
179            0 /*frameCount*/, flags, cbf, user, notificationFrames,
180            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
181            uid, pid, pAttributes);
182}
183
184AudioTrack::~AudioTrack()
185{
186    if (mStatus == NO_ERROR) {
187        // Make sure that the callback function exits in the case where
188        // it is looping on a buffer-full condition in obtainBuffer().
189        // Otherwise the callback thread will never exit.
190        stop();
191        if (mAudioTrackThread != 0) {
192            mProxy->interrupt();
193            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
194            mAudioTrackThread->requestExitAndWait();
195            mAudioTrackThread.clear();
196        }
197        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
198        mAudioTrack.clear();
199        mCblkMemory.clear();
200        mSharedBuffer.clear();
201        IPCThreadState::self()->flushCommands();
202        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
203                IPCThreadState::self()->getCallingPid(), mClientPid);
204        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
205    }
206}
207
208status_t AudioTrack::set(
209        audio_stream_type_t streamType,
210        uint32_t sampleRate,
211        audio_format_t format,
212        audio_channel_mask_t channelMask,
213        size_t frameCount,
214        audio_output_flags_t flags,
215        callback_t cbf,
216        void* user,
217        uint32_t notificationFrames,
218        const sp<IMemory>& sharedBuffer,
219        bool threadCanCallJava,
220        int sessionId,
221        transfer_type transferType,
222        const audio_offload_info_t *offloadInfo,
223        int uid,
224        pid_t pid,
225        const audio_attributes_t* pAttributes)
226{
227    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
228          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
229          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
230          sessionId, transferType);
231
232    switch (transferType) {
233    case TRANSFER_DEFAULT:
234        if (sharedBuffer != 0) {
235            transferType = TRANSFER_SHARED;
236        } else if (cbf == NULL || threadCanCallJava) {
237            transferType = TRANSFER_SYNC;
238        } else {
239            transferType = TRANSFER_CALLBACK;
240        }
241        break;
242    case TRANSFER_CALLBACK:
243        if (cbf == NULL || sharedBuffer != 0) {
244            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
245            return BAD_VALUE;
246        }
247        break;
248    case TRANSFER_OBTAIN:
249    case TRANSFER_SYNC:
250        if (sharedBuffer != 0) {
251            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
252            return BAD_VALUE;
253        }
254        break;
255    case TRANSFER_SHARED:
256        if (sharedBuffer == 0) {
257            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
258            return BAD_VALUE;
259        }
260        break;
261    default:
262        ALOGE("Invalid transfer type %d", transferType);
263        return BAD_VALUE;
264    }
265    mSharedBuffer = sharedBuffer;
266    mTransfer = transferType;
267
268    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
269            sharedBuffer->size());
270
271    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);
272
273    AutoMutex lock(mLock);
274
275    // invariant that mAudioTrack != 0 is true only after set() returns successfully
276    if (mAudioTrack != 0) {
277        ALOGE("Track already in use");
278        return INVALID_OPERATION;
279    }
280
281    // handle default values first.
282    if (streamType == AUDIO_STREAM_DEFAULT) {
283        streamType = AUDIO_STREAM_MUSIC;
284    }
285    if (pAttributes == NULL) {
286        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
287            ALOGE("Invalid stream type %d", streamType);
288            return BAD_VALUE;
289        }
290        mStreamType = streamType;
291
292    } else {
293        // the stream type shouldn't be looked at; this track has audio attributes
294        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
295        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
296                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
297        mStreamType = AUDIO_STREAM_DEFAULT;
298    }
299
300    // these below should probably come from the audioFlinger too...
301    if (format == AUDIO_FORMAT_DEFAULT) {
302        format = AUDIO_FORMAT_PCM_16_BIT;
303    }
304
305    // validate parameters
306    if (!audio_is_valid_format(format)) {
307        ALOGE("Invalid format %#x", format);
308        return BAD_VALUE;
309    }
310    mFormat = format;
311
312    if (!audio_is_output_channel(channelMask)) {
313        ALOGE("Invalid channel mask %#x", channelMask);
314        return BAD_VALUE;
315    }
316    mChannelMask = channelMask;
317    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
318    mChannelCount = channelCount;
319
320    // AudioFlinger does not currently support 8-bit data in shared memory
321    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
322        ALOGE("8-bit data in shared memory is not supported");
323        return BAD_VALUE;
324    }
325
326    // force direct flag if format is not linear PCM
327    // or offload was requested
328    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
329            || !audio_is_linear_pcm(format)) {
330        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
331                    ? "Offload request, forcing to Direct Output"
332                    : "Not linear PCM, forcing to Direct Output");
333        flags = (audio_output_flags_t)
334                // FIXME why can't we allow direct AND fast?
335                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
336    }
337
338    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
339        if (audio_is_linear_pcm(format)) {
340            mFrameSize = channelCount * audio_bytes_per_sample(format);
341        } else {
342            mFrameSize = sizeof(uint8_t);
343        }
344        mFrameSizeAF = mFrameSize;
345    } else {
346        ALOG_ASSERT(audio_is_linear_pcm(format));
347        mFrameSize = channelCount * audio_bytes_per_sample(format);
348        mFrameSizeAF = channelCount * audio_bytes_per_sample(
349                format == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : format);
350        // createTrack will return an error if PCM format is not supported by server,
351        // so no need to check for specific PCM formats here
352    }
353
354    // sampling rate must be specified for direct outputs
355    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
356        return BAD_VALUE;
357    }
358    mSampleRate = sampleRate;
359
360    // Make copy of input parameter offloadInfo so that in the future:
361    //  (a) createTrack_l doesn't need it as an input parameter
362    //  (b) we can support re-creation of offloaded tracks
363    if (offloadInfo != NULL) {
364        mOffloadInfoCopy = *offloadInfo;
365        mOffloadInfo = &mOffloadInfoCopy;
366    } else {
367        mOffloadInfo = NULL;
368    }
369
370    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
371    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
372    mSendLevel = 0.0f;
373    // mFrameCount is initialized in createTrack_l
374    mReqFrameCount = frameCount;
375    mNotificationFramesReq = notificationFrames;
376    mNotificationFramesAct = 0;
377    if (sessionId == AUDIO_SESSION_ALLOCATE) {
378        mSessionId = AudioSystem::newAudioUniqueId();
379    } else {
380        mSessionId = sessionId;
381    }
382    int callingpid = IPCThreadState::self()->getCallingPid();
383    int mypid = getpid();
384    if (uid == -1 || (callingpid != mypid)) {
385        mClientUid = IPCThreadState::self()->getCallingUid();
386    } else {
387        mClientUid = uid;
388    }
389    if (pid == -1 || (callingpid != mypid)) {
390        mClientPid = callingpid;
391    } else {
392        mClientPid = pid;
393    }
394    mAuxEffectId = 0;
395    mFlags = flags;
396    mCbf = cbf;
397
398    if (cbf != NULL) {
399        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
400        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
401    }
402
403    // create the IAudioTrack
404    status_t status = createTrack_l();
405
406    if (status != NO_ERROR) {
407        if (mAudioTrackThread != 0) {
408            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
409            mAudioTrackThread->requestExitAndWait();
410            mAudioTrackThread.clear();
411        }
412        return status;
413    }
414
415    mStatus = NO_ERROR;
416    mState = STATE_STOPPED;
417    mUserData = user;
418    mLoopPeriod = 0;
419    mMarkerPosition = 0;
420    mMarkerReached = false;
421    mNewPosition = 0;
422    mUpdatePeriod = 0;
423    mServer = 0;
424    mPosition = 0;
425    mReleased = 0;
426    mStartUs = 0;
427    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
428    mSequence = 1;
429    mObservedSequence = mSequence;
430    mInUnderrun = false;
431
432    return NO_ERROR;
433}
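
// Illustrative only: how a client typically ends up in TRANSFER_CALLBACK mode.  Passing a
// callback and no shared buffer makes TRANSFER_DEFAULT resolve to TRANSFER_CALLBACK in set()
// above.  The callback body and helper names here are hypothetical.
#if 0
static void exampleCallback(int event, void* user, void* info)
{
    if (event == AudioTrack::EVENT_MORE_DATA) {
        AudioTrack::Buffer* buffer = static_cast<AudioTrack::Buffer*>(info);
        memset(buffer->raw, 0, buffer->size);   // a real client would fill PCM here
    }
}

static void examplePlaySilence()
{
    sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 48000, AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_STEREO, 0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE,
            exampleCallback, NULL /*user*/);
    if (track->initCheck() == NO_ERROR) {
        track->start();
    }
}
#endif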
434
435// -------------------------------------------------------------------------
436
437status_t AudioTrack::start()
438{
439    AutoMutex lock(mLock);
440
441    if (mState == STATE_ACTIVE) {
442        return INVALID_OPERATION;
443    }
444
445    mInUnderrun = true;
446
447    State previousState = mState;
448    if (previousState == STATE_PAUSED_STOPPING) {
449        mState = STATE_STOPPING;
450    } else {
451        mState = STATE_ACTIVE;
452    }
453    (void) updateAndGetPosition_l();
454    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
455        // reset current position as seen by client to 0
456        mPosition = 0;
457        // For offloaded tracks, we don't know if the hardware counters are really zero here,
458        // since the flush is asynchronous and stop may not fully drain.
459        // We save the time when the track is started to later verify whether
460        // the counters are realistic (i.e. start from zero after this time).
461        mStartUs = getNowUs();
462
463        // force refresh of remaining frames by processAudioBuffer() as last
464        // write before stop could be partial.
465        mRefreshRemaining = true;
466    }
467    mNewPosition = mPosition + mUpdatePeriod;
468    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
469
470    sp<AudioTrackThread> t = mAudioTrackThread;
471    if (t != 0) {
472        if (previousState == STATE_STOPPING) {
473            mProxy->interrupt();
474        } else {
475            t->resume();
476        }
477    } else {
478        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
479        get_sched_policy(0, &mPreviousSchedulingGroup);
480        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
481    }
482
483    status_t status = NO_ERROR;
484    if (!(flags & CBLK_INVALID)) {
485        status = mAudioTrack->start();
486        if (status == DEAD_OBJECT) {
487            flags |= CBLK_INVALID;
488        }
489    }
490    if (flags & CBLK_INVALID) {
491        status = restoreTrack_l("start");
492    }
493
494    if (status != NO_ERROR) {
495        ALOGE("start() status %d", status);
496        mState = previousState;
497        if (t != 0) {
498            if (previousState != STATE_STOPPING) {
499                t->pause();
500            }
501        } else {
502            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
503            set_sched_policy(0, mPreviousSchedulingGroup);
504        }
505    }
506
507    return status;
508}
509
510void AudioTrack::stop()
511{
512    AutoMutex lock(mLock);
513    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
514        return;
515    }
516
517    if (isOffloaded_l()) {
518        mState = STATE_STOPPING;
519    } else {
520        mState = STATE_STOPPED;
521        mReleased = 0;
522    }
523
524    mProxy->interrupt();
525    mAudioTrack->stop();
526    // the playback head position will reset to 0, so if a marker is set, we need
527    // to activate it again
528    mMarkerReached = false;
529#if 0
530    // Force flush if a shared buffer is used otherwise audioflinger
531    // will not stop before end of buffer is reached.
532    // It may be needed to make sure that we stop playback, likely in case looping is on.
533    if (mSharedBuffer != 0) {
534        flush_l();
535    }
536#endif
537
538    sp<AudioTrackThread> t = mAudioTrackThread;
539    if (t != 0) {
540        if (!isOffloaded_l()) {
541            t->pause();
542        }
543    } else {
544        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
545        set_sched_policy(0, mPreviousSchedulingGroup);
546    }
547}
548
549bool AudioTrack::stopped() const
550{
551    AutoMutex lock(mLock);
552    return mState != STATE_ACTIVE;
553}
554
555void AudioTrack::flush()
556{
557    if (mSharedBuffer != 0) {
558        return;
559    }
560    AutoMutex lock(mLock);
561    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
562        return;
563    }
564    flush_l();
565}
566
567void AudioTrack::flush_l()
568{
569    ALOG_ASSERT(mState != STATE_ACTIVE);
570
571    // clear playback marker and periodic update counter
572    mMarkerPosition = 0;
573    mMarkerReached = false;
574    mUpdatePeriod = 0;
575    mRefreshRemaining = true;
576
577    mState = STATE_FLUSHED;
578    mReleased = 0;
579    if (isOffloaded_l()) {
580        mProxy->interrupt();
581    }
582    mProxy->flush();
583    mAudioTrack->flush();
584}
585
586void AudioTrack::pause()
587{
588    AutoMutex lock(mLock);
589    if (mState == STATE_ACTIVE) {
590        mState = STATE_PAUSED;
591    } else if (mState == STATE_STOPPING) {
592        mState = STATE_PAUSED_STOPPING;
593    } else {
594        return;
595    }
596    mProxy->interrupt();
597    mAudioTrack->pause();
598
599    if (isOffloaded_l()) {
600        if (mOutput != AUDIO_IO_HANDLE_NONE) {
601            // An offload output can be re-used between two audio tracks having
602            // the same configuration. A timestamp query for a paused track
603            // while the other is running would return an incorrect time.
604            // To fix this, cache the playback position on a pause() and return
605            // this time when requested until the track is resumed.
606
607            // OffloadThread sends HAL pause in its threadLoop. Time saved
608            // here can be slightly off.
609
610            // TODO: check return code for getRenderPosition.
611
612            uint32_t halFrames;
613            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
614            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
615        }
616    }
617}
618
619status_t AudioTrack::setVolume(float left, float right)
620{
621    // This duplicates a test by AudioTrack JNI, but that is not the only caller
622    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
623            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
624        return BAD_VALUE;
625    }
626
627    AutoMutex lock(mLock);
628    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
629    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
630
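    // The two channel gains are converted to minifloats and packed into one 32-bit word, so both
    // gains are updated together in the shared control block.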
631    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
632
633    if (isOffloaded_l()) {
634        mAudioTrack->signal();
635    }
636    return NO_ERROR;
637}
638
639status_t AudioTrack::setVolume(float volume)
640{
641    return setVolume(volume, volume);
642}
643
644status_t AudioTrack::setAuxEffectSendLevel(float level)
645{
646    // This duplicates a test by AudioTrack JNI, but that is not the only caller
647    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
648        return BAD_VALUE;
649    }
650
651    AutoMutex lock(mLock);
652    mSendLevel = level;
653    mProxy->setSendLevel(level);
654
655    return NO_ERROR;
656}
657
658void AudioTrack::getAuxEffectSendLevel(float* level) const
659{
660    if (level != NULL) {
661        *level = mSendLevel;
662    }
663}
664
665status_t AudioTrack::setSampleRate(uint32_t rate)
666{
667    if (mIsTimed || isOffloadedOrDirect()) {
668        return INVALID_OPERATION;
669    }
670
671    AutoMutex lock(mLock);
672    if (mOutput == AUDIO_IO_HANDLE_NONE) {
673        return NO_INIT;
674    }
675    uint32_t afSamplingRate;
676    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
677        return NO_INIT;
678    }
679    if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
680        return BAD_VALUE;
681    }
682
683    mSampleRate = rate;
684    mProxy->setSampleRate(rate);
685
686    return NO_ERROR;
687}
688
689uint32_t AudioTrack::getSampleRate() const
690{
691    if (mIsTimed) {
692        return 0;
693    }
694
695    AutoMutex lock(mLock);
696
697    // sample rate can be updated during playback by the offloaded decoder so we need to
698    // query the HAL and update if needed.
699// FIXME use Proxy return channel to update the rate from server and avoid polling here
700    if (isOffloadedOrDirect_l()) {
701        if (mOutput != AUDIO_IO_HANDLE_NONE) {
702            uint32_t sampleRate = 0;
703            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
704            if (status == NO_ERROR) {
705                mSampleRate = sampleRate;
706            }
707        }
708    }
709    return mSampleRate;
710}
711
712status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
713{
714    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
715        return INVALID_OPERATION;
716    }
717
718    if (loopCount == 0) {
719        ;
720    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
721            loopEnd - loopStart >= MIN_LOOP) {
722        ;
723    } else {
724        return BAD_VALUE;
725    }
726
727    AutoMutex lock(mLock);
728    // See setPosition() regarding setting parameters such as loop points or position while active
729    if (mState == STATE_ACTIVE) {
730        return INVALID_OPERATION;
731    }
732    setLoop_l(loopStart, loopEnd, loopCount);
733    return NO_ERROR;
734}
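
// Illustrative only: looping a preloaded clip with a static (shared-buffer) track.  The
// 'clip' IMemory below is hypothetical and assumed to already contain 16-bit PCM.
#if 0
static void exampleLoopClip(const sp<IMemory>& clip)
{
    sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 44100, AUDIO_FORMAT_PCM_16_BIT,
            AUDIO_CHANNEL_OUT_MONO, clip /*sharedBuffer*/, AUDIO_OUTPUT_FLAG_NONE);
    if (track->initCheck() == NO_ERROR) {
        track->setLoop(0, track->frameCount(), -1 /*loopCount: infinite*/);
        track->start();
    }
}
#endif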
735
736void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
737{
738    // Setting the loop will reset next notification update period (like setPosition).
739    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
740    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
741    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
742}
743
744status_t AudioTrack::setMarkerPosition(uint32_t marker)
745{
746    // The only purpose of setting marker position is to get a callback
747    if (mCbf == NULL || isOffloadedOrDirect()) {
748        return INVALID_OPERATION;
749    }
750
751    AutoMutex lock(mLock);
752    mMarkerPosition = marker;
753    mMarkerReached = false;
754
755    return NO_ERROR;
756}
757
758status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
759{
760    if (isOffloadedOrDirect()) {
761        return INVALID_OPERATION;
762    }
763    if (marker == NULL) {
764        return BAD_VALUE;
765    }
766
767    AutoMutex lock(mLock);
768    *marker = mMarkerPosition;
769
770    return NO_ERROR;
771}
772
773status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
774{
775    // The only purpose of setting position update period is to get a callback
776    if (mCbf == NULL || isOffloadedOrDirect()) {
777        return INVALID_OPERATION;
778    }
779
780    AutoMutex lock(mLock);
781    mNewPosition = updateAndGetPosition_l() + updatePeriod;
782    mUpdatePeriod = updatePeriod;
783
784    return NO_ERROR;
785}
786
787status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
788{
789    if (isOffloadedOrDirect()) {
790        return INVALID_OPERATION;
791    }
792    if (updatePeriod == NULL) {
793        return BAD_VALUE;
794    }
795
796    AutoMutex lock(mLock);
797    *updatePeriod = mUpdatePeriod;
798
799    return NO_ERROR;
800}
801
802status_t AudioTrack::setPosition(uint32_t position)
803{
804    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
805        return INVALID_OPERATION;
806    }
807    if (position > mFrameCount) {
808        return BAD_VALUE;
809    }
810
811    AutoMutex lock(mLock);
812    // Currently we require that the player is inactive before setting parameters such as position
813    // or loop points.  Otherwise, there could be a race condition: the application could read the
814    // current position, compute a new position or loop parameters, and then set that position or
815    // loop parameters, but it would do the "wrong" thing since the position has continued to advance
816    // in the meantime.  If we ever provide a sequencer in the server, we could allow a way for the app
817    // to specify how it wants to handle such scenarios.
818    if (mState == STATE_ACTIVE) {
819        return INVALID_OPERATION;
820    }
821    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
822    mLoopPeriod = 0;
823    // FIXME Check whether loops and setting position are incompatible in old code.
824    // If we use setLoop for both purposes we lose the capability to set the position while looping.
825    mStaticProxy->setLoop(position, mFrameCount, 0);
826
827    return NO_ERROR;
828}
829
830status_t AudioTrack::getPosition(uint32_t *position)
831{
832    if (position == NULL) {
833        return BAD_VALUE;
834    }
835
836    AutoMutex lock(mLock);
837    if (isOffloadedOrDirect_l()) {
838        uint32_t dspFrames = 0;
839
840        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
841            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
842            *position = mPausedPosition;
843            return NO_ERROR;
844        }
845
846        if (mOutput != AUDIO_IO_HANDLE_NONE) {
847            uint32_t halFrames;
848            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
849        }
850        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
851        // due to hardware latency. We leave this behavior for now.
852        *position = dspFrames;
853    } else {
854        if (mCblk->mFlags & CBLK_INVALID) {
855            restoreTrack_l("getPosition");
856        }
857
858        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
859        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
860                0 : updateAndGetPosition_l();
861    }
862    return NO_ERROR;
863}
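
// Illustrative only: a client polling the playback head position, e.g. to drive a progress bar.
// The helper name is hypothetical.
#if 0
static uint32_t examplePollHeadPosition(const sp<AudioTrack>& track)
{
    uint32_t frames = 0;
    (void) track->getPosition(&frames);     // leaves frames at 0 if the query fails
    return frames;
}
#endif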
864
865status_t AudioTrack::getBufferPosition(uint32_t *position)
866{
867    if (mSharedBuffer == 0 || mIsTimed) {
868        return INVALID_OPERATION;
869    }
870    if (position == NULL) {
871        return BAD_VALUE;
872    }
873
874    AutoMutex lock(mLock);
875    *position = mStaticProxy->getBufferPosition();
876    return NO_ERROR;
877}
878
879status_t AudioTrack::reload()
880{
881    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
882        return INVALID_OPERATION;
883    }
884
885    AutoMutex lock(mLock);
886    // See setPosition() regarding setting parameters such as loop points or position while active
887    if (mState == STATE_ACTIVE) {
888        return INVALID_OPERATION;
889    }
890    mNewPosition = mUpdatePeriod;
891    mLoopPeriod = 0;
892    // FIXME The new code cannot reload while keeping a loop specified.
893    // Need to check how the old code handled this, and whether it's a significant change.
894    mStaticProxy->setLoop(0, mFrameCount, 0);
895    return NO_ERROR;
896}
897
898audio_io_handle_t AudioTrack::getOutput() const
899{
900    AutoMutex lock(mLock);
901    return mOutput;
902}
903
904status_t AudioTrack::attachAuxEffect(int effectId)
905{
906    AutoMutex lock(mLock);
907    status_t status = mAudioTrack->attachAuxEffect(effectId);
908    if (status == NO_ERROR) {
909        mAuxEffectId = effectId;
910    }
911    return status;
912}
913
914audio_stream_type_t AudioTrack::streamType() const
915{
916    if (mStreamType == AUDIO_STREAM_DEFAULT) {
917        return audio_attributes_to_stream_type(&mAttributes);
918    }
919    return mStreamType;
920}
921
922// -------------------------------------------------------------------------
923
924// must be called with mLock held
925status_t AudioTrack::createTrack_l()
926{
927    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
928    if (audioFlinger == 0) {
929        ALOGE("Could not get audioflinger");
930        return NO_INIT;
931    }
932
933    audio_io_handle_t output;
934    audio_stream_type_t streamType = mStreamType;
935    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
936    status_t status = AudioSystem::getOutputForAttr(attr, &output,
937                                                    (audio_session_t)mSessionId, &streamType,
938                                                    mSampleRate, mFormat, mChannelMask,
939                                                    mFlags, mOffloadInfo);
940
941
942    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
943        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
944              " channel mask %#x, flags %#x",
945              streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
946        return BAD_VALUE;
947    }
948    {
949    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
950    // we must release it ourselves if anything goes wrong.
951
952    // Not all of these values are needed under all conditions, but it is easier to get them all
953
954    uint32_t afLatency;
955    status = AudioSystem::getLatency(output, &afLatency);
956    if (status != NO_ERROR) {
957        ALOGE("getLatency(%d) failed status %d", output, status);
958        goto release;
959    }
960
961    size_t afFrameCount;
962    status = AudioSystem::getFrameCount(output, &afFrameCount);
963    if (status != NO_ERROR) {
964        ALOGE("getFrameCount(output=%d) status %d", output, status);
965        goto release;
966    }
967
968    uint32_t afSampleRate;
969    status = AudioSystem::getSamplingRate(output, &afSampleRate);
970    if (status != NO_ERROR) {
971        ALOGE("getSamplingRate(output=%d) status %d", output, status);
972        goto release;
973    }
974    if (mSampleRate == 0) {
975        mSampleRate = afSampleRate;
976    }
977    // Client decides whether the track is TIMED (see below), but can only express a preference
978    // for FAST.  Server will perform additional tests.
979    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
980            // either of these use cases:
981            // use case 1: shared buffer
982            (mSharedBuffer != 0) ||
983            // use case 2: callback transfer mode
984            (mTransfer == TRANSFER_CALLBACK)) &&
985            // matching sample rate
986            (mSampleRate == afSampleRate))) {
987        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
988        // once denied, do not request again if IAudioTrack is re-created
989        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
990    }
991    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
992
993    // The client's AudioTrack buffer is divided into n parts for the purpose of wakeup by the server, where
994    //  n = 1   fast track with single buffering; nBuffering is ignored
995    //  n = 2   fast track with double buffering
996    //  n = 2   normal track, no sample rate conversion
997    //  n = 3   normal track, with sample rate conversion
998    //          (pessimistic; some non-1:1 conversion ratios don't actually need triple-buffering)
999    //  n > 3   very high latency or very small notification interval; nBuffering is ignored
1000    const uint32_t nBuffering = (mSampleRate == afSampleRate) ? 2 : 3;
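    // For example, a 48 kHz track on a 48 kHz output gets n = 2, while a 44.1 kHz track that the
    // server must resample to 48 kHz gets n = 3.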
1001
1002    mNotificationFramesAct = mNotificationFramesReq;
1003
1004    size_t frameCount = mReqFrameCount;
1005    if (!audio_is_linear_pcm(mFormat)) {
1006
1007        if (mSharedBuffer != 0) {
1008            // Same comment as below about ignoring frameCount parameter for set()
1009            frameCount = mSharedBuffer->size();
1010        } else if (frameCount == 0) {
1011            frameCount = afFrameCount;
1012        }
1013        if (mNotificationFramesAct != frameCount) {
1014            mNotificationFramesAct = frameCount;
1015        }
1016    } else if (mSharedBuffer != 0) {
1017
1018        // Ensure that buffer alignment matches channel count
1019        // 8-bit data in shared memory is not currently supported by AudioFlinger
1020        size_t alignment = audio_bytes_per_sample(
1021                mFormat == AUDIO_FORMAT_PCM_8_BIT ? AUDIO_FORMAT_PCM_16_BIT : mFormat);
1022        if (alignment & 1) {
1023            alignment = 1;
1024        }
1025        if (mChannelCount > 1) {
1026            // Channel counts greater than 2 do not require stronger alignment than stereo
1027            alignment <<= 1;
1028        }
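        // For example, 16-bit stereo data must start on a 4-byte boundary, while mono (even 8-bit,
        // which the server expands to 16-bit) only needs 2-byte alignment.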
1029        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
1030            ALOGE("Invalid buffer alignment: address %p, channel count %u",
1031                    mSharedBuffer->pointer(), mChannelCount);
1032            status = BAD_VALUE;
1033            goto release;
1034        }
1035
1036        // When initializing a shared buffer AudioTrack via constructors,
1037        // there's no frameCount parameter.
1038        // But when initializing a shared buffer AudioTrack via set(),
1039        // there _is_ a frameCount parameter.  We silently ignore it.
1040        frameCount = mSharedBuffer->size() / mFrameSizeAF;
1041
1042    } else if (!(mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
1043
1044        // FIXME move these calculations and associated checks to server
1045
1046        // Ensure that buffer depth covers at least audio hardware latency
1047        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
1048        ALOGV("afFrameCount=%zu, minBufCount=%d, afSampleRate=%u, afLatency=%d",
1049                afFrameCount, minBufCount, afSampleRate, afLatency);
1050        if (minBufCount <= nBuffering) {
1051            minBufCount = nBuffering;
1052        }
1053
1054        size_t minFrameCount = afFrameCount * minBufCount * uint64_t(mSampleRate) / afSampleRate;
1055        ALOGV("minFrameCount: %zu, afFrameCount=%zu, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
1056                ", afLatency=%d",
1057                minFrameCount, afFrameCount, minBufCount, mSampleRate, afSampleRate, afLatency);
1058
1059        if (frameCount == 0) {
1060            frameCount = minFrameCount;
1061        } else if (frameCount < minFrameCount) {
1062            // not ALOGW because it happens all the time when playing key clicks over A2DP
1063            ALOGV("Minimum buffer size corrected from %zu to %zu",
1064                     frameCount, minFrameCount);
1065            frameCount = minFrameCount;
1066        }
1067        // Make sure that application is notified with sufficient margin before underrun
1068        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1069            mNotificationFramesAct = frameCount/nBuffering;
1070        }
1071
1072    } else {
1073        // For fast tracks, the frame count calculations and checks are done by server
1074    }
1075
1076    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
1077    if (mIsTimed) {
1078        trackFlags |= IAudioFlinger::TRACK_TIMED;
1079    }
1080
1081    pid_t tid = -1;
1082    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1083        trackFlags |= IAudioFlinger::TRACK_FAST;
1084        if (mAudioTrackThread != 0) {
1085            tid = mAudioTrackThread->getTid();
1086        }
1087    }
1088
1089    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1090        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
1091    }
1092
1093    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1094        trackFlags |= IAudioFlinger::TRACK_DIRECT;
1095    }
1096
1097    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
1098                                // but we will still need the original value also
1099    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
1100                                                      mSampleRate,
1101                                                      // AudioFlinger only sees 16-bit PCM
1102                                                      mFormat == AUDIO_FORMAT_PCM_8_BIT &&
1103                                                          !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT) ?
1104                                                              AUDIO_FORMAT_PCM_16_BIT : mFormat,
1105                                                      mChannelMask,
1106                                                      &temp,
1107                                                      &trackFlags,
1108                                                      mSharedBuffer,
1109                                                      output,
1110                                                      tid,
1111                                                      &mSessionId,
1112                                                      mClientUid,
1113                                                      &status);
1114
1115    if (status != NO_ERROR) {
1116        ALOGE("AudioFlinger could not create track, status: %d", status);
1117        goto release;
1118    }
1119    ALOG_ASSERT(track != 0);
1120
1121    // AudioFlinger now owns the reference to the I/O handle,
1122    // so we are no longer responsible for releasing it.
1123
1124    sp<IMemory> iMem = track->getCblk();
1125    if (iMem == 0) {
1126        ALOGE("Could not get control block");
1127        return NO_INIT;
1128    }
1129    void *iMemPointer = iMem->pointer();
1130    if (iMemPointer == NULL) {
1131        ALOGE("Could not get control block pointer");
1132        return NO_INIT;
1133    }
1134    // invariant that mAudioTrack != 0 is true only after set() returns successfully
1135    if (mAudioTrack != 0) {
1136        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
1137        mDeathNotifier.clear();
1138    }
1139    mAudioTrack = track;
1140    mCblkMemory = iMem;
1141    IPCThreadState::self()->flushCommands();
1142
1143    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
1144    mCblk = cblk;
1145    // note that temp is the (possibly revised) value of frameCount
1146    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
1147        // In the current design, the AudioTrack client checks and ensures frame count validity before
1148        // passing it to AudioFlinger, so AudioFlinger should not return a different value except
1149        // for fast tracks, which use a special method of assigning the frame count.
1150        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
1151    }
1152    frameCount = temp;
1153
1154    mAwaitBoost = false;
1155    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1156        if (trackFlags & IAudioFlinger::TRACK_FAST) {
1157            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
1158            mAwaitBoost = true;
1159            if (mSharedBuffer == 0) {
1160                // Theoretically double-buffering is not required for fast tracks,
1161                // due to tighter scheduling.  But in practice, to accommodate kernels with
1162                // scheduling jitter, and apps with computation jitter, we use double-buffering.
1163                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1164                    mNotificationFramesAct = frameCount/nBuffering;
1165                }
1166            }
1167        } else {
1168            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
1169            // once denied, do not request again if IAudioTrack is re-created
1170            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
1171            if (mSharedBuffer == 0) {
1172                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/nBuffering) {
1173                    mNotificationFramesAct = frameCount/nBuffering;
1174                }
1175            }
1176        }
1177    }
1178    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
1179        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
1180            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
1181        } else {
1182            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
1183            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
1184            // FIXME This is a warning, not an error, so don't return error status
1185            //return NO_INIT;
1186        }
1187    }
1188    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
1189        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
1190            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
1191        } else {
1192            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
1193            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
1194            // FIXME This is a warning, not an error, so don't return error status
1195            //return NO_INIT;
1196        }
1197    }
1198
1199    // We retain a copy of the I/O handle, but don't own the reference
1200    mOutput = output;
1201    mRefreshRemaining = true;
1202
1203    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
1204    // is the value of pointer() for the shared buffer, otherwise buffers points
1205    // immediately after the control block.  This address is for the mapping within client
1206    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
1207    void* buffers;
1208    if (mSharedBuffer == 0) {
1209        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
1210    } else {
1211        buffers = mSharedBuffer->pointer();
1212    }
1213
1214    mAudioTrack->attachAuxEffect(mAuxEffectId);
1215    // FIXME don't believe this lie
1216    mLatency = afLatency + (1000*frameCount) / mSampleRate;
1217
1218    mFrameCount = frameCount;
1219    // If IAudioTrack is re-created, don't let the requested frameCount
1220    // decrease.  This can confuse clients that cache frameCount().
1221    if (frameCount > mReqFrameCount) {
1222        mReqFrameCount = frameCount;
1223    }
1224
1225    // update proxy
1226    if (mSharedBuffer == 0) {
1227        mStaticProxy.clear();
1228        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1229    } else {
1230        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
1231        mProxy = mStaticProxy;
1232    }
1233
1234    mProxy->setVolumeLR(gain_minifloat_pack(
1235            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
1236            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));
1237
1238    mProxy->setSendLevel(mSendLevel);
1239    mProxy->setSampleRate(mSampleRate);
1240    mProxy->setMinimum(mNotificationFramesAct);
1241
1242    mDeathNotifier = new DeathNotifier(this);
1243    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
1244
1245    return NO_ERROR;
1246    }
1247
1248release:
1249    AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
1250    if (status == NO_ERROR) {
1251        status = NO_INIT;
1252    }
1253    return status;
1254}
1255
1256status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
1257{
1258    if (audioBuffer == NULL) {
1259        return BAD_VALUE;
1260    }
1261    if (mTransfer != TRANSFER_OBTAIN) {
1262        audioBuffer->frameCount = 0;
1263        audioBuffer->size = 0;
1264        audioBuffer->raw = NULL;
1265        return INVALID_OPERATION;
1266    }
1267
1268    const struct timespec *requested;
1269    struct timespec timeout;
1270    if (waitCount == -1) {
1271        requested = &ClientProxy::kForever;
1272    } else if (waitCount == 0) {
1273        requested = &ClientProxy::kNonBlocking;
1274    } else if (waitCount > 0) {
1275        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1276        timeout.tv_sec = ms / 1000;
1277        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1278        requested = &timeout;
1279    } else {
1280        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1281        requested = NULL;
1282    }
1283    return obtainBuffer(audioBuffer, requested);
1284}
1285
1286status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1287        struct timespec *elapsed, size_t *nonContig)
1288{
1289    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1290    uint32_t oldSequence = 0;
1291    uint32_t newSequence;
1292
1293    Proxy::Buffer buffer;
1294    status_t status = NO_ERROR;
1295
1296    static const int32_t kMaxTries = 5;
1297    int32_t tryCounter = kMaxTries;
1298
1299    do {
1300        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1301        // keep them from going away if another thread re-creates the track during obtainBuffer()
1302        sp<AudioTrackClientProxy> proxy;
1303        sp<IMemory> iMem;
1304
1305        {   // start of lock scope
1306            AutoMutex lock(mLock);
1307
1308            newSequence = mSequence;
1309            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1310            if (status == DEAD_OBJECT) {
1311                // re-create track, unless someone else has already done so
1312                if (newSequence == oldSequence) {
1313                    status = restoreTrack_l("obtainBuffer");
1314                    if (status != NO_ERROR) {
1315                        buffer.mFrameCount = 0;
1316                        buffer.mRaw = NULL;
1317                        buffer.mNonContig = 0;
1318                        break;
1319                    }
1320                }
1321            }
1322            oldSequence = newSequence;
1323
1324            // Keep the extra references
1325            proxy = mProxy;
1326            iMem = mCblkMemory;
1327
1328            if (mState == STATE_STOPPING) {
1329                status = -EINTR;
1330                buffer.mFrameCount = 0;
1331                buffer.mRaw = NULL;
1332                buffer.mNonContig = 0;
1333                break;
1334            }
1335
1336            // Non-blocking if track is stopped or paused
1337            if (mState != STATE_ACTIVE) {
1338                requested = &ClientProxy::kNonBlocking;
1339            }
1340
1341        }   // end of lock scope
1342
1343        buffer.mFrameCount = audioBuffer->frameCount;
1344        // FIXME starts the requested timeout and elapsed over from scratch
1345        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1346
1347    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1348
1349    audioBuffer->frameCount = buffer.mFrameCount;
1350    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1351    audioBuffer->raw = buffer.mRaw;
1352    if (nonContig != NULL) {
1353        *nonContig = buffer.mNonContig;
1354    }
1355    return status;
1356}
1357
1358void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1359{
1360    if (mTransfer == TRANSFER_SHARED) {
1361        return;
1362    }
1363
1364    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1365    if (stepCount == 0) {
1366        return;
1367    }
1368
1369    Proxy::Buffer buffer;
1370    buffer.mFrameCount = stepCount;
1371    buffer.mRaw = audioBuffer->raw;
1372
1373    AutoMutex lock(mLock);
1374    mReleased += stepCount;
1375    mInUnderrun = false;
1376    mProxy->releaseBuffer(&buffer);
1377
1378    // restart track if it was disabled by audioflinger due to previous underrun
1379    if (mState == STATE_ACTIVE) {
1380        audio_track_cblk_t* cblk = mCblk;
1381        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1382            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1383            // FIXME ignoring status
1384            mAudioTrack->start();
1385        }
1386    }
1387}
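
// Illustrative only: the obtainBuffer()/releaseBuffer() cycle used by TRANSFER_OBTAIN clients.
// The silence fill and blocking waitCount below are hypothetical choices.
#if 0
static void exampleObtainAndRelease(const sp<AudioTrack>& track, size_t frames)
{
    AudioTrack::Buffer buffer;
    buffer.frameCount = frames;
    if (track->obtainBuffer(&buffer, -1 /*waitCount: block until space is available*/) == NO_ERROR) {
        memset(buffer.raw, 0, buffer.size);     // a real client would write PCM here
        track->releaseBuffer(&buffer);
    }
}
#endif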
1388
1389// -------------------------------------------------------------------------
1390
1391ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1392{
1393    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1394        return INVALID_OPERATION;
1395    }
1396
1397    if (isDirect()) {
1398        AutoMutex lock(mLock);
1399        int32_t flags = android_atomic_and(
1400                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1401                            &mCblk->mFlags);
1402        if (flags & CBLK_INVALID) {
1403            return DEAD_OBJECT;
1404        }
1405    }
1406
1407    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1408        // Sanity-check: the caller is most likely passing an error code, and it would
1409        // make the return value ambiguous (actualSize vs error).
1410        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1411        return BAD_VALUE;
1412    }
1413
1414    size_t written = 0;
1415    Buffer audioBuffer;
1416
1417    while (userSize >= mFrameSize) {
1418        audioBuffer.frameCount = userSize / mFrameSize;
1419
1420        status_t err = obtainBuffer(&audioBuffer,
1421                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1422        if (err < 0) {
1423            if (written > 0) {
1424                break;
1425            }
1426            return ssize_t(err);
1427        }
1428
1429        size_t toWrite;
1430        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1431            // Divide capacity by 2 to take expansion into account
1432            toWrite = audioBuffer.size >> 1;
1433            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1434        } else {
1435            toWrite = audioBuffer.size;
1436            memcpy(audioBuffer.i8, buffer, toWrite);
1437        }
1438        buffer = ((const char *) buffer) + toWrite;
1439        userSize -= toWrite;
1440        written += toWrite;
1441
1442        releaseBuffer(&audioBuffer);
1443    }
1444
1445    return written;
1446}
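
// Illustrative only: streaming PCM through blocking write() calls (TRANSFER_SYNC).  The data
// source and error handling are hypothetical.
#if 0
static void exampleWritePcm(const sp<AudioTrack>& track, const void* pcm, size_t pcmBytes)
{
    track->start();
    ssize_t written = track->write(pcm, pcmBytes, true /*blocking*/);
    if (written < 0) {
        ALOGE("example write failed: %zd", written);
    }
    track->stop();
}
#endif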
1447
1448// -------------------------------------------------------------------------
1449
1450TimedAudioTrack::TimedAudioTrack() {
1451    mIsTimed = true;
1452}
1453
1454status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1455{
1456    AutoMutex lock(mLock);
1457    status_t result = UNKNOWN_ERROR;
1458
1459#if 1
1460    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1461    // while we are accessing the cblk
1462    sp<IAudioTrack> audioTrack = mAudioTrack;
1463    sp<IMemory> iMem = mCblkMemory;
1464#endif
1465
1466    // If the track is not invalid already, try to allocate a buffer.  If the alloc
1467    // fails indicating that the server is dead, flag the track as invalid so that
1468    // we can attempt to restore it in just a bit.
1469    audio_track_cblk_t* cblk = mCblk;
1470    if (!(cblk->mFlags & CBLK_INVALID)) {
1471        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1472        if (result == DEAD_OBJECT) {
1473            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1474        }
1475    }
1476
1477    // If the track is invalid at this point, attempt to restore it and try the
1478    // allocation one more time.
1479    if (cblk->mFlags & CBLK_INVALID) {
1480        result = restoreTrack_l("allocateTimedBuffer");
1481
1482        if (result == NO_ERROR) {
1483            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1484        }
1485    }
1486
1487    return result;
1488}
1489
1490status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1491                                           int64_t pts)
1492{
1493    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1494    {
1495        AutoMutex lock(mLock);
1496        audio_track_cblk_t* cblk = mCblk;
1497        // restart track if it was disabled by audioflinger due to previous underrun
1498        if (buffer->size() != 0 && status == NO_ERROR &&
1499                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1500            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1501            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1502            // FIXME ignoring status
1503            mAudioTrack->start();
1504        }
1505    }
1506    return status;
1507}
1508
1509status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1510                                                TargetTimeline target)
1511{
1512    return mAudioTrack->setMediaTimeTransform(xform, target);
1513}
1514
1515// -------------------------------------------------------------------------
1516
1517nsecs_t AudioTrack::processAudioBuffer()
1518{
1519    // Currently the AudioTrack thread is not created if there are no callbacks.
1520    // Would it ever make sense to run the thread, even without callbacks?
1521    // If so, then replace this by checks at each use for mCbf != NULL.
1522    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1523
1524    mLock.lock();
1525    if (mAwaitBoost) {
1526        mAwaitBoost = false;
1527        mLock.unlock();
1528        static const int32_t kMaxTries = 5;
1529        int32_t tryCounter = kMaxTries;
1530        uint32_t pollUs = 10000;
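        // Exponential backoff: sleep 10 ms and double the interval on each try while
        // waiting for the requested SCHED_FIFO/SCHED_RR priority boost to take effect.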
1531        do {
1532            int policy = sched_getscheduler(0);
1533            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1534                break;
1535            }
1536            usleep(pollUs);
1537            pollUs <<= 1;
1538        } while (tryCounter-- > 0);
1539        if (tryCounter < 0) {
1540            ALOGE("did not receive expected priority boost on time");
1541        }
1542        // Run again immediately
1543        return 0;
1544    }
1545
1546    // Can only reference mCblk while locked
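    // android_atomic_and() returns the previous value of mFlags, so this both snapshots
    // the pending event bits (underrun, loop cycle/final, buffer end) and clears them in
    // one atomic step; each event is therefore reported at most once per wakeup.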
1547    int32_t flags = android_atomic_and(
1548        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1549
1550    // Check for track invalidation
1551    if (flags & CBLK_INVALID) {
1552        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear
1553        // the AudioSystem cache. We should not exit here but after calling the callback,
1554        // so that the upper layers can recreate the track.
1555        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1556            status_t status __unused = restoreTrack_l("processAudioBuffer");
1557            mLock.unlock();
1558            // Run again immediately, but with a new IAudioTrack
1559            return 0;
1560        }
1561    }
1562
1563    bool waitStreamEnd = mState == STATE_STOPPING;
1564    bool active = mState == STATE_ACTIVE;
1565
1566    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1567    bool newUnderrun = false;
1568    if (flags & CBLK_UNDERRUN) {
1569#if 0
1570        // Currently in shared buffer mode, when the server reaches the end of the buffer,
1571        // the track stays active in a continuous underrun state.  It's up to the application
1572        // to pause or stop the track, or to set the position to a new offset within the buffer.
1573        // This was some experimental code to auto-pause on underrun.  Keeping it here
1574        // in "if 0" so we can revisit it if we add a real sequencer for shared memory content.
1575        if (mTransfer == TRANSFER_SHARED) {
1576            mState = STATE_PAUSED;
1577            active = false;
1578        }
1579#endif
1580        if (!mInUnderrun) {
1581            mInUnderrun = true;
1582            newUnderrun = true;
1583        }
1584    }
1585
1586    // Get current position of server
1587    size_t position = updateAndGetPosition_l();
1588
1589    // Manage marker callback
1590    bool markerReached = false;
1591    size_t markerPosition = mMarkerPosition;
1592    // FIXME fails for wraparound, need 64 bits
1593    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1594        mMarkerReached = markerReached = true;
1595    }
1596
1597    // Determine number of new position callback(s) that will be needed, while locked
1598    size_t newPosCount = 0;
1599    size_t newPosition = mNewPosition;
1600    size_t updatePeriod = mUpdatePeriod;
1601    // FIXME fails for wraparound, need 64 bits
1602    if (updatePeriod > 0 && position >= newPosition) {
1603        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1604        mNewPosition += updatePeriod * newPosCount;
1605    }
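    // For example, with position = 2500, mNewPosition = 1000 and updatePeriod = 1000,
    // two EVENT_NEW_POS callbacks are due (for 1000 and 2000) and mNewPosition becomes 3000.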
1606
1607    // Cache other fields that will be needed soon
1608    uint32_t loopPeriod = mLoopPeriod;
1609    uint32_t sampleRate = mSampleRate;
1610    uint32_t notificationFrames = mNotificationFramesAct;
1611    if (mRefreshRemaining) {
1612        mRefreshRemaining = false;
1613        mRemainingFrames = notificationFrames;
1614        mRetryOnPartialBuffer = false;
1615    }
1616    size_t misalignment = mProxy->getMisalignment();
1617    uint32_t sequence = mSequence;
1618    sp<AudioTrackClientProxy> proxy = mProxy;
1619
1620    // These fields don't need to be cached, because they are assigned only by set():
1621    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1622    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1623
1624    mLock.unlock();
1625
1626    if (waitStreamEnd) {
1627        struct timespec timeout;
1628        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1629        timeout.tv_nsec = 0;
1630
1631        status_t status = proxy->waitStreamEndDone(&timeout);
1632        switch (status) {
1633        case NO_ERROR:
1634        case DEAD_OBJECT:
1635        case TIMED_OUT:
1636            mCbf(EVENT_STREAM_END, mUserData, NULL);
1637            {
1638                AutoMutex lock(mLock);
1639                // The previously assigned value of waitStreamEnd is no longer valid,
1640                // since the mutex has been unlocked and either the callback handler
1641                // or another thread could have re-started the AudioTrack during that time.
1642                waitStreamEnd = mState == STATE_STOPPING;
1643                if (waitStreamEnd) {
1644                    mState = STATE_STOPPED;
1645                    mReleased = 0;
1646                }
1647            }
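            // On DEAD_OBJECT, fall through to the "return 0" below so that the thread
            // runs again immediately rather than going inactive.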
1648            if (waitStreamEnd && status != DEAD_OBJECT) {
1649                return NS_INACTIVE;
1650            }
1651            break;
1652        }
1653        return 0;
1654    }
1655
1656    // perform callbacks while unlocked
1657    if (newUnderrun) {
1658        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1659    }
1660    // FIXME we will miss loops if loop cycle was signaled several times since last call
1661    //       to processAudioBuffer()
1662    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1663        mCbf(EVENT_LOOP_END, mUserData, NULL);
1664    }
1665    if (flags & CBLK_BUFFER_END) {
1666        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1667    }
1668    if (markerReached) {
1669        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1670    }
1671    while (newPosCount > 0) {
1672        size_t temp = newPosition;
1673        mCbf(EVENT_NEW_POS, mUserData, &temp);
1674        newPosition += updatePeriod;
1675        newPosCount--;
1676    }
1677
1678    if (mObservedSequence != sequence) {
1679        mObservedSequence = sequence;
1680        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1681        // for offloaded tracks, just wait for the upper layers to recreate the track
1682        if (isOffloadedOrDirect()) {
1683            return NS_INACTIVE;
1684        }
1685    }
1686
1687    // if inactive, then don't run me again until re-started
1688    if (!active) {
1689        return NS_INACTIVE;
1690    }
1691
1692    // Compute the estimated time until the next timed event (position, markers, loops)
1693    // FIXME only for non-compressed audio
1694    uint32_t minFrames = ~0;
1695    if (!markerReached && position < markerPosition) {
1696        minFrames = markerPosition - position;
1697    }
1698    if (loopPeriod > 0 && loopPeriod < minFrames) {
1699        minFrames = loopPeriod;
1700    }
1701    if (updatePeriod > 0 && updatePeriod < minFrames) {
1702        minFrames = updatePeriod;
1703    }
1704
1705    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1706    static const uint32_t kPoll = 0;
1707    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1708        minFrames = kPoll * notificationFrames;
1709    }
1710
1711    // Convert frame units to time units
1712    nsecs_t ns = NS_WHENEVER;
1713    if (minFrames != (uint32_t) ~0) {
1714        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1715        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1716        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
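        // For example, minFrames = 480 at 48000 Hz yields 10 ms, plus the 10 ms fudge = 20 ms.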
1717    }
1718
1719    // If not supplying data by EVENT_MORE_DATA, then we're done
1720    if (mTransfer != TRANSFER_CALLBACK) {
1721        return ns;
1722    }
1723
1724    struct timespec timeout;
1725    const struct timespec *requested = &ClientProxy::kForever;
1726    if (ns != NS_WHENEVER) {
1727        timeout.tv_sec = ns / 1000000000LL;
1728        timeout.tv_nsec = ns % 1000000000LL;
1729        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1730        requested = &timeout;
1731    }
1732
1733    while (mRemainingFrames > 0) {
1734
1735        Buffer audioBuffer;
1736        audioBuffer.frameCount = mRemainingFrames;
1737        size_t nonContig;
1738        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1739        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1740                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1741        requested = &ClientProxy::kNonBlocking;
1742        size_t avail = audioBuffer.frameCount + nonContig;
1743        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1744                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1745        if (err != NO_ERROR) {
1746            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1747                    (isOffloaded() && (err == DEAD_OBJECT))) {
1748                return 0;
1749            }
1750            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1751            return NS_NEVER;
1752        }
1753
1754        if (mRetryOnPartialBuffer && !isOffloaded()) {
1755            mRetryOnPartialBuffer = false;
1756            if (avail < mRemainingFrames) {
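                // 1100000000 is 1.1e9: a frames-to-ns conversion with roughly 10% margin
                // added to the computed wait before the thread runs again.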
1757                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1758                if (ns < 0 || myns < ns) {
1759                    ns = myns;
1760                }
1761                return ns;
1762            }
1763        }
1764
1765        // Divide buffer size by 2 to take into account the expansion
1766        // due to 8 to 16 bit conversion: the callback must fill only half
1767        // of the destination buffer
1768        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1769            audioBuffer.size >>= 1;
1770        }
1771
1772        size_t reqSize = audioBuffer.size;
1773        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1774        size_t writtenSize = audioBuffer.size;
1775
1776        // Sanity check on returned size
1777        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1778            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1779                    reqSize, ssize_t(writtenSize));
1780            return NS_NEVER;
1781        }
1782
1783        if (writtenSize == 0) {
1784            // The callback is done filling buffers
1785            // Keep this thread going to handle timed events and
1786            // still try to get more data in intervals of WAIT_PERIOD_MS
1787            // but don't just loop and block the CPU, so wait
1788            return WAIT_PERIOD_MS * 1000000LL;
1789        }
1790
1791        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1792            // 8 to 16 bit conversion, note that source and destination are the same address
1793            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1794            audioBuffer.size <<= 1;
1795        }
1796
1797        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1798        audioBuffer.frameCount = releasedFrames;
1799        mRemainingFrames -= releasedFrames;
1800        if (misalignment >= releasedFrames) {
1801            misalignment -= releasedFrames;
1802        } else {
1803            misalignment = 0;
1804        }
1805
1806        releaseBuffer(&audioBuffer);
1807
1808        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1809        // if the callback did not accept the full chunk
1810        if (writtenSize < reqSize) {
1811            continue;
1812        }
1813
1814        // There could be enough non-contiguous frames available to satisfy the remaining request
1815        if (mRemainingFrames <= nonContig) {
1816            continue;
1817        }
1818
1819#if 0
1820        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1821        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1822        // that total to a sum == notificationFrames.
1823        if (0 < misalignment && misalignment <= mRemainingFrames) {
1824            mRemainingFrames = misalignment;
1825            return (mRemainingFrames * 1100000000LL) / sampleRate;
1826        }
1827#endif
1828
1829    }
1830    mRemainingFrames = notificationFrames;
1831    mRetryOnPartialBuffer = true;
1832
1833    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1834    return 0;
1835}
1836
1837status_t AudioTrack::restoreTrack_l(const char *from)
1838{
1839    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1840          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
1841    ++mSequence;
1842    status_t result;
1843
1844    // refresh the audio configuration cache in this process to make sure we get new
1845    // output parameters and new IAudioFlinger in createTrack_l()
1846    AudioSystem::clearAudioConfigCache();
1847
1848    if (isOffloadedOrDirect_l()) {
1849        // FIXME re-creation of offloaded tracks is not yet implemented
1850        return DEAD_OBJECT;
1851    }
1852
1853    // save the old static buffer position
1854    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1855
1856    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
1857    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1858    // It will also delete the strong references on previous IAudioTrack and IMemory.
1859    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
1860    result = createTrack_l();
1861
1862    // take the frames that will be lost by track recreation into account in saved position
1863    (void) updateAndGetPosition_l();
1864    mPosition = mReleased;
1865
1866    if (result == NO_ERROR) {
1867        // continue playback from last known position, but
1868        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1869        if (mStaticProxy != NULL) {
1870            mLoopPeriod = 0;
1871            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1872        }
1873        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1874        //       track destruction have been played? This is critical for the SoundPool implementation.
1875        //       This must be broken, and needs to be tested/debugged.
1876#if 0
1877        // restore write index and set other indexes to reflect empty buffer status
1878        if (!strcmp(from, "start")) {
1879            // Make sure that a client relying on callback events indicating underrun or
1880            // the actual amount of audio frames played (e.g SoundPool) receives them.
1881            if (mSharedBuffer == 0) {
1882                // restart playback even if buffer is not completely filled.
1883                android_atomic_or(CBLK_FORCEREADY, &mCblk->mFlags);
1884            }
1885        }
1886#endif
1887        if (mState == STATE_ACTIVE) {
1888            result = mAudioTrack->start();
1889        }
1890    }
1891    if (result != NO_ERROR) {
1892        ALOGW("restoreTrack_l() failed status %d", result);
1893        mState = STATE_STOPPED;
1894        mReleased = 0;
1895    }
1896
1897    return result;
1898}
1899
1900uint32_t AudioTrack::updateAndGetPosition_l()
1901{
1902    // This is the sole place to read server consumed frames
1903    uint32_t newServer = mProxy->getPosition();
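    // The unsigned 32-bit subtraction below is well-defined across wraparound: as long as
    // the server advanced by fewer than 2^31 frames since the last read, delta is correct.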
1904    int32_t delta = newServer - mServer;
1905    mServer = newServer;
1906    // TODO There is controversy about whether there can be "negative jitter" in server position.
1907    //      This should be investigated further, and if possible, it should be addressed.
1908    //      A more definite failure mode is infrequent polling by client.
1909    //      One could call (void)getPosition_l() in releaseBuffer(),
1910    //      so mReleased and mPosition are always lock-step as best possible.
1911    //      That should ensure delta never goes negative for infrequent polling
1912    //      unless the server has more than 2^31 frames in its buffer,
1913    //      in which case the use of uint32_t for these counters has bigger issues.
1914    if (delta < 0) {
1915        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
1916        delta = 0;
1917    }
1918    return mPosition += (uint32_t) delta;
1919}
1920
1921status_t AudioTrack::setParameters(const String8& keyValuePairs)
1922{
1923    AutoMutex lock(mLock);
1924    return mAudioTrack->setParameters(keyValuePairs);
1925}
1926
1927status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1928{
1929    AutoMutex lock(mLock);
1930    // FIXME not implemented for fast tracks; should use proxy and SSQ
1931    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1932        return INVALID_OPERATION;
1933    }
1934
1935    switch (mState) {
1936    case STATE_ACTIVE:
1937    case STATE_PAUSED:
1938        break; // handle below
1939    case STATE_FLUSHED:
1940    case STATE_STOPPED:
1941        return WOULD_BLOCK;
1942    case STATE_STOPPING:
1943    case STATE_PAUSED_STOPPING:
1944        if (!isOffloaded_l()) {
1945            return INVALID_OPERATION;
1946        }
1947        break; // offloaded tracks handled below
1948    default:
1949        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
1950        break;
1951    }
1952
1953    if (mCblk->mFlags & CBLK_INVALID) {
1954        restoreTrack_l("getTimestamp");
1955    }
1956
1957    // The presented frame count must always lag behind the consumed frame count.
1958    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
1959    status_t status = mAudioTrack->getTimestamp(timestamp);
1960    if (status != NO_ERROR) {
1961        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
1962        return status;
1963    }
1964    if (isOffloadedOrDirect_l()) {
1965        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
1966            // use cached paused position in case another offloaded track is running.
1967            timestamp.mPosition = mPausedPosition;
1968            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
1969            return NO_ERROR;
1970        }
1971
1972        // Check whether a pending flush or stop has completed, as those commands may
1973        // be asynchronous or return near finish.
1974        if (mStartUs != 0 && mSampleRate != 0) {
1975            static const int kTimeJitterUs = 100000; // 100 ms
1976            static const int k1SecUs = 1000000;
1977
1978            const int64_t timeNow = getNowUs();
1979
1980            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
1981                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
1982                if (timestampTimeUs < mStartUs) {
1983                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
1984                }
1985                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
1986                const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
1987
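                // Example of the check below: at 48000 Hz a reported position of 48000 frames
                // implies deltaPositionByUs = 1000000; if only 500000 us have elapsed since
                // start, 1000000 > 500000 + 100000, so the timestamp is rejected as stale.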
1988                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
1989                    // Verify that the counter can't count faster than the sample rate
1990                    // since the start time.  If greater, then that means we have failed
1991                    // to completely flush or stop the previous playing track.
1992                    ALOGW("incomplete flush or stop:"
1993                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
1994                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
1995                            timestamp.mPosition);
1996                    return WOULD_BLOCK;
1997                }
1998            }
1999            mStartUs = 0; // no need to check again, the start timestamp has either expired or is no longer needed.
2000        }
2001    } else {
2002        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
2003        (void) updateAndGetPosition_l();
2004        // Server consumed (mServer) and presented both use the same server time base,
2005        // and server consumed is always >= presented.
2006        // The delta between these represents the number of frames in the buffer pipeline.
2007        // If this delta is greater than the client position, it means that what was
2008        // actually presented is still stuck at the starting line (figuratively speaking),
2009        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
2010        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
2011            return INVALID_OPERATION;
2012        }
2013        // Convert timestamp position from server time base to client time base.
2014        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2015        // But if we change it to 64-bit then this could fail.
2016        // If (mPosition - mServer) can be negative then should use:
2017        //   (int32_t)(mPosition - mServer)
2018        timestamp.mPosition += mPosition - mServer;
2019        // Immediately after a call to getPosition_l(), mPosition and
2020        // mServer both represent the same frame position.  mPosition is
2021        // in client's point of view, and mServer is in server's point of
2022        // view.  So the difference between them is the "fudge factor"
2023        // between client and server views due to stop() and/or new
2024        // IAudioTrack.  And timestamp.mPosition is initially in server's
2025        // point of view, so we need to apply the same fudge factor to it.
2026    }
2027    return status;
2028}
2029
2030String8 AudioTrack::getParameters(const String8& keys)
2031{
2032    audio_io_handle_t output = getOutput();
2033    if (output != AUDIO_IO_HANDLE_NONE) {
2034        return AudioSystem::getParameters(output, keys);
2035    } else {
2036        return String8::empty();
2037    }
2038}
2039
2040bool AudioTrack::isOffloaded() const
2041{
2042    AutoMutex lock(mLock);
2043    return isOffloaded_l();
2044}
2045
2046bool AudioTrack::isDirect() const
2047{
2048    AutoMutex lock(mLock);
2049    return isDirect_l();
2050}
2051
2052bool AudioTrack::isOffloadedOrDirect() const
2053{
2054    AutoMutex lock(mLock);
2055    return isOffloadedOrDirect_l();
2056}
2057
2058
2059status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2060{
2061
2062    const size_t SIZE = 256;
2063    char buffer[SIZE];
2064    String8 result;
2065
2066    result.append(" AudioTrack::dump\n");
2067    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2068            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2069    result.append(buffer);
2070    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2071            mChannelCount, mFrameCount);
2072    result.append(buffer);
2073    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
2074    result.append(buffer);
2075    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2076    result.append(buffer);
2077    ::write(fd, result.string(), result.size());
2078    return NO_ERROR;
2079}
2080
2081uint32_t AudioTrack::getUnderrunFrames() const
2082{
2083    AutoMutex lock(mLock);
2084    return mProxy->getUnderrunFrames();
2085}
2086
2087// =========================================================================
2088
2089void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2090{
2091    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2092    if (audioTrack != 0) {
2093        AutoMutex lock(audioTrack->mLock);
2094        audioTrack->mProxy->binderDied();
2095    }
2096}
2097
2098// =========================================================================
2099
2100AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2101    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2102      mIgnoreNextPausedInt(false)
2103{
2104}
2105
2106AudioTrack::AudioTrackThread::~AudioTrackThread()
2107{
2108}
2109
2110bool AudioTrack::AudioTrackThread::threadLoop()
2111{
2112    {
2113        AutoMutex _l(mMyLock);
2114        if (mPaused) {
2115            mMyCond.wait(mMyLock);
2116            // caller will check for exitPending()
2117            return true;
2118        }
2119        if (mIgnoreNextPausedInt) {
2120            mIgnoreNextPausedInt = false;
2121            mPausedInt = false;
2122        }
2123        if (mPausedInt) {
2124            if (mPausedNs > 0) {
2125                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2126            } else {
2127                mMyCond.wait(mMyLock);
2128            }
2129            mPausedInt = false;
2130            return true;
2131        }
2132    }
2133    if (exitPending()) {
2134        return false;
2135    }
2136    nsecs_t ns = mReceiver.processAudioBuffer();
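    // Return-value protocol from processAudioBuffer(): 0 = run again immediately,
    // NS_INACTIVE = pause until resume(), NS_NEVER = exit the thread,
    // NS_WHENEVER = poll again in 1 s, any other positive value = sleep that many ns.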
2137    switch (ns) {
2138    case 0:
2139        return true;
2140    case NS_INACTIVE:
2141        pauseInternal();
2142        return true;
2143    case NS_NEVER:
2144        return false;
2145    case NS_WHENEVER:
2146        // FIXME increase poll interval, or make event-driven
2147        ns = 1000000000LL;
2148        // fall through
2149    default:
2150        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2151        pauseInternal(ns);
2152        return true;
2153    }
2154}
2155
2156void AudioTrack::AudioTrackThread::requestExit()
2157{
2158    // must be in this order to avoid a race condition
2159    Thread::requestExit();
2160    resume();
2161}
2162
2163void AudioTrack::AudioTrackThread::pause()
2164{
2165    AutoMutex _l(mMyLock);
2166    mPaused = true;
2167}
2168
2169void AudioTrack::AudioTrackThread::resume()
2170{
2171    AutoMutex _l(mMyLock);
2172    mIgnoreNextPausedInt = true;
2173    if (mPaused || mPausedInt) {
2174        mPaused = false;
2175        mPausedInt = false;
2176        mMyCond.signal();
2177    }
2178}
2179
2180void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2181{
2182    AutoMutex _l(mMyLock);
2183    mPausedInt = true;
2184    mPausedNs = ns;
2185}
2186
2187}; // namespace android
2188