AudioTrack.cpp revision 1ab85ec401801ef9a9184650d0f5a1639b45eeb9
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18
19//#define LOG_NDEBUG 0
20#define LOG_TAG "AudioTrack"
21
22#include <sys/resource.h>
23#include <audio_utils/primitives.h>
24#include <binder/IPCThreadState.h>
25#include <media/AudioTrack.h>
26#include <utils/Log.h>
27#include <private/media/AudioTrackShared.h>
28#include <media/IAudioFlinger.h>
29
30#define WAIT_PERIOD_MS          10
31
32namespace android {
33// ---------------------------------------------------------------------------
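// Illustrative client usage (a sketch, not part of this file): how an
// application-side caller might drive this API in blocking TRANSFER_SYNC mode.
// The stream parameters and the pcmData/pcmSize names are placeholders, and
// error handling is omitted for brevity.
//
//   sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC,
//           44100, AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO,
//           0 /*frameCount*/, AUDIO_OUTPUT_FLAG_NONE);
//   if (track->initCheck() == NO_ERROR) {
//       track->start();
//       track->write(pcmData, pcmSize);   // 16-bit interleaved stereo PCM
//       track->stop();
//   }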
34
35// static
36status_t AudioTrack::getMinFrameCount(
37        size_t* frameCount,
38        audio_stream_type_t streamType,
39        uint32_t sampleRate)
40{
41    if (frameCount == NULL) {
42        return BAD_VALUE;
43    }
44
45    // default to 0 in case of error
46    *frameCount = 0;
47
48    // FIXME merge with similar code in createTrack_l(), except we're missing
49    //       some information here that is available in createTrack_l():
50    //          audio_io_handle_t output
51    //          audio_format_t format
52    //          audio_channel_mask_t channelMask
53    //          audio_output_flags_t flags
54    uint32_t afSampleRate;
55    if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
56        return NO_INIT;
57    }
58    size_t afFrameCount;
59    if (AudioSystem::getOutputFrameCount(&afFrameCount, streamType) != NO_ERROR) {
60        return NO_INIT;
61    }
62    uint32_t afLatency;
63    if (AudioSystem::getOutputLatency(&afLatency, streamType) != NO_ERROR) {
64        return NO_INIT;
65    }
66
67    // Ensure that buffer depth covers at least audio hardware latency
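    // One mixer period is (1000 * afFrameCount) / afSampleRate milliseconds, so the
    // division below counts how many such periods fit within the reported latency.
    // With illustrative numbers (not from this file) afFrameCount = 960 and
    // afSampleRate = 48000, one period is 20 ms, and afLatency = 60 ms gives
    // minBufCount = 3.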
68    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
69    if (minBufCount < 2) {
70        minBufCount = 2;
71    }
72
73    *frameCount = (sampleRate == 0) ? afFrameCount * minBufCount :
74            afFrameCount * minBufCount * sampleRate / afSampleRate;
75    ALOGV("getMinFrameCount=%d: afFrameCount=%d, minBufCount=%d, afSampleRate=%d, afLatency=%d",
76            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
77    return NO_ERROR;
78}
79
80// ---------------------------------------------------------------------------
81
82AudioTrack::AudioTrack()
83    : mStatus(NO_INIT),
84      mIsTimed(false),
85      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
86      mPreviousSchedulingGroup(SP_DEFAULT)
87{
88}
89
90AudioTrack::AudioTrack(
91        audio_stream_type_t streamType,
92        uint32_t sampleRate,
93        audio_format_t format,
94        audio_channel_mask_t channelMask,
95        int frameCount,
96        audio_output_flags_t flags,
97        callback_t cbf,
98        void* user,
99        int notificationFrames,
100        int sessionId,
101        transfer_type transferType,
102        const audio_offload_info_t *offloadInfo)
103    : mStatus(NO_INIT),
104      mIsTimed(false),
105      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
106      mPreviousSchedulingGroup(SP_DEFAULT)
107{
108    mStatus = set(streamType, sampleRate, format, channelMask,
109            frameCount, flags, cbf, user, notificationFrames,
110            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
111}
112
113AudioTrack::AudioTrack(
114        audio_stream_type_t streamType,
115        uint32_t sampleRate,
116        audio_format_t format,
117        audio_channel_mask_t channelMask,
118        const sp<IMemory>& sharedBuffer,
119        audio_output_flags_t flags,
120        callback_t cbf,
121        void* user,
122        int notificationFrames,
123        int sessionId,
124        transfer_type transferType,
125        const audio_offload_info_t *offloadInfo)
126    : mStatus(NO_INIT),
127      mIsTimed(false),
128      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
129      mPreviousSchedulingGroup(SP_DEFAULT)
130{
131    mStatus = set(streamType, sampleRate, format, channelMask,
132            0 /*frameCount*/, flags, cbf, user, notificationFrames,
133            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo);
134}
135
136AudioTrack::~AudioTrack()
137{
138    if (mStatus == NO_ERROR) {
139        // Make sure that the callback function exits if it is looping on a
140        // buffer-full condition in obtainBuffer().  Otherwise the callback
141        // thread will never exit.
142        stop();
143        if (mAudioTrackThread != 0) {
144            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
145            mAudioTrackThread->requestExitAndWait();
146            mAudioTrackThread.clear();
147        }
148        if (mAudioTrack != 0) {
149            mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
150            mAudioTrack.clear();
151        }
152        IPCThreadState::self()->flushCommands();
153        AudioSystem::releaseAudioSessionId(mSessionId);
154    }
155}
156
157status_t AudioTrack::set(
158        audio_stream_type_t streamType,
159        uint32_t sampleRate,
160        audio_format_t format,
161        audio_channel_mask_t channelMask,
162        int frameCountInt,
163        audio_output_flags_t flags,
164        callback_t cbf,
165        void* user,
166        int notificationFrames,
167        const sp<IMemory>& sharedBuffer,
168        bool threadCanCallJava,
169        int sessionId,
170        transfer_type transferType,
171        const audio_offload_info_t *offloadInfo)
172{
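    // How TRANSFER_DEFAULT resolves (summarizing the switch below): a non-null
    // sharedBuffer selects TRANSFER_SHARED; otherwise a callback that cannot call
    // into Java (cbf != NULL and !threadCanCallJava) selects TRANSFER_CALLBACK;
    // anything else falls back to blocking TRANSFER_SYNC writes.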
173    switch (transferType) {
174    case TRANSFER_DEFAULT:
175        if (sharedBuffer != 0) {
176            transferType = TRANSFER_SHARED;
177        } else if (cbf == NULL || threadCanCallJava) {
178            transferType = TRANSFER_SYNC;
179        } else {
180            transferType = TRANSFER_CALLBACK;
181        }
182        break;
183    case TRANSFER_CALLBACK:
184        if (cbf == NULL || sharedBuffer != 0) {
185            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
186            return BAD_VALUE;
187        }
188        break;
189    case TRANSFER_OBTAIN:
190    case TRANSFER_SYNC:
191        if (sharedBuffer != 0) {
192            ALOGE("Transfer type TRANSFER_OBTAIN or TRANSFER_SYNC but sharedBuffer != 0");
193            return BAD_VALUE;
194        }
195        break;
196    case TRANSFER_SHARED:
197        if (sharedBuffer == 0) {
198            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
199            return BAD_VALUE;
200        }
201        break;
202    default:
203        ALOGE("Invalid transfer type %d", transferType);
204        return BAD_VALUE;
205    }
206    mTransfer = transferType;
207
208    // FIXME "int" here is legacy and will be replaced by size_t later
209    if (frameCountInt < 0) {
210        ALOGE("Invalid frame count %d", frameCountInt);
211        return BAD_VALUE;
212    }
213    size_t frameCount = frameCountInt;
214
215    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
216            sharedBuffer->size());
217
218    ALOGV("set() streamType %d frameCount %u flags %04x", streamType, frameCount, flags);
219
220    AutoMutex lock(mLock);
221
222    if (mAudioTrack != 0) {
223        ALOGE("Track already in use");
224        return INVALID_OPERATION;
225    }
226
227    // handle default values first.
228    if (streamType == AUDIO_STREAM_DEFAULT) {
229        streamType = AUDIO_STREAM_MUSIC;
230    }
231
232    if (sampleRate == 0) {
233        uint32_t afSampleRate;
234        if (AudioSystem::getOutputSamplingRate(&afSampleRate, streamType) != NO_ERROR) {
235            return NO_INIT;
236        }
237        sampleRate = afSampleRate;
238    }
239    mSampleRate = sampleRate;
240
241    // these below should probably come from the AudioFlinger too...
242    if (format == AUDIO_FORMAT_DEFAULT) {
243        format = AUDIO_FORMAT_PCM_16_BIT;
244    }
245    if (channelMask == 0) {
246        channelMask = AUDIO_CHANNEL_OUT_STEREO;
247    }
248
249    // validate parameters
250    if (!audio_is_valid_format(format)) {
251        ALOGE("Invalid format %d", format);
252        return BAD_VALUE;
253    }
254
255    // AudioFlinger does not currently support 8-bit data in shared memory
256    if (format == AUDIO_FORMAT_PCM_8_BIT && sharedBuffer != 0) {
257        ALOGE("8-bit data in shared memory is not supported");
258        return BAD_VALUE;
259    }
260
261    // force direct flag if format is not linear PCM
262    if (!audio_is_linear_pcm(format)) {
263        flags = (audio_output_flags_t)
264                // FIXME why can't we allow direct AND fast?
265                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
266    }
267    // only allow deep buffering for music stream type
268    if (streamType != AUDIO_STREAM_MUSIC) {
269        flags = (audio_output_flags_t)(flags & ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER);
270    }
271
272    if (!audio_is_output_channel(channelMask)) {
273        ALOGE("Invalid channel mask %#x", channelMask);
274        return BAD_VALUE;
275    }
276    mChannelMask = channelMask;
277    uint32_t channelCount = popcount(channelMask);
278    mChannelCount = channelCount;
279
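    // mFrameSize is the frame size as seen by the client; mFrameSizeAF is the frame
    // size as seen by AudioFlinger.  For linear PCM the latter is always based on
    // 16-bit samples, because 8-bit client data is expanded to 16-bit before it
    // reaches the mixer (see write() and processAudioBuffer() below).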
280    if (audio_is_linear_pcm(format)) {
281        mFrameSize = channelCount * audio_bytes_per_sample(format);
282        mFrameSizeAF = channelCount * sizeof(int16_t);
283    } else {
284        mFrameSize = sizeof(uint8_t);
285        mFrameSizeAF = sizeof(uint8_t);
286    }
287
288    audio_io_handle_t output = AudioSystem::getOutput(
289                                    streamType,
290                                    sampleRate, format, channelMask,
291                                    flags,
292                                    offloadInfo);
293
294    if (output == 0) {
295        ALOGE("Could not get audio output for stream type %d", streamType);
296        return BAD_VALUE;
297    }
298
299    mVolume[LEFT] = 1.0f;
300    mVolume[RIGHT] = 1.0f;
301    mSendLevel = 0.0f;
302    mFrameCount = frameCount;
303    mReqFrameCount = frameCount;
304    mNotificationFramesReq = notificationFrames;
305    mNotificationFramesAct = 0;
306    mSessionId = sessionId;
307    mAuxEffectId = 0;
308    mFlags = flags;
309    mCbf = cbf;
310
311    if (cbf != NULL) {
312        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
313        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
314    }
315
316    // create the IAudioTrack
317    status_t status = createTrack_l(streamType,
318                                  sampleRate,
319                                  format,
320                                  frameCount,
321                                  flags,
322                                  sharedBuffer,
323                                  output,
324                                  0 /*epoch*/);
325
326    if (status != NO_ERROR) {
327        if (mAudioTrackThread != 0) {
328            mAudioTrackThread->requestExit();
329            mAudioTrackThread.clear();
330        }
331        return status;
332    }
333
334    mStatus = NO_ERROR;
335    mStreamType = streamType;
336    mFormat = format;
337    mSharedBuffer = sharedBuffer;
338    mState = STATE_STOPPED;
339    mUserData = user;
340    mLoopPeriod = 0;
341    mMarkerPosition = 0;
342    mMarkerReached = false;
343    mNewPosition = 0;
344    mUpdatePeriod = 0;
345    AudioSystem::acquireAudioSessionId(mSessionId);
346    mSequence = 1;
347    mObservedSequence = mSequence;
348    mInUnderrun = false;
349
350    return NO_ERROR;
351}
352
353// -------------------------------------------------------------------------
354
355void AudioTrack::start()
356{
357    AutoMutex lock(mLock);
358    if (mState == STATE_ACTIVE) {
359        return;
360    }
361
362    mInUnderrun = true;
363
364    State previousState = mState;
365    mState = STATE_ACTIVE;
366    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
367        // reset current position as seen by client to 0
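        // (shifting the epoch back by the current server position makes the
        // client-visible getPosition() read as 0)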
368        mProxy->setEpoch(mProxy->getEpoch() - mProxy->getPosition());
369    }
370    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
371    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->flags);
372
373    sp<AudioTrackThread> t = mAudioTrackThread;
374    if (t != 0) {
375        t->resume();
376    } else {
377        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
378        get_sched_policy(0, &mPreviousSchedulingGroup);
379        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
380    }
381
382    status_t status = NO_ERROR;
383    if (!(flags & CBLK_INVALID)) {
384        status = mAudioTrack->start();
385        if (status == DEAD_OBJECT) {
386            flags |= CBLK_INVALID;
387        }
388    }
389    if (flags & CBLK_INVALID) {
390        status = restoreTrack_l("start");
391    }
392
393    if (status != NO_ERROR) {
394        ALOGE("start() status %d", status);
395        mState = previousState;
396        if (t != 0) {
397            t->pause();
398        } else {
399            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
400            set_sched_policy(0, mPreviousSchedulingGroup);
401        }
402    }
403
404    // FIXME discarding status
405}
406
407void AudioTrack::stop()
408{
409    AutoMutex lock(mLock);
410    // FIXME pause then stop should not be a nop
411    if (mState != STATE_ACTIVE) {
412        return;
413    }
414
415    mState = STATE_STOPPED;
416    mProxy->interrupt();
417    mAudioTrack->stop();
418    // the playback head position will reset to 0, so if a marker is set, we need
419    // to activate it again
420    mMarkerReached = false;
421#if 0
422    // Force flush if a shared buffer is used otherwise audioflinger
423    // will not stop before end of buffer is reached.
424    // It may be needed to make sure that we stop playback, likely in case looping is on.
425    if (mSharedBuffer != 0) {
426        flush_l();
427    }
428#endif
429    sp<AudioTrackThread> t = mAudioTrackThread;
430    if (t != 0) {
431        t->pause();
432    } else {
433        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
434        set_sched_policy(0, mPreviousSchedulingGroup);
435    }
436}
437
438bool AudioTrack::stopped() const
439{
440    AutoMutex lock(mLock);
441    return mState != STATE_ACTIVE;
442}
443
444void AudioTrack::flush()
445{
446    if (mSharedBuffer != 0) {
447        return;
448    }
449    AutoMutex lock(mLock);
450    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
451        return;
452    }
453    flush_l();
454}
455
456void AudioTrack::flush_l()
457{
458    ALOG_ASSERT(mState != STATE_ACTIVE);
459
460    // clear playback marker and periodic update counter
461    mMarkerPosition = 0;
462    mMarkerReached = false;
463    mUpdatePeriod = 0;
464
465    mState = STATE_FLUSHED;
466    mProxy->flush();
467    mAudioTrack->flush();
468}
469
470void AudioTrack::pause()
471{
472    AutoMutex lock(mLock);
473    if (mState != STATE_ACTIVE) {
474        return;
475    }
476    mState = STATE_PAUSED;
477    mProxy->interrupt();
478    mAudioTrack->pause();
479}
480
481status_t AudioTrack::setVolume(float left, float right)
482{
483    if (left < 0.0f || left > 1.0f || right < 0.0f || right > 1.0f) {
484        return BAD_VALUE;
485    }
486
487    AutoMutex lock(mLock);
488    mVolume[LEFT] = left;
489    mVolume[RIGHT] = right;
490
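    // Both gains are packed into one word as 4.12 fixed point: 1.0f maps to 0x1000,
    // with the left gain in the low 16 bits and the right gain in the high 16 bits.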
491    mProxy->setVolumeLR((uint32_t(uint16_t(right * 0x1000)) << 16) | uint16_t(left * 0x1000));
492
493    return NO_ERROR;
494}
495
496status_t AudioTrack::setVolume(float volume)
497{
498    return setVolume(volume, volume);
499}
500
501status_t AudioTrack::setAuxEffectSendLevel(float level)
502{
503    if (level < 0.0f || level > 1.0f) {
504        return BAD_VALUE;
505    }
506
507    AutoMutex lock(mLock);
508    mSendLevel = level;
509    mProxy->setSendLevel(level);
510
511    return NO_ERROR;
512}
513
514void AudioTrack::getAuxEffectSendLevel(float* level) const
515{
516    if (level != NULL) {
517        *level = mSendLevel;
518    }
519}
520
521status_t AudioTrack::setSampleRate(uint32_t rate)
522{
523    if (mIsTimed) {
524        return INVALID_OPERATION;
525    }
526
527    uint32_t afSamplingRate;
528    if (AudioSystem::getOutputSamplingRate(&afSamplingRate, mStreamType) != NO_ERROR) {
529        return NO_INIT;
530    }
531    // Resampler implementation limits input sampling rate to 2 x output sampling rate.
532    if (rate == 0 || rate > afSamplingRate*2 ) {
533        return BAD_VALUE;
534    }
535
536    AutoMutex lock(mLock);
537    mSampleRate = rate;
538    mProxy->setSampleRate(rate);
539
540    return NO_ERROR;
541}
542
543uint32_t AudioTrack::getSampleRate() const
544{
545    if (mIsTimed) {
546        return 0;
547    }
548
549    AutoMutex lock(mLock);
550    return mSampleRate;
551}
552
553status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
554{
555    if (mSharedBuffer == 0 || mIsTimed) {
556        return INVALID_OPERATION;
557    }
558
559    if (loopCount == 0) {
560        ;
561    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
562            loopEnd - loopStart >= MIN_LOOP) {
563        ;
564    } else {
565        return BAD_VALUE;
566    }
567
568    AutoMutex lock(mLock);
569    // See setPosition() regarding setting parameters such as loop points or position while active
570    if (mState == STATE_ACTIVE) {
571        return INVALID_OPERATION;
572    }
573    setLoop_l(loopStart, loopEnd, loopCount);
574    return NO_ERROR;
575}
576
577void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
578{
579    // FIXME If setting a loop also sets position to start of loop, then
580    //       this is correct.  Otherwise it should be removed.
581    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
582    mLoopPeriod = loopCount != 0 ? loopEnd - loopStart : 0;
583    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);
584}
585
586status_t AudioTrack::setMarkerPosition(uint32_t marker)
587{
588    if (mCbf == NULL) {
589        return INVALID_OPERATION;
590    }
591
592    AutoMutex lock(mLock);
593    mMarkerPosition = marker;
594    mMarkerReached = false;
595
596    return NO_ERROR;
597}
598
599status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
600{
601    if (marker == NULL) {
602        return BAD_VALUE;
603    }
604
605    AutoMutex lock(mLock);
606    *marker = mMarkerPosition;
607
608    return NO_ERROR;
609}
610
611status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
612{
613    if (mCbf == NULL) {
614        return INVALID_OPERATION;
615    }
616
617    AutoMutex lock(mLock);
618    mNewPosition = mProxy->getPosition() + updatePeriod;
619    mUpdatePeriod = updatePeriod;
620
621    return NO_ERROR;
622}
623
624status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
625{
626    if (updatePeriod == NULL) {
627        return BAD_VALUE;
628    }
629
630    AutoMutex lock(mLock);
631    *updatePeriod = mUpdatePeriod;
632
633    return NO_ERROR;
634}
635
636status_t AudioTrack::setPosition(uint32_t position)
637{
638    if (mSharedBuffer == 0 || mIsTimed) {
639        return INVALID_OPERATION;
640    }
641    if (position > mFrameCount) {
642        return BAD_VALUE;
643    }
644
645    AutoMutex lock(mLock);
646    // Currently we require that the player is inactive before setting parameters such as position
647    // or loop points.  Otherwise, there could be a race condition: the application could read the
648    // current position, compute a new position or loop parameters, and then set that position or
649    // loop parameters but it would do the "wrong" thing since the position has continued to advance
650    // loop parameters, but it would do the "wrong" thing since the position has continued to advance
651    // in the meantime.  If we ever provide a sequencer in the server, we could allow a way for the app
652    if (mState == STATE_ACTIVE) {
653        return INVALID_OPERATION;
654    }
655    mNewPosition = mProxy->getPosition() + mUpdatePeriod;
656    mLoopPeriod = 0;
657    // FIXME Check whether loops and setting position are incompatible in old code.
658    // If we use setLoop for both purposes we lose the capability to set the position while looping.
659    mStaticProxy->setLoop(position, mFrameCount, 0);
660
661    return NO_ERROR;
662}
663
664status_t AudioTrack::getPosition(uint32_t *position) const
665{
666    if (position == NULL) {
667        return BAD_VALUE;
668    }
669
670    AutoMutex lock(mLock);
671    // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
672    *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ? 0 :
673            mProxy->getPosition();
674
675    return NO_ERROR;
676}
677
678status_t AudioTrack::getBufferPosition(size_t *position)
679{
680    if (mSharedBuffer == 0 || mIsTimed) {
681        return INVALID_OPERATION;
682    }
683    if (position == NULL) {
684        return BAD_VALUE;
685    }
686
687    AutoMutex lock(mLock);
688    *position = mStaticProxy->getBufferPosition();
689    return NO_ERROR;
690}
691
692status_t AudioTrack::reload()
693{
694    if (mSharedBuffer == 0 || mIsTimed) {
695        return INVALID_OPERATION;
696    }
697
698    AutoMutex lock(mLock);
699    // See setPosition() regarding setting parameters such as loop points or position while active
700    if (mState == STATE_ACTIVE) {
701        return INVALID_OPERATION;
702    }
703    mNewPosition = mUpdatePeriod;
704    mLoopPeriod = 0;
705    // FIXME The new code cannot reload while keeping a loop specified.
706    // Need to check how the old code handled this, and whether it's a significant change.
707    mStaticProxy->setLoop(0, mFrameCount, 0);
708    return NO_ERROR;
709}
710
711audio_io_handle_t AudioTrack::getOutput()
712{
713    AutoMutex lock(mLock);
714    return getOutput_l();
715}
716
717// must be called with mLock held
718audio_io_handle_t AudioTrack::getOutput_l()
719{
720    return AudioSystem::getOutput(mStreamType,
721            mSampleRate, mFormat, mChannelMask, mFlags);
722}
723
724status_t AudioTrack::attachAuxEffect(int effectId)
725{
726    AutoMutex lock(mLock);
727    status_t status = mAudioTrack->attachAuxEffect(effectId);
728    if (status == NO_ERROR) {
729        mAuxEffectId = effectId;
730    }
731    return status;
732}
733
734// -------------------------------------------------------------------------
735
736// must be called with mLock held
737status_t AudioTrack::createTrack_l(
738        audio_stream_type_t streamType,
739        uint32_t sampleRate,
740        audio_format_t format,
741        size_t frameCount,
742        audio_output_flags_t flags,
743        const sp<IMemory>& sharedBuffer,
744        audio_io_handle_t output,
745        size_t epoch)
746{
747    status_t status;
748    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
749    if (audioFlinger == 0) {
750        ALOGE("Could not get audioflinger");
751        return NO_INIT;
752    }
753
754    uint32_t afLatency;
755    if ((status = AudioSystem::getLatency(output, streamType, &afLatency)) != NO_ERROR) {
756        ALOGE("getLatency(%d) failed status %d", output, status);
757        return NO_INIT;
758    }
759
760    // Client decides whether the track is TIMED (see below), but can only express a preference
761    // for FAST.  Server will perform additional tests.
762    if ((flags & AUDIO_OUTPUT_FLAG_FAST) && !(
763            // either of these use cases:
764            // use case 1: shared buffer
765            (sharedBuffer != 0) ||
766            // use case 2: callback handler
767            (mCbf != NULL))) {
768        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
769        // once denied, do not request again if IAudioTrack is re-created
770        flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
771        mFlags = flags;
772    }
773    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);
774
775    mNotificationFramesAct = mNotificationFramesReq;
776
777    if (!audio_is_linear_pcm(format)) {
778
779        if (sharedBuffer != 0) {
780            // Same comment as below about ignoring frameCount parameter for set()
781            frameCount = sharedBuffer->size();
782        } else if (frameCount == 0) {
783            size_t afFrameCount;
784            status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
785            if (status != NO_ERROR) {
786                ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType,
787                        status);
788                return NO_INIT;
789            }
790            frameCount = afFrameCount;
791        }
792
793    } else if (sharedBuffer != 0) {
794
795        // Ensure that buffer alignment matches channel count
796        // 8-bit data in shared memory is not currently supported by AudioFlinger
797        size_t alignment = /* format == AUDIO_FORMAT_PCM_8_BIT ? 1 : */ 2;
798        if (mChannelCount > 1) {
799            // More than 2 channels does not require stronger alignment than stereo
800            alignment <<= 1;
801        }
802        if (((size_t)sharedBuffer->pointer() & (alignment - 1)) != 0) {
803            ALOGE("Invalid buffer alignment: address %p, channel count %u",
804                    sharedBuffer->pointer(), mChannelCount);
805            return BAD_VALUE;
806        }
807
808        // When initializing a shared buffer AudioTrack via constructors,
809        // there's no frameCount parameter.
810        // But when initializing a shared buffer AudioTrack via set(),
811        // there _is_ a frameCount parameter.  We silently ignore it.
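        // A frame here is one 16-bit sample per channel, so the usable frame count is
        // the shared buffer size in bytes divided by (mChannelCount * sizeof(int16_t)).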
812        frameCount = sharedBuffer->size()/mChannelCount/sizeof(int16_t);
813
814    } else if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
815
816        // FIXME move these calculations and associated checks to server
817        uint32_t afSampleRate;
818        status = AudioSystem::getSamplingRate(output, streamType, &afSampleRate);
819        if (status != NO_ERROR) {
820            ALOGE("getSamplingRate(output=%d, streamType=%d) status %d", output, streamType,
821                    status);
822            return NO_INIT;
823        }
824        size_t afFrameCount;
825        status = AudioSystem::getFrameCount(output, streamType, &afFrameCount);
826        if (status != NO_ERROR) {
827            ALOGE("getFrameCount(output=%d, streamType=%d) status %d", output, streamType, status);
828            return NO_INIT;
829        }
830
831        // Ensure that buffer depth covers at least audio hardware latency
832        uint32_t minBufCount = afLatency / ((1000 * afFrameCount)/afSampleRate);
833        ALOGV("afFrameCount=%d, minBufCount=%d, afSampleRate=%u, afLatency=%d",
834                afFrameCount, minBufCount, afSampleRate, afLatency);
835        if (minBufCount <= 2) {
836            minBufCount = sampleRate == afSampleRate ? 2 : 3;
837        }
838
839        size_t minFrameCount = (afFrameCount*sampleRate*minBufCount)/afSampleRate;
840        ALOGV("minFrameCount: %u, afFrameCount=%d, minBufCount=%d, sampleRate=%u, afSampleRate=%u"
841                ", afLatency=%d",
842                minFrameCount, afFrameCount, minBufCount, sampleRate, afSampleRate, afLatency);
843
844        if (frameCount == 0) {
845            frameCount = minFrameCount;
846        }
847        // Make sure that application is notified with sufficient margin
848        // before underrun
849        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
850            mNotificationFramesAct = frameCount/2;
851        }
852        if (frameCount < minFrameCount) {
853            // not ALOGW because it happens all the time when playing key clicks over A2DP
854            ALOGV("Minimum buffer size corrected from %d to %d",
855                     frameCount, minFrameCount);
856            frameCount = minFrameCount;
857        }
858
859    } else {
860        // For fast tracks, the frame count calculations and checks are done by server
861    }
862
863    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
864    if (mIsTimed) {
865        trackFlags |= IAudioFlinger::TRACK_TIMED;
866    }
867
868    pid_t tid = -1;
869    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
870        trackFlags |= IAudioFlinger::TRACK_FAST;
871        if (mAudioTrackThread != 0) {
872            tid = mAudioTrackThread->getTid();
873        }
874    }
875
876    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
877                                                      sampleRate,
878                                                      // AudioFlinger only sees 16-bit PCM
879                                                      format == AUDIO_FORMAT_PCM_8_BIT ?
880                                                              AUDIO_FORMAT_PCM_16_BIT : format,
881                                                      mChannelMask,
882                                                      frameCount,
883                                                      &trackFlags,
884                                                      sharedBuffer,
885                                                      output,
886                                                      tid,
887                                                      &mSessionId,
888                                                      &status);
889
890    if (track == 0) {
891        ALOGE("AudioFlinger could not create track, status: %d", status);
892        return status;
893    }
894    sp<IMemory> iMem = track->getCblk();
895    if (iMem == 0) {
896        ALOGE("Could not get control block");
897        return NO_INIT;
898    }
899    if (mAudioTrack != 0) {
900        mAudioTrack->asBinder()->unlinkToDeath(mDeathNotifier, this);
901        mDeathNotifier.clear();
902    }
903    mAudioTrack = track;
904    mCblkMemory = iMem;
905    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMem->pointer());
906    mCblk = cblk;
907    size_t temp = cblk->frameCount_;
908    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
909        // In current design, AudioTrack client checks and ensures frame count validity before
910        // passing it to AudioFlinger so AudioFlinger should not return a different value except
911        // for fast track as it uses a special method of assigning frame count.
912        ALOGW("Requested frameCount %u but received frameCount %u", frameCount, temp);
913    }
914    frameCount = temp;
915    mAwaitBoost = false;
916    if (flags & AUDIO_OUTPUT_FLAG_FAST) {
917        if (trackFlags & IAudioFlinger::TRACK_FAST) {
918            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %u", frameCount);
919            mAwaitBoost = true;
920            if (sharedBuffer == 0) {
921                // double-buffering is not required for fast tracks, due to tighter scheduling
922                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount) {
923                    mNotificationFramesAct = frameCount;
924                }
925            }
926        } else {
927            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %u", frameCount);
928            // once denied, do not request again if IAudioTrack is re-created
929            flags = (audio_output_flags_t) (flags & ~AUDIO_OUTPUT_FLAG_FAST);
930            mFlags = flags;
931            if (sharedBuffer == 0) {
932                if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount/2) {
933                    mNotificationFramesAct = frameCount/2;
934                }
935            }
936        }
937    }
938    mRefreshRemaining = true;
939
940    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
941    // is the value of pointer() for the shared buffer, otherwise buffers points
942    // immediately after the control block.  This address is for the mapping within client
943    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
944    void* buffers;
945    if (sharedBuffer == 0) {
946        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
947    } else {
948        buffers = sharedBuffer->pointer();
949    }
950
951    mAudioTrack->attachAuxEffect(mAuxEffectId);
952    // FIXME don't believe this lie
953    mLatency = afLatency + (1000*frameCount) / sampleRate;
954    mFrameCount = frameCount;
955    // If IAudioTrack is re-created, don't let the requested frameCount
956    // decrease.  This can confuse clients that cache frameCount().
957    if (frameCount > mReqFrameCount) {
958        mReqFrameCount = frameCount;
959    }
960
961    // update proxy
962    if (sharedBuffer == 0) {
963        mStaticProxy.clear();
964        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
965    } else {
966        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSizeAF);
967        mProxy = mStaticProxy;
968    }
969    mProxy->setVolumeLR((uint32_t(uint16_t(mVolume[RIGHT] * 0x1000)) << 16) |
970            uint16_t(mVolume[LEFT] * 0x1000));
971    mProxy->setSendLevel(mSendLevel);
972    mProxy->setSampleRate(mSampleRate);
973    mProxy->setEpoch(epoch);
974    mProxy->setMinimum(mNotificationFramesAct);
975
976    mDeathNotifier = new DeathNotifier(this);
977    mAudioTrack->asBinder()->linkToDeath(mDeathNotifier, this);
978
979    return NO_ERROR;
980}
981
982status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount)
983{
984    if (audioBuffer == NULL) {
985        return BAD_VALUE;
986    }
987    if (mTransfer != TRANSFER_OBTAIN) {
988        audioBuffer->frameCount = 0;
989        audioBuffer->size = 0;
990        audioBuffer->raw = NULL;
991        return INVALID_OPERATION;
992    }
993
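    // waitCount semantics: -1 blocks indefinitely, 0 is non-blocking, and a positive
    // value is converted below into a total timeout of waitCount * WAIT_PERIOD_MS
    // (10 ms per unit).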
994    const struct timespec *requested;
995    if (waitCount == -1) {
996        requested = &ClientProxy::kForever;
997    } else if (waitCount == 0) {
998        requested = &ClientProxy::kNonBlocking;
999    } else if (waitCount > 0) {
1000        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1001        struct timespec timeout;
1002        timeout.tv_sec = ms / 1000;
1003        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1004        requested = &timeout;
1005    } else {
1006        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1007        requested = NULL;
1008    }
1009    return obtainBuffer(audioBuffer, requested);
1010}
1011
1012status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1013        struct timespec *elapsed, size_t *nonContig)
1014{
1015    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1016    uint32_t oldSequence = 0;
1017    uint32_t newSequence;
1018
1019    Proxy::Buffer buffer;
1020    status_t status = NO_ERROR;
1021
1022    static const int32_t kMaxTries = 5;
1023    int32_t tryCounter = kMaxTries;
1024
1025    do {
1026        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1027        // keep them from going away if another thread re-creates the track during obtainBuffer()
1028        sp<AudioTrackClientProxy> proxy;
1029        sp<IMemory> iMem;
1030
1031        {   // start of lock scope
1032            AutoMutex lock(mLock);
1033
1034            newSequence = mSequence;
1035            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1036            if (status == DEAD_OBJECT) {
1037                // re-create track, unless someone else has already done so
1038                if (newSequence == oldSequence) {
1039                    status = restoreTrack_l("obtainBuffer");
1040                    if (status != NO_ERROR) {
1041                        break;
1042                    }
1043                }
1044            }
1045            oldSequence = newSequence;
1046
1047            // Keep the extra references
1048            proxy = mProxy;
1049            iMem = mCblkMemory;
1050
1051            // Non-blocking if track is stopped or paused
1052            if (mState != STATE_ACTIVE) {
1053                requested = &ClientProxy::kNonBlocking;
1054            }
1055
1056        }   // end of lock scope
1057
1058        buffer.mFrameCount = audioBuffer->frameCount;
1059        // FIXME restarts the requested timeout and the elapsed time from scratch on each retry
1060        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1061
1062    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
1063
1064    audioBuffer->frameCount = buffer.mFrameCount;
1065    audioBuffer->size = buffer.mFrameCount * mFrameSizeAF;
1066    audioBuffer->raw = buffer.mRaw;
1067    if (nonContig != NULL) {
1068        *nonContig = buffer.mNonContig;
1069    }
1070    return status;
1071}
1072
1073void AudioTrack::releaseBuffer(Buffer* audioBuffer)
1074{
1075    if (mTransfer == TRANSFER_SHARED) {
1076        return;
1077    }
1078
1079    size_t stepCount = audioBuffer->size / mFrameSizeAF;
1080    if (stepCount == 0) {
1081        return;
1082    }
1083
1084    Proxy::Buffer buffer;
1085    buffer.mFrameCount = stepCount;
1086    buffer.mRaw = audioBuffer->raw;
1087
1088    AutoMutex lock(mLock);
1089    mInUnderrun = false;
1090    mProxy->releaseBuffer(&buffer);
1091
1092    // restart track if it was disabled by audioflinger due to previous underrun
1093    if (mState == STATE_ACTIVE) {
1094        audio_track_cblk_t* cblk = mCblk;
1095        if (android_atomic_and(~CBLK_DISABLED, &cblk->flags) & CBLK_DISABLED) {
1096            ALOGW("releaseBuffer() track %p name=%#x disabled due to previous underrun, restarting",
1097                    this, cblk->mName);
1098            // FIXME ignoring status
1099            mAudioTrack->start();
1100        }
1101    }
1102}
1103
1104// -------------------------------------------------------------------------
1105
1106ssize_t AudioTrack::write(const void* buffer, size_t userSize)
1107{
1108    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1109        return INVALID_OPERATION;
1110    }
1111
1112    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1113        // Sanity-check: the user is most likely passing an error code, and it would
1114        // make the return value ambiguous (actualSize vs error).
1115        ALOGE("AudioTrack::write(buffer=%p, size=%u (%d))", buffer, userSize, userSize);
1116        return BAD_VALUE;
1117    }
1118
1119    size_t written = 0;
1120    Buffer audioBuffer;
1121
1122    while (userSize >= mFrameSize) {
1123        audioBuffer.frameCount = userSize / mFrameSize;
1124
1125        status_t err = obtainBuffer(&audioBuffer, &ClientProxy::kForever);
1126        if (err < 0) {
1127            if (written > 0) {
1128                break;
1129            }
1130            return ssize_t(err);
1131        }
1132
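        // For 8-bit PCM on a mixed (non-direct) output, AudioFlinger consumes 16-bit
        // samples, so only half of the obtained buffer capacity can be filled from
        // the 8-bit source; memcpy_to_i16_from_u8() expands the 8-bit user data into
        // the 16-bit track buffer.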
1133        size_t toWrite;
1134        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1135            // Divide capacity by 2 to take expansion into account
1136            toWrite = audioBuffer.size >> 1;
1137            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) buffer, toWrite);
1138        } else {
1139            toWrite = audioBuffer.size;
1140            memcpy(audioBuffer.i8, buffer, toWrite);
1141        }
1142        buffer = ((const char *) buffer) + toWrite;
1143        userSize -= toWrite;
1144        written += toWrite;
1145
1146        releaseBuffer(&audioBuffer);
1147    }
1148
1149    return written;
1150}
1151
1152// -------------------------------------------------------------------------
1153
1154TimedAudioTrack::TimedAudioTrack() {
1155    mIsTimed = true;
1156}
1157
1158status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1159{
1160    AutoMutex lock(mLock);
1161    status_t result = UNKNOWN_ERROR;
1162
1163#if 1
1164    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1165    // while we are accessing the cblk
1166    sp<IAudioTrack> audioTrack = mAudioTrack;
1167    sp<IMemory> iMem = mCblkMemory;
1168#endif
1169
1170    // If the track is not already invalid, try to allocate a buffer.  If the
1171    // alloc fails, indicating that the server is dead, flag the track as invalid
1172    // so we can attempt to restore it in just a bit.
1173    audio_track_cblk_t* cblk = mCblk;
1174    if (!(cblk->flags & CBLK_INVALID)) {
1175        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1176        if (result == DEAD_OBJECT) {
1177            android_atomic_or(CBLK_INVALID, &cblk->flags);
1178        }
1179    }
1180
1181    // If the track is invalid at this point, attempt to restore it and try the
1182    // allocation one more time.
1183    if (cblk->flags & CBLK_INVALID) {
1184        result = restoreTrack_l("allocateTimedBuffer");
1185
1186        if (result == NO_ERROR) {
1187            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1188        }
1189    }
1190
1191    return result;
1192}
1193
1194status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1195                                           int64_t pts)
1196{
1197    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1198    {
1199        AutoMutex lock(mLock);
1200        audio_track_cblk_t* cblk = mCblk;
1201        // restart track if it was disabled by audioflinger due to previous underrun
1202        if (buffer->size() != 0 && status == NO_ERROR &&
1203                (mState == STATE_ACTIVE) && (cblk->flags & CBLK_DISABLED)) {
1204            android_atomic_and(~CBLK_DISABLED, &cblk->flags);
1205            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1206            // FIXME ignoring status
1207            mAudioTrack->start();
1208        }
1209    }
1210    return status;
1211}
1212
1213status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1214                                                TargetTimeline target)
1215{
1216    return mAudioTrack->setMediaTimeTransform(xform, target);
1217}
1218
1219// -------------------------------------------------------------------------
1220
1221nsecs_t AudioTrack::processAudioBuffer(const sp<AudioTrackThread>& thread)
1222{
1223    mLock.lock();
1224    if (mAwaitBoost) {
1225        mAwaitBoost = false;
1226        mLock.unlock();
1227        static const int32_t kMaxTries = 5;
1228        int32_t tryCounter = kMaxTries;
1229        uint32_t pollUs = 10000;
1230        do {
1231            int policy = sched_getscheduler(0);
1232            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1233                break;
1234            }
1235            usleep(pollUs);
1236            pollUs <<= 1;
1237        } while (tryCounter-- > 0);
1238        if (tryCounter < 0) {
1239            ALOGE("did not receive expected priority boost on time");
1240        }
1241        return 0;   // run again immediately; the return type is nsecs_t, not bool
1242    }
1243
1244    // Can only reference mCblk while locked
1245    int32_t flags = android_atomic_and(
1246        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->flags);
1247
1248    // Check for track invalidation
1249    if (flags & CBLK_INVALID) {
1250        (void) restoreTrack_l("processAudioBuffer");
1251        mLock.unlock();
1252        // Run again immediately, but with a new IAudioTrack
1253        return 0;
1254    }
1255
1256    bool active = mState == STATE_ACTIVE;
1257
1258    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1259    bool newUnderrun = false;
1260    if (flags & CBLK_UNDERRUN) {
1261#if 0
1262        // Currently in shared buffer mode, when the server reaches the end of buffer,
1263        // the track stays active in continuous underrun state.  It's up to the application
1264        // to pause or stop the track, or set the position to a new offset within buffer.
1265        // This was some experimental code to auto-pause on underrun.   Keeping it here
1266        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1267        if (mTransfer == TRANSFER_SHARED) {
1268            mState = STATE_PAUSED;
1269            active = false;
1270        }
1271#endif
1272        if (!mInUnderrun) {
1273            mInUnderrun = true;
1274            newUnderrun = true;
1275        }
1276    }
1277
1278    // Get current position of server
1279    size_t position = mProxy->getPosition();
1280
1281    // Manage marker callback
1282    bool markerReached = false;
1283    size_t markerPosition = mMarkerPosition;
1284    // FIXME fails for wraparound, need 64 bits
1285    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1286        mMarkerReached = markerReached = true;
1287    }
1288
1289    // Determine number of new position callback(s) that will be needed, while locked
1290    size_t newPosCount = 0;
1291    size_t newPosition = mNewPosition;
1292    size_t updatePeriod = mUpdatePeriod;
1293    // FIXME fails for wraparound, need 64 bits
1294    if (updatePeriod > 0 && position >= newPosition) {
1295        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1296        mNewPosition += updatePeriod * newPosCount;
1297    }
1298
1299    // Cache other fields that will be needed soon
1300    uint32_t loopPeriod = mLoopPeriod;
1301    uint32_t sampleRate = mSampleRate;
1302    size_t notificationFrames = mNotificationFramesAct;
1303    if (mRefreshRemaining) {
1304        mRefreshRemaining = false;
1305        mRemainingFrames = notificationFrames;
1306        mRetryOnPartialBuffer = false;
1307    }
1308    size_t misalignment = mProxy->getMisalignment();
1309    int32_t sequence = mSequence;
1310
1311    // These fields don't need to be cached, because they are assigned only by set():
1312    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFrameSizeAF, mFlags
1313    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1314
1315    mLock.unlock();
1316
1317    // perform callbacks while unlocked
1318    if (newUnderrun) {
1319        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1320    }
1321    // FIXME we will miss loops if loop cycle was signaled several times since last call
1322    //       to processAudioBuffer()
1323    if (flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) {
1324        mCbf(EVENT_LOOP_END, mUserData, NULL);
1325    }
1326    if (flags & CBLK_BUFFER_END) {
1327        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1328    }
1329    if (markerReached) {
1330        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1331    }
1332    while (newPosCount > 0) {
1333        size_t temp = newPosition;
1334        mCbf(EVENT_NEW_POS, mUserData, &temp);
1335        newPosition += updatePeriod;
1336        newPosCount--;
1337    }
1338    if (mObservedSequence != sequence) {
1339        mObservedSequence = sequence;
1340        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1341    }
1342
1343    // if inactive, then don't run me again until re-started
1344    if (!active) {
1345        return NS_INACTIVE;
1346    }
1347
1348    // Compute the estimated time until the next timed event (position, markers, loops)
1349    // FIXME only for non-compressed audio
1350    uint32_t minFrames = ~0;
1351    if (!markerReached && position < markerPosition) {
1352        minFrames = markerPosition - position;
1353    }
1354    if (loopPeriod > 0 && loopPeriod < minFrames) {
1355        minFrames = loopPeriod;
1356    }
1357    if (updatePeriod > 0 && updatePeriod < minFrames) {
1358        minFrames = updatePeriod;
1359    }
1360
1361    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1362    static const uint32_t kPoll = 0;
1363    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1364        minFrames = kPoll * notificationFrames;
1365    }
1366
1367    // Convert frame units to time units
1368    nsecs_t ns = NS_WHENEVER;
1369    if (minFrames != (uint32_t) ~0) {
1370        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1371        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1372        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1373    }
1374
1375    // If not supplying data by EVENT_MORE_DATA, then we're done
1376    if (mTransfer != TRANSFER_CALLBACK) {
1377        return ns;
1378    }
1379
1380    struct timespec timeout;
1381    const struct timespec *requested = &ClientProxy::kForever;
1382    if (ns != NS_WHENEVER) {
1383        timeout.tv_sec = ns / 1000000000LL;
1384        timeout.tv_nsec = ns % 1000000000LL;
1385        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1386        requested = &timeout;
1387    }
1388
1389    while (mRemainingFrames > 0) {
1390
1391        Buffer audioBuffer;
1392        audioBuffer.frameCount = mRemainingFrames;
1393        size_t nonContig;
1394        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1395        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1396                "obtainBuffer() err=%d frameCount=%u", err, audioBuffer.frameCount);
1397        requested = &ClientProxy::kNonBlocking;
1398        size_t avail = audioBuffer.frameCount + nonContig;
1399        ALOGV("obtainBuffer(%u) returned %u = %u + %u",
1400                mRemainingFrames, avail, audioBuffer.frameCount, nonContig);
1401        if (err != NO_ERROR) {
1402            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR) {
1403                return 0;
1404            }
1405            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1406            return NS_NEVER;
1407        }
1408
1409        if (mRetryOnPartialBuffer) {
1410            mRetryOnPartialBuffer = false;
1411            if (avail < mRemainingFrames) {
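                // Estimate how long until the remaining frames become available; the
                // 1100000000 factor (vs 1e9) appears to add a ~10% margin to the
                // frames-to-nanoseconds conversion so we do not wake up too early.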
1412                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1413                if (ns < 0 || myns < ns) {
1414                    ns = myns;
1415                }
1416                return ns;
1417            }
1418        }
1419
1420        // Divide buffer size by 2 to take into account the expansion
1421        // due to 8 to 16 bit conversion: the callback must fill only half
1422        // of the destination buffer
1423        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1424            audioBuffer.size >>= 1;
1425        }
1426
1427        size_t reqSize = audioBuffer.size;
1428        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1429        size_t writtenSize = audioBuffer.size;
1430        size_t writtenFrames = writtenSize / mFrameSize;
1431
1432        // Sanity check on returned size
1433        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1434            ALOGE("EVENT_MORE_DATA requested %u bytes but callback returned %d bytes",
1435                    reqSize, (int) writtenSize);
1436            return NS_NEVER;
1437        }
1438
1439        if (writtenSize == 0) {
1440            // The callback is done filling buffers
1441            // Keep this thread going to handle timed events and
1442            // still try to get more data in intervals of WAIT_PERIOD_MS
1443            // but don't busy-loop and hog the CPU, so wait
1444            return WAIT_PERIOD_MS * 1000000LL;
1445        }
1446
1447        if (mFormat == AUDIO_FORMAT_PCM_8_BIT && !(mFlags & AUDIO_OUTPUT_FLAG_DIRECT)) {
1448            // 8 to 16 bit conversion, note that source and destination are the same address
1449            memcpy_to_i16_from_u8(audioBuffer.i16, (const uint8_t *) audioBuffer.i8, writtenSize);
1450            audioBuffer.size <<= 1;
1451        }
1452
1453        size_t releasedFrames = audioBuffer.size / mFrameSizeAF;
1454        audioBuffer.frameCount = releasedFrames;
1455        mRemainingFrames -= releasedFrames;
1456        if (misalignment >= releasedFrames) {
1457            misalignment -= releasedFrames;
1458        } else {
1459            misalignment = 0;
1460        }
1461
1462        releaseBuffer(&audioBuffer);
1463
1464        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
1465        // if callback doesn't like to accept the full chunk
1466        if (writtenSize < reqSize) {
1467            continue;
1468        }
1469
1470        // There could be enough non-contiguous frames available to satisfy the remaining request
1471        if (mRemainingFrames <= nonContig) {
1472            continue;
1473        }
1474
1475#if 0
1476        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1477        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1478        // that total to a sum == notificationFrames.
1479        if (0 < misalignment && misalignment <= mRemainingFrames) {
1480            mRemainingFrames = misalignment;
1481            return (mRemainingFrames * 1100000000LL) / sampleRate;
1482        }
1483#endif
1484
1485    }
1486    mRemainingFrames = notificationFrames;
1487    mRetryOnPartialBuffer = true;
1488
1489    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1490    return 0;
1491}
1492
1493status_t AudioTrack::restoreTrack_l(const char *from)
1494{
1495    ALOGW("dead IAudioTrack, creating a new one from %s()", from);
1496    ++mSequence;
1497    status_t result;
1498
1499    // refresh the audio configuration cache in this process to make sure we get new
1500    // output parameters in getOutput_l() and createTrack_l()
1501    AudioSystem::clearAudioConfigCache();
1502
1503    // if the new IAudioTrack is created, createTrack_l() will modify the
1504    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1505    // It will also delete the strong references on the previous IAudioTrack and IMemory.
1506    size_t position = mProxy->getPosition();
1507    mNewPosition = position + mUpdatePeriod;
1508    size_t bufferPosition = mStaticProxy != NULL ? mStaticProxy->getBufferPosition() : 0;
1509    result = createTrack_l(mStreamType,
1510                           mSampleRate,
1511                           mFormat,
1512                           mReqFrameCount,  // so that frame count never goes down
1513                           mFlags,
1514                           mSharedBuffer,
1515                           getOutput_l(),
1516                           position /*epoch*/);
1517
1518    if (result == NO_ERROR) {
1519        // continue playback from last known position, but
1520        // don't attempt to restore loop after invalidation; it's difficult and not worthwhile
1521        if (mStaticProxy != NULL) {
1522            mLoopPeriod = 0;
1523            mStaticProxy->setLoop(bufferPosition, mFrameCount, 0);
1524        }
1525        // FIXME How do we simulate the fact that all frames present in the buffer at the time of
1526        //       track destruction have been played? This is critical for the SoundPool implementation.
1527        //       This must be broken, and needs to be tested/debugged.
1528#if 0
1529        // restore write index and set other indexes to reflect empty buffer status
1530        if (!strcmp(from, "start")) {
1531            // Make sure that a client relying on callback events indicating underrun or
1532            // the actual amount of audio frames played (e.g SoundPool) receives them.
1533            if (mSharedBuffer == 0) {
1534                // restart playback even if buffer is not completely filled.
1535                android_atomic_or(CBLK_FORCEREADY, &mCblk->flags);
1536            }
1537        }
1538#endif
1539        if (mState == STATE_ACTIVE) {
1540            result = mAudioTrack->start();
1541        }
1542    }
1543    if (result != NO_ERROR) {
1544        ALOGW("restoreTrack_l() failed status %d", result);
1545        mState = STATE_STOPPED;
1546    }
1547
1548    return result;
1549}
1550
1551status_t AudioTrack::setParameters(const String8& keyValuePairs)
1552{
1553    AutoMutex lock(mLock);
1554    if (mAudioTrack != 0) {
1555        return mAudioTrack->setParameters(keyValuePairs);
1556    } else {
1557        return NO_INIT;
1558    }
1559}
1560
1561String8 AudioTrack::getParameters(const String8& keys)
1562{
1563    return String8::empty();
1564}
1565
1566status_t AudioTrack::dump(int fd, const Vector<String16>& args) const
1567{
1568
1569    const size_t SIZE = 256;
1570    char buffer[SIZE];
1571    String8 result;
1572
1573    result.append(" AudioTrack::dump\n");
1574    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
1575            mVolume[0], mVolume[1]);
1576    result.append(buffer);
1577    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%d)\n", mFormat,
1578            mChannelCount, mFrameCount);
1579    result.append(buffer);
1580    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
1581    result.append(buffer);
1582    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
1583    result.append(buffer);
1584    ::write(fd, result.string(), result.size());
1585    return NO_ERROR;
1586}
1587
1588uint32_t AudioTrack::getUnderrunFrames() const
1589{
1590    AutoMutex lock(mLock);
1591    return mProxy->getUnderrunFrames();
1592}
1593
1594// =========================================================================
1595
1596void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who)
1597{
1598    sp<AudioTrack> audioTrack = mAudioTrack.promote();
1599    if (audioTrack != 0) {
1600        AutoMutex lock(audioTrack->mLock);
1601        audioTrack->mProxy->binderDied();
1602    }
1603}
1604
1605// =========================================================================
1606
1607AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
1608    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mResumeLatch(false)
1609{
1610}
1611
1612AudioTrack::AudioTrackThread::~AudioTrackThread()
1613{
1614}
1615
1616bool AudioTrack::AudioTrackThread::threadLoop()
1617{
1618    {
1619        AutoMutex _l(mMyLock);
1620        if (mPaused) {
1621            mMyCond.wait(mMyLock);
1622            // caller will check for exitPending()
1623            return true;
1624        }
1625    }
1626    nsecs_t ns = mReceiver.processAudioBuffer(this);
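    // Interpretation of the return value: 0 means run again immediately,
    // NS_INACTIVE means pause until resume(), NS_WHENEVER means nothing is due soon
    // (poll again after 1 s), NS_NEVER means exit the thread loop, and any other
    // positive value is a nanosecond sleep before the next pass.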
1627    switch (ns) {
1628    case 0:
1629        return true;
1630    case NS_WHENEVER:
1631        sleep(1);
1632        return true;
1633    case NS_INACTIVE:
1634        pauseConditional();
1635        return true;
1636    case NS_NEVER:
1637        return false;
1638    default:
1639        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %lld", ns);
1640        struct timespec req;
1641        req.tv_sec = ns / 1000000000LL;
1642        req.tv_nsec = ns % 1000000000LL;
1643        nanosleep(&req, NULL /*rem*/);
1644        return true;
1645    }
1646}
1647
1648void AudioTrack::AudioTrackThread::requestExit()
1649{
1650    // must be in this order to avoid a race condition
1651    Thread::requestExit();
1652    resume();
1653}
1654
1655void AudioTrack::AudioTrackThread::pause()
1656{
1657    AutoMutex _l(mMyLock);
1658    mPaused = true;
1659    mResumeLatch = false;
1660}
1661
1662void AudioTrack::AudioTrackThread::pauseConditional()
1663{
1664    AutoMutex _l(mMyLock);
1665    if (mResumeLatch) {
1666        mResumeLatch = false;
1667    } else {
1668        mPaused = true;
1669    }
1670}
1671
1672void AudioTrack::AudioTrackThread::resume()
1673{
1674    AutoMutex _l(mMyLock);
1675    if (mPaused) {
1676        mPaused = false;
1677        mResumeLatch = false;
1678        mMyCond.signal();
1679    } else {
1680        mResumeLatch = true;
1681    }
1682}
1683
1684}; // namespace android
1685