AudioTrack.cpp revision 8839430158e22382f0f6450c9274071eca945989
1/*
2**
3** Copyright 2007, The Android Open Source Project
4**
5** Licensed under the Apache License, Version 2.0 (the "License");
6** you may not use this file except in compliance with the License.
7** You may obtain a copy of the License at
8**
9**     http://www.apache.org/licenses/LICENSE-2.0
10**
11** Unless required by applicable law or agreed to in writing, software
12** distributed under the License is distributed on an "AS IS" BASIS,
13** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14** See the License for the specific language governing permissions and
15** limitations under the License.
16*/
17
18//#define LOG_NDEBUG 0
19#define LOG_TAG "AudioTrack"
20
21#include <inttypes.h>
22#include <math.h>
23#include <sys/resource.h>
24
25#include <audio_utils/clock.h>
26#include <audio_utils/primitives.h>
27#include <binder/IPCThreadState.h>
28#include <media/AudioTrack.h>
29#include <utils/Log.h>
30#include <private/media/AudioTrackShared.h>
31#include <media/IAudioFlinger.h>
32#include <media/AudioPolicyHelper.h>
33#include <media/AudioResamplerPublic.h>
34#include <media/MediaAnalyticsItem.h>
35#include <media/TypeConverter.h>
36
37#define WAIT_PERIOD_MS                  10
38#define WAIT_STREAM_END_TIMEOUT_SEC     120
39static const int kMaxLoopCountNotifications = 32;
40
41namespace android {
42// ---------------------------------------------------------------------------
43
44using media::VolumeShaper;
45
46// TODO: Move to a separate .h
47
// Returns the lesser of two comparable values.
// On equality the second argument is returned (mirrors the original's ternary).
template <typename T>
static inline const T &min(const T &a, const T &b) {
    if (a < b) {
        return a;
    }
    return b;
}
52
// Returns the greater of two comparable values.
// On equality the second argument is returned (mirrors the original's ternary).
template <typename T>
static inline const T &max(const T &a, const T &b) {
    if (a > b) {
        return a;
    }
    return b;
}
57
58static const int32_t NANOS_PER_SECOND = 1000000000;
59
60static inline nsecs_t framesToNanoseconds(ssize_t frames, uint32_t sampleRate, float speed)
61{
62    return ((double)frames * 1000000000) / ((double)sampleRate * speed);
63}
64
65static int64_t convertTimespecToUs(const struct timespec &tv)
66{
67    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
68}
69
70// TODO move to audio_utils.
71static inline struct timespec convertNsToTimespec(int64_t ns) {
72    struct timespec tv;
73    tv.tv_sec = static_cast<time_t>(ns / NANOS_PER_SECOND);
74    tv.tv_nsec = static_cast<long>(ns % NANOS_PER_SECOND);
75    return tv;
76}
77
78// current monotonic time in microseconds.
79static int64_t getNowUs()
80{
81    struct timespec tv;
82    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
83    return convertTimespecToUs(tv);
84}
85
86// FIXME: we don't use the pitch setting in the time stretcher (not working);
87// instead we emulate it using our sample rate converter.
88static const bool kFixPitch = true; // enable pitch fix
89static inline uint32_t adjustSampleRate(uint32_t sampleRate, float pitch)
90{
91    return kFixPitch ? (sampleRate * pitch + 0.5) : sampleRate;
92}
93
94static inline float adjustSpeed(float speed, float pitch)
95{
96    return kFixPitch ? speed / max(pitch, AUDIO_TIMESTRETCH_PITCH_MIN_DELTA) : speed;
97}
98
99static inline float adjustPitch(float pitch)
100{
101    return kFixPitch ? AUDIO_TIMESTRETCH_PITCH_NORMAL : pitch;
102}
103
104// static
105status_t AudioTrack::getMinFrameCount(
106        size_t* frameCount,
107        audio_stream_type_t streamType,
108        uint32_t sampleRate)
109{
110    if (frameCount == NULL) {
111        return BAD_VALUE;
112    }
113
114    // FIXME handle in server, like createTrack_l(), possible missing info:
115    //          audio_io_handle_t output
116    //          audio_format_t format
117    //          audio_channel_mask_t channelMask
118    //          audio_output_flags_t flags (FAST)
119    uint32_t afSampleRate;
120    status_t status;
121    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
122    if (status != NO_ERROR) {
123        ALOGE("Unable to query output sample rate for stream type %d; status %d",
124                streamType, status);
125        return status;
126    }
127    size_t afFrameCount;
128    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
129    if (status != NO_ERROR) {
130        ALOGE("Unable to query output frame count for stream type %d; status %d",
131                streamType, status);
132        return status;
133    }
134    uint32_t afLatency;
135    status = AudioSystem::getOutputLatency(&afLatency, streamType);
136    if (status != NO_ERROR) {
137        ALOGE("Unable to query output latency for stream type %d; status %d",
138                streamType, status);
139        return status;
140    }
141
142    // When called from createTrack, speed is 1.0f (normal speed).
143    // This is rechecked again on setting playback rate (TODO: on setting sample rate, too).
144    *frameCount = AudioSystem::calculateMinFrameCount(afLatency, afFrameCount, afSampleRate,
145                                              sampleRate, 1.0f /*, 0 notificationsPerBufferReq*/);
146
147    // The formula above should always produce a non-zero value under normal circumstances:
148    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
149    // Return error in the unlikely event that it does not, as that's part of the API contract.
150    if (*frameCount == 0) {
151        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
152                streamType, sampleRate);
153        return BAD_VALUE;
154    }
155    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, afSampleRate=%u, afLatency=%u",
156            *frameCount, afFrameCount, afSampleRate, afLatency);
157    return NO_ERROR;
158}
159
160// ---------------------------------------------------------------------------
161
162static std::string audioContentTypeString(audio_content_type_t value) {
163    std::string contentType;
164    if (AudioContentTypeConverter::toString(value, contentType)) {
165        return contentType;
166    }
167    char rawbuffer[16];  // room for "%d"
168    snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
169    return rawbuffer;
170}
171
172static std::string audioUsageString(audio_usage_t value) {
173    std::string usage;
174    if (UsageTypeConverter::toString(value, usage)) {
175        return usage;
176    }
177    char rawbuffer[16];  // room for "%d"
178    snprintf(rawbuffer, sizeof(rawbuffer), "%d", value);
179    return rawbuffer;
180}
181
// Collects client-side metrics from the given track into mAnalyticsItem,
// for later reporting (see getMetrics()). Called from getMetrics() and the
// AudioTrack destructor. Only gathers when the track initialized cleanly.
void AudioTrack::MediaMetrics::gather(const AudioTrack *track)
{

    // key for media statistics is defined in the header
    // attrs for media statistics
    static constexpr char kAudioTrackStreamType[] = "android.media.audiotrack.streamtype";
    static constexpr char kAudioTrackContentType[] = "android.media.audiotrack.type";
    static constexpr char kAudioTrackUsage[] = "android.media.audiotrack.usage";
    static constexpr char kAudioTrackSampleRate[] = "android.media.audiotrack.samplerate";
    static constexpr char kAudioTrackChannelMask[] = "android.media.audiotrack.channelmask";
    static constexpr char kAudioTrackUnderrunFrames[] = "android.media.audiotrack.underrunframes";
    static constexpr char kAudioTrackStartupGlitch[] = "android.media.audiotrack.glitch.startup";

    // only if we're in a good state...
    // XXX: shall we gather alternative info if failing?
    const status_t lstatus = track->initCheck();
    if (lstatus != NO_ERROR) {
        ALOGD("no metrics gathered, track status=%d", (int) lstatus);
        return;
    }

    // constructor guarantees mAnalyticsItem is valid

    // Only record underruns / startup glitches when they actually occurred,
    // to keep the item sparse.
    const int32_t underrunFrames = track->getUnderrunFrames();
    if (underrunFrames != 0) {
        mAnalyticsItem->setInt32(kAudioTrackUnderrunFrames, underrunFrames);
    }

    if (track->mTimestampStartupGlitchReported) {
        mAnalyticsItem->setInt32(kAudioTrackStartupGlitch, 1);
    }

    // NOTE(review): -1 here presumably corresponds to AUDIO_STREAM_DEFAULT,
    // i.e. "track was built from audio attributes" — confirm against set().
    if (track->mStreamType != -1) {
        // deprecated, but this will tell us who still uses it.
        mAnalyticsItem->setInt32(kAudioTrackStreamType, track->mStreamType);
    }
    // XXX: consider including from mAttributes: source type
    mAnalyticsItem->setCString(kAudioTrackContentType,
                               audioContentTypeString(track->mAttributes.content_type).c_str());
    mAnalyticsItem->setCString(kAudioTrackUsage,
                               audioUsageString(track->mAttributes.usage).c_str());
    mAnalyticsItem->setInt32(kAudioTrackSampleRate, track->mSampleRate);
    mAnalyticsItem->setInt64(kAudioTrackChannelMask, track->mChannelMask);
}
226
227// hand the user a snapshot of the metrics.
228status_t AudioTrack::getMetrics(MediaAnalyticsItem * &item)
229{
230    mMediaMetrics.gather(this);
231    MediaAnalyticsItem *tmp = mMediaMetrics.dup();
232    if (tmp == nullptr) {
233        return BAD_VALUE;
234    }
235    item = tmp;
236    return NO_ERROR;
237}
238
// Default constructor: leaves the track uninitialized (mStatus == NO_INIT).
// A later set() call is required before the track can be used.
AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mState(STATE_STOPPED),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0),
      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE),
      mRoutedDeviceId(AUDIO_PORT_HANDLE_NONE)
{
    // No attributes supplied yet; start from neutral/unknown attributes
    // with an empty tags string.
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}
253
254AudioTrack::AudioTrack(
255        audio_stream_type_t streamType,
256        uint32_t sampleRate,
257        audio_format_t format,
258        audio_channel_mask_t channelMask,
259        size_t frameCount,
260        audio_output_flags_t flags,
261        callback_t cbf,
262        void* user,
263        int32_t notificationFrames,
264        audio_session_t sessionId,
265        transfer_type transferType,
266        const audio_offload_info_t *offloadInfo,
267        uid_t uid,
268        pid_t pid,
269        const audio_attributes_t* pAttributes,
270        bool doNotReconnect,
271        float maxRequiredSpeed,
272        audio_port_handle_t selectedDeviceId)
273    : mStatus(NO_INIT),
274      mState(STATE_STOPPED),
275      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
276      mPreviousSchedulingGroup(SP_DEFAULT),
277      mPausedPosition(0)
278{
279    (void)set(streamType, sampleRate, format, channelMask,
280            frameCount, flags, cbf, user, notificationFrames,
281            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
282            offloadInfo, uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed, selectedDeviceId);
283}
284
285AudioTrack::AudioTrack(
286        audio_stream_type_t streamType,
287        uint32_t sampleRate,
288        audio_format_t format,
289        audio_channel_mask_t channelMask,
290        const sp<IMemory>& sharedBuffer,
291        audio_output_flags_t flags,
292        callback_t cbf,
293        void* user,
294        int32_t notificationFrames,
295        audio_session_t sessionId,
296        transfer_type transferType,
297        const audio_offload_info_t *offloadInfo,
298        uid_t uid,
299        pid_t pid,
300        const audio_attributes_t* pAttributes,
301        bool doNotReconnect,
302        float maxRequiredSpeed)
303    : mStatus(NO_INIT),
304      mState(STATE_STOPPED),
305      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
306      mPreviousSchedulingGroup(SP_DEFAULT),
307      mPausedPosition(0),
308      mSelectedDeviceId(AUDIO_PORT_HANDLE_NONE)
309{
310    (void)set(streamType, sampleRate, format, channelMask,
311            0 /*frameCount*/, flags, cbf, user, notificationFrames,
312            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
313            uid, pid, pAttributes, doNotReconnect, maxRequiredSpeed);
314}
315
// Destructor: gathers final metrics, then (only if set() succeeded) stops
// playback, tears down the callback thread, unregisters callbacks, releases
// binder/shared-memory references, and releases the audio session id.
// The teardown order matters: stop + interrupt first so the callback thread
// cannot be blocked in obtainBuffer() when we join it.
AudioTrack::~AudioTrack()
{
    // pull together the numbers, before we clean up our structures
    mMediaMetrics.gather(this);

    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        // No lock here: worst case we remove a NULL callback which will be a nop
        if (mDeviceCallback != 0 && mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(this, mOutput);
        }
        // Drop the server-side track and shared memory, then flush any
        // pending binder commands before releasing the session id.
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id %d from %d on behalf of %d",
                mSessionId, IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}
346
// One-time initialization of the track. Validates and normalizes all
// parameters (transfer type, stream type vs. attributes, format, channel
// mask, flags, notification configuration), records client identity, spawns
// the callback thread when a callback is supplied, and finally creates the
// server-side IAudioTrack via createTrack_l(). On any failure the function
// jumps to `exit`, where mStatus is set and returned; on success
// mAudioTrack != 0 becomes the "initialized" invariant. Must be called at
// most once per instance (re-use is rejected with INVALID_OPERATION).
status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        int32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        audio_session_t sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        uid_t uid,
        pid_t pid,
        const audio_attributes_t* pAttributes,
        bool doNotReconnect,
        float maxRequiredSpeed,
        audio_port_handle_t selectedDeviceId)
{
    status_t status;
    uint32_t channelCount;
    pid_t callingPid;
    pid_t myPid;

    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %d, sessionId %d, transferType %d, uid %d, pid %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType, uid, pid);

    mThreadCanCallJava = threadCanCallJava;
    mSelectedDeviceId = selectedDeviceId;
    mSessionId = sessionId;

    // Resolve TRANSFER_DEFAULT to a concrete mode, and validate that the
    // requested mode is consistent with cbf/sharedBuffer.
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            // NOTE(review): this message also fires for TRANSFER_SYNC, not
            // just TRANSFER_OBTAIN as its text suggests.
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            status = BAD_VALUE;
            goto exit;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            status = BAD_VALUE;
            goto exit;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        status = BAD_VALUE;
        goto exit;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;
    mDoNotReconnect = doNotReconnect;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %zu", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        status = INVALID_OPERATION;
        goto exit;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        // Legacy path: stream type is authoritative.
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            status = BAD_VALUE;
            goto exit;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        // Attribute flags may promote the output flags (HW A/V sync, FAST).
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
        if ((mAttributes.flags & AUDIO_FLAG_LOW_LATENCY) != 0) {
            flags = (audio_output_flags_t) (flags | AUDIO_OUTPUT_FLAG_FAST);
        }
        // check deep buffer after flags have been modified above
        if (flags == AUDIO_OUTPUT_FLAG_NONE && (mAttributes.flags & AUDIO_FLAG_DEEP_BUFFER) != 0) {
            flags = AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    } else if (format == AUDIO_FORMAT_IEC61937) { // HDMI pass-through?
        mAttributes.flags |= AUDIO_OUTPUT_FLAG_IEC958_NONAUDIO;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        status = BAD_VALUE;
        goto exit;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        status = BAD_VALUE;
        goto exit;
    }
    mChannelMask = channelMask;
    channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    // Frame size: bytes per frame for proportional-frame formats, one byte
    // otherwise (opaque/compressed direct streams).
    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_has_proportional_frames(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_has_proportional_frames(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        status = BAD_VALUE;
        goto exit;
    }
    mSampleRate = sampleRate;
    mOriginalSampleRate = sampleRate;
    mPlaybackRate = AUDIO_PLAYBACK_RATE_DEFAULT;
    // 1.0 <= mMaxRequiredSpeed <= AUDIO_TIMESTRETCH_SPEED_MAX
    mMaxRequiredSpeed = min(max(maxRequiredSpeed, 1.0f), AUDIO_TIMESTRETCH_SPEED_MAX);

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
        memset(&mOffloadInfoCopy, 0, sizeof(audio_offload_info_t));
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    // notificationFrames >= 0: a period in frames. Negative: a request for
    // "notifications per buffer" (fast tracks only, frameCount must be 0).
    if (notificationFrames >= 0) {
        mNotificationFramesReq = notificationFrames;
        mNotificationsPerBufferReq = 0;
    } else {
        if (!(flags & AUDIO_OUTPUT_FLAG_FAST)) {
            ALOGE("notificationFrames=%d not permitted for non-fast track",
                    notificationFrames);
            status = BAD_VALUE;
            goto exit;
        }
        if (frameCount > 0) {
            ALOGE("notificationFrames=%d not permitted with non-zero frameCount=%zu",
                    notificationFrames, frameCount);
            status = BAD_VALUE;
            goto exit;
        }
        mNotificationFramesReq = 0;
        const uint32_t minNotificationsPerBuffer = 1;
        const uint32_t maxNotificationsPerBuffer = 8;
        mNotificationsPerBufferReq = min(maxNotificationsPerBuffer,
                max((uint32_t) -notificationFrames, minNotificationsPerBuffer));
        ALOGW_IF(mNotificationsPerBufferReq != (uint32_t) -notificationFrames,
                "notificationFrames=%d clamped to the range -%u to -%u",
                notificationFrames, minNotificationsPerBuffer, maxNotificationsPerBuffer);
    }
    mNotificationFramesAct = 0;
    // Trust caller-supplied uid/pid only when called from the same process
    // (i.e. a trusted mediaserver-side path); otherwise use binder identity.
    callingPid = IPCThreadState::self()->getCallingPid();
    myPid = getpid();
    if (uid == AUDIO_UID_INVALID || (callingPid != myPid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingPid != myPid)) {
        mClientPid = callingPid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mOrigFlags = mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
        // thread begins in paused state, and will not reference us until start()
    }

    // create the IAudioTrack
    status = createTrack_l();

    if (status != NO_ERROR) {
        // Roll back the callback thread created above.
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        goto exit;
    }

    // Success: initialize the remaining client-side bookkeeping.
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mPosition = 0;
    mReleased = 0;
    mStartNs = 0;
    mStartFromZeroUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;
    mPreviousTimestampValid = false;
    mTimestampStartupGlitchReported = false;
    mRetrogradeMotionReported = false;
    mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;
    mStartTs.mPosition = 0;
    mUnderrunCountOffset = 0;
    mFramesWritten = 0;
    mFramesWrittenServerOffset = 0;
    mFramesWrittenAtRestore = -1; // -1 is a unique initializer.
    mVolumeHandler = new media::VolumeHandler();

exit:
    mStatus = status;
    return status;
}
639
640// -------------------------------------------------------------------------
641
// Starts (or resumes) playback. Transitions the state machine
// (PAUSED_STOPPING -> STOPPING, otherwise -> ACTIVE), snapshots start
// timestamps for later timestamp sanity checks, starts the server-side
// track (restoring an invalidated track if needed), and resumes or
// re-prioritizes the callback thread. Returns INVALID_OPERATION if
// already active, otherwise the server start/restore status.
status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();

    // save start timestamp
    if (isOffloadedOrDirect_l()) {
        if (getTimestamp_l(mStartTs) != OK) {
            mStartTs.mPosition = 0;
        }
    } else {
        if (getTimestamp_l(&mStartEts) != OK) {
            mStartEts.clear();
        }
    }
    mStartNs = systemTime(); // save this for timestamp adjustment after starting.
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        mPreviousTimestampValid = false;
        mTimestampStartupGlitchReported = false;
        mRetrogradeMotionReported = false;
        mPreviousLocation = ExtendedTimestamp::LOCATION_INVALID;

        if (!isOffloadedOrDirect_l()
                && mStartEts.mTimeNs[ExtendedTimestamp::LOCATION_SERVER] > 0) {
            // Server side has consumed something, but is it finished consuming?
            // It is possible since flush and stop are asynchronous that the server
            // is still active at this point.
            ALOGV("start: server read:%lld  cumulative flushed:%lld  client written:%lld",
                    (long long)(mFramesWrittenServerOffset
                            + mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER]),
                    (long long)mStartEts.mFlushed,
                    (long long)mFramesWritten);
            // mStartEts is already adjusted by mFramesWrittenServerOffset, so we delta adjust.
            mFramesWrittenServerOffset -= mStartEts.mPosition[ExtendedTimestamp::LOCATION_SERVER];
        }
        mFramesWritten = 0;
        mProxy->clearTimestamp(); // need new server push for valid timestamp
        mMarkerReached = false;

        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartFromZeroUs = mStartNs / 1000;

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    // Clear stream-end/disabled flags; the returned previous flags tell us
    // whether the server-side track was invalidated.
    int32_t flags = android_atomic_and(~(CBLK_STREAM_END_DONE | CBLK_DISABLED), &mCblk->mFlags);

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        // Track was invalidated (e.g. server died or output changed); try to
        // recreate it transparently.
        status = restoreTrack_l("start");
    }

    // resume or pause the callback thread as needed.
    sp<AudioTrackThread> t = mAudioTrackThread;
    if (status == NO_ERROR) {
        if (t != 0) {
            if (previousState == STATE_STOPPING) {
                mProxy->interrupt();
            } else {
                t->resume();
            }
        } else {
            // No callback thread: boost the calling thread's priority for
            // client-driven writes, remembering the old settings for stop().
            mPreviousPriority = getpriority(PRIO_PROCESS, 0);
            get_sched_policy(0, &mPreviousSchedulingGroup);
            androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
        }

        // Start our local VolumeHandler for restoration purposes.
        mVolumeHandler->setStarted();
    } else {
        // Start failed: roll back the state transition and thread/priority changes.
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}
752
// Stops playback. Offloaded tracks enter STOPPING (they must drain first);
// all others go directly to STOPPED. No-op unless currently active or paused.
void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        ALOGD_IF(mSharedBuffer == nullptr,
                "stop() called with %u frames delivered", mReleased.value());
        mReleased = 0;
    }

    // Wake any waiter in obtainBuffer() before asking the server to stop.
    mProxy->interrupt();
    mAudioTrack->stop();

    // Note: legacy handling - stop does not clear playback marker
    // and periodic update counter, but flush does for streaming tracks.

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        // Keep the callback thread running for offloaded tracks so it can
        // deliver the stream-end notification after the drain completes.
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        // Restore the priority boost applied in start().
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}
791
792bool AudioTrack::stopped() const
793{
794    AutoMutex lock(mLock);
795    return mState != STATE_ACTIVE;
796}
797
798void AudioTrack::flush()
799{
800    if (mSharedBuffer != 0) {
801        return;
802    }
803    AutoMutex lock(mLock);
804    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
805        return;
806    }
807    flush_l();
808}
809
// Lock-held flush implementation: resets client-side markers/counters,
// transitions to STATE_FLUSHED, then flushes the proxy and the server-side
// track. Caller must hold mLock; must not be called while ACTIVE.
void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    // For offloaded tracks, wake any blocked obtainBuffer() before flushing.
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}
828
// Pauses playback: ACTIVE -> PAUSED, STOPPING -> PAUSED_STOPPING; otherwise
// a no-op. For offloaded tracks, caches the current render position so
// position queries while paused return a stable value.
void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    // Wake any waiter in obtainBuffer(), then ask the server to pause.
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}
861
862status_t AudioTrack::setVolume(float left, float right)
863{
864    // This duplicates a test by AudioTrack JNI, but that is not the only caller
865    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
866            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
867        return BAD_VALUE;
868    }
869
870    AutoMutex lock(mLock);
871    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
872    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;
873
874    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));
875
876    if (isOffloaded_l()) {
877        mAudioTrack->signal();
878    }
879    return NO_ERROR;
880}
881
// Convenience overload: applies the same gain to both channels.
status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}
886
887status_t AudioTrack::setAuxEffectSendLevel(float level)
888{
889    // This duplicates a test by AudioTrack JNI, but that is not the only caller
890    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
891        return BAD_VALUE;
892    }
893
894    AutoMutex lock(mLock);
895    mSendLevel = level;
896    mProxy->setSendLevel(level);
897
898    return NO_ERROR;
899}
900
901void AudioTrack::getAuxEffectSendLevel(float* level) const
902{
903    if (level != NULL) {
904        *level = mSendLevel;
905    }
906}
907
// Changes the nominal sample rate of the stream.  Disallowed for offloaded,
// direct, and fast tracks.  The effective (pitch-adjusted) rate must not
// exceed the mixer rate by more than AUDIO_RESAMPLER_DOWN_RATIO_MAX.
status_t AudioTrack::setSampleRate(uint32_t rate)
{
    AutoMutex lock(mLock);
    if (rate == mSampleRate) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l() || (mFlags & AUDIO_OUTPUT_FLAG_FAST)) {
        return INVALID_OPERATION;
    }
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    // NOTE: it is theoretically possible, but highly unlikely, that a device change
    // could mean a previously allowed sampling rate is no longer allowed.
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveSampleRate = adjustSampleRate(rate, mPlaybackRate.mPitch);
    if (rate == 0 || effectiveSampleRate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }
    // TODO: Should we also check if the buffer size is compatible?

    mSampleRate = rate;
    // Server reads the effective rate from shared memory via the proxy.
    mProxy->setSampleRate(effectiveSampleRate);

    return NO_ERROR;
}
938
939uint32_t AudioTrack::getSampleRate() const
940{
941    AutoMutex lock(mLock);
942
943    // sample rate can be updated during playback by the offloaded decoder so we need to
944    // query the HAL and update if needed.
945// FIXME use Proxy return channel to update the rate from server and avoid polling here
946    if (isOffloadedOrDirect_l()) {
947        if (mOutput != AUDIO_IO_HANDLE_NONE) {
948            uint32_t sampleRate = 0;
949            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
950            if (status == NO_ERROR) {
951                mSampleRate = sampleRate;
952            }
953        }
954    }
955    return mSampleRate;
956}
957
// Returns the sample rate recorded at first track creation (set once in
// createTrack_l()); read without taking mLock.
uint32_t AudioTrack::getOriginalSampleRate() const
{
    return mOriginalSampleRate;
}
962
// Applies a new speed/pitch setting.  Pitch is emulated by combining an
// adjusted sample rate with an adjusted speed; both effective values are
// validated (range, buffer size, resampler ratio limits) before anything is
// committed to the proxy.  Rejected for offloaded/direct and fast tracks.
status_t AudioTrack::setPlaybackRate(const AudioPlaybackRate &playbackRate)
{
    AutoMutex lock(mLock);
    if (isAudioPlaybackRateEqual(playbackRate, mPlaybackRate)) {
        return NO_ERROR;
    }
    if (isOffloadedOrDirect_l()) {
        return INVALID_OPERATION;
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        return INVALID_OPERATION;
    }

    ALOGV("setPlaybackRate (input): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            mSampleRate, playbackRate.mSpeed, playbackRate.mPitch);
    // pitch is emulated by adjusting speed and sampleRate
    const uint32_t effectiveRate = adjustSampleRate(mSampleRate, playbackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(playbackRate.mSpeed, playbackRate.mPitch);
    const float effectivePitch = adjustPitch(playbackRate.mPitch);
    AudioPlaybackRate playbackRateTemp = playbackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;

    ALOGV("setPlaybackRate (effective): mSampleRate:%u  mSpeed:%f  mPitch:%f",
            effectiveRate, effectiveSpeed, effectivePitch);

    if (!isAudioPlaybackRateValid(playbackRateTemp)) {
        ALOGW("setPlaybackRate(%f, %f) failed (effective rate out of bounds)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // Check if the buffer size is compatible.
    if (!isSampleRateSpeedAllowed_l(effectiveRate, effectiveSpeed)) {
        ALOGW("setPlaybackRate(%f, %f) failed (buffer size)",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    // Check resampler ratios are within bounds
    if ((uint64_t)effectiveRate > (uint64_t)mSampleRate *
            (uint64_t)AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate exceeds max accepted value",
                playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }

    if ((uint64_t)effectiveRate * (uint64_t)AUDIO_RESAMPLER_UP_RATIO_MAX < (uint64_t)mSampleRate) {
        ALOGW("setPlaybackRate(%f, %f) failed. Resample rate below min accepted value",
                        playbackRate.mSpeed, playbackRate.mPitch);
        return BAD_VALUE;
    }
    // All checks passed; store the requested (not effective) rate locally.
    mPlaybackRate = playbackRate;
    //set effective rates
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setSampleRate(effectiveRate); // FIXME: not quite "atomic" with setPlaybackRate
    return NO_ERROR;
}
1020
// Returns a reference to the current playback rate settings.
// NOTE(review): the returned reference outlives the lock scope, so a caller
// racing with setPlaybackRate() may read a partially updated struct — confirm
// that callers copy the result promptly.
const AudioPlaybackRate& AudioTrack::getPlaybackRate() const
{
    AutoMutex lock(mLock);
    return mPlaybackRate;
}
1026
1027ssize_t AudioTrack::getBufferSizeInFrames()
1028{
1029    AutoMutex lock(mLock);
1030    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1031        return NO_INIT;
1032    }
1033    return (ssize_t) mProxy->getBufferSizeInFrames();
1034}
1035
1036status_t AudioTrack::getBufferDurationInUs(int64_t *duration)
1037{
1038    if (duration == nullptr) {
1039        return BAD_VALUE;
1040    }
1041    AutoMutex lock(mLock);
1042    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1043        return NO_INIT;
1044    }
1045    ssize_t bufferSizeInFrames = (ssize_t) mProxy->getBufferSizeInFrames();
1046    if (bufferSizeInFrames < 0) {
1047        return (status_t)bufferSizeInFrames;
1048    }
1049    *duration = (int64_t)((double)bufferSizeInFrames * 1000000
1050            / ((double)mSampleRate * mPlaybackRate.mSpeed));
1051    return NO_ERROR;
1052}
1053
1054ssize_t AudioTrack::setBufferSizeInFrames(size_t bufferSizeInFrames)
1055{
1056    AutoMutex lock(mLock);
1057    if (mOutput == AUDIO_IO_HANDLE_NONE || mProxy.get() == 0) {
1058        return NO_INIT;
1059    }
1060    // Reject if timed track or compressed audio.
1061    if (!audio_is_linear_pcm(mFormat)) {
1062        return INVALID_OPERATION;
1063    }
1064    return (ssize_t) mProxy->setBufferSizeInFrames((uint32_t) bufferSizeInFrames);
1065}
1066
1067status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
1068{
1069    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
1070        return INVALID_OPERATION;
1071    }
1072
1073    if (loopCount == 0) {
1074        ;
1075    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
1076            loopEnd - loopStart >= MIN_LOOP) {
1077        ;
1078    } else {
1079        return BAD_VALUE;
1080    }
1081
1082    AutoMutex lock(mLock);
1083    // See setPosition() regarding setting parameters such as loop points or position while active
1084    if (mState == STATE_ACTIVE) {
1085        return INVALID_OPERATION;
1086    }
1087    setLoop_l(loopStart, loopEnd, loopCount);
1088    return NO_ERROR;
1089}
1090
// Internal loop-point setter.  Caller must hold mLock (suffix _l) and the
// track must not be active.  Caches the values locally, then publishes them
// to the server through the static-track proxy.
void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // We do not update the periodic notification point.
    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopCount = loopCount;
    mLoopEnd = loopEnd;
    mLoopStart = loopStart;
    mLoopCountNotified = loopCount;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
}
1103
1104status_t AudioTrack::setMarkerPosition(uint32_t marker)
1105{
1106    // The only purpose of setting marker position is to get a callback
1107    if (mCbf == NULL || isOffloadedOrDirect()) {
1108        return INVALID_OPERATION;
1109    }
1110
1111    AutoMutex lock(mLock);
1112    mMarkerPosition = marker;
1113    mMarkerReached = false;
1114
1115    sp<AudioTrackThread> t = mAudioTrackThread;
1116    if (t != 0) {
1117        t->wake();
1118    }
1119    return NO_ERROR;
1120}
1121
// Reads back the current marker position.  Note the check order: an
// offloaded/direct track returns INVALID_OPERATION even for a NULL pointer.
status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mMarkerPosition.getValue(marker);

    return NO_ERROR;
}
1136
1137status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
1138{
1139    // The only purpose of setting position update period is to get a callback
1140    if (mCbf == NULL || isOffloadedOrDirect()) {
1141        return INVALID_OPERATION;
1142    }
1143
1144    AutoMutex lock(mLock);
1145    mNewPosition = updateAndGetPosition_l() + updatePeriod;
1146    mUpdatePeriod = updatePeriod;
1147
1148    sp<AudioTrackThread> t = mAudioTrackThread;
1149    if (t != 0) {
1150        t->wake();
1151    }
1152    return NO_ERROR;
1153}
1154
// Reads back the periodic notification interval; same check order as
// getMarkerPosition().
status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}
1169
// Moves the playback position of a static (shared buffer) track.  Only valid
// while the track is not active; see the race-condition rationale below.
// NOTE(review): mSharedBuffer and mFrameCount are read before taking mLock,
// matching the style of setLoop() — presumably both are immutable after
// set(); confirm.
status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // After setting the position, use full update period before notification.
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mStaticProxy->setBufferPosition(position);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
    return NO_ERROR;
}
1196
// Returns the playback head position in frames.  Offloaded/direct compressed
// tracks query the HAL render position (or the position cached by pause());
// linear PCM tracks use the shared-memory server position.
status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // FIXME: offloaded and direct tracks call into the HAL for render positions
    // for compressed/synced data; however, we use proxy position for pure linear pcm data
    // as we do not know the capability of the HAL for pcm position support and standby.
    // There may be some latency differences between the HAL position and the proxy position.
    if (isOffloadedOrDirect_l() && !isPurePcmData_l()) {
        uint32_t dspFrames = 0;

        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames; // actually unused
            (void) AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
            // FIXME: on getRenderPosition() error, we return OK with frame position 0.
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        if (mCblk->mFlags & CBLK_INVALID) {
            // Track was invalidated (e.g. server death or rerouting); try to re-create it.
            (void) restoreTrack_l("getPosition");
            // FIXME: for compatibility with the Java API we ignore the restoreTrack_l()
            // error here (e.g. DEAD_OBJECT) and return OK with the last recorded server position.
        }

        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l().value();
    }
    return NO_ERROR;
}
1238
// Returns the current position within a static (shared buffer) track;
// INVALID_OPERATION for streaming tracks.
status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}
1252
// Rewinds a static (shared buffer) track back to the start of its buffer so
// it can be replayed.  Only valid while inactive.  Loop points are NOT
// restored (see the disabled block below).
status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // Reset the notification schedule and client-side position tracking.
    mNewPosition = mUpdatePeriod;
    (void) updateAndGetPosition_l();
    mPosition = 0;
    mPreviousTimestampValid = false;
#if 0
    // The documentation is not clear on the behavior of reload() and the restoration
    // of loop count. Historically we have not restored loop count, start, end,
    // but it makes sense if one desires to repeat playing a particular sound.
    if (mLoopCount != 0) {
        mLoopCountNotified = mLoopCount;
        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
    }
#endif
    mStaticProxy->setBufferPosition(0);
    return NO_ERROR;
}
1280
// Returns the I/O handle of the output this track is attached to
// (AUDIO_IO_HANDLE_NONE before the track is created).
audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}
1286
1287status_t AudioTrack::setOutputDevice(audio_port_handle_t deviceId) {
1288    AutoMutex lock(mLock);
1289    if (mSelectedDeviceId != deviceId) {
1290        mSelectedDeviceId = deviceId;
1291        if (mStatus == NO_ERROR) {
1292            android_atomic_or(CBLK_INVALID, &mCblk->mFlags);
1293            mProxy->interrupt();
1294        }
1295    }
1296    return NO_ERROR;
1297}
1298
// Returns the explicitly selected routing device (not necessarily the device
// actually in use; see getRoutedDeviceId()).
audio_port_handle_t AudioTrack::getOutputDevice() {
    AutoMutex lock(mLock);
    return mSelectedDeviceId;
}
1303
1304// must be called with mLock held
1305void AudioTrack::updateRoutedDeviceId_l()
1306{
1307    // if the track is inactive, do not update actual device as the output stream maybe routed
1308    // to a device not relevant to this client because of other active use cases.
1309    if (mState != STATE_ACTIVE) {
1310        return;
1311    }
1312    if (mOutput != AUDIO_IO_HANDLE_NONE) {
1313        audio_port_handle_t deviceId = AudioSystem::getDeviceIdForIo(mOutput);
1314        if (deviceId != AUDIO_PORT_HANDLE_NONE) {
1315            mRoutedDeviceId = deviceId;
1316        }
1317    }
1318}
1319
// Returns the device the track is actually routed to, refreshing the cached
// value first when the track is active.
audio_port_handle_t AudioTrack::getRoutedDeviceId() {
    AutoMutex lock(mLock);
    updateRoutedDeviceId_l();
    return mRoutedDeviceId;
}
1325
1326status_t AudioTrack::attachAuxEffect(int effectId)
1327{
1328    AutoMutex lock(mLock);
1329    status_t status = mAudioTrack->attachAuxEffect(effectId);
1330    if (status == NO_ERROR) {
1331        mAuxEffectId = effectId;
1332    }
1333    return status;
1334}
1335
1336audio_stream_type_t AudioTrack::streamType() const
1337{
1338    if (mStreamType == AUDIO_STREAM_DEFAULT) {
1339        return audio_attributes_to_stream_type(&mAttributes);
1340    }
1341    return mStreamType;
1342}
1343
// Returns the total latency estimate (output latency plus buffer duration),
// refreshing the cached value from AudioSystem first.
uint32_t AudioTrack::latency()
{
    AutoMutex lock(mLock);
    updateLatency_l();
    return mLatency;
}
1350
1351// -------------------------------------------------------------------------
1352
// must be called with mLock held
// Refreshes mAfLatency from AudioSystem and recomputes mLatency as the
// output latency plus this track's buffer duration in milliseconds.
// On query failure the previous cached values are kept.
void AudioTrack::updateLatency_l()
{
    status_t status = AudioSystem::getLatency(mOutput, &mAfLatency);
    if (status != NO_ERROR) {
        ALOGW("getLatency(%d) failed status %d", mOutput, status);
    } else {
        // FIXME don't believe this lie
        mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;
    }
}
1364
1365// TODO Move this macro to a common header file for enum to string conversion in audio framework.
1366#define MEDIA_CASE_ENUM(name) case name: return #name
1367const char * AudioTrack::convertTransferToText(transfer_type transferType) {
1368    switch (transferType) {
1369        MEDIA_CASE_ENUM(TRANSFER_DEFAULT);
1370        MEDIA_CASE_ENUM(TRANSFER_CALLBACK);
1371        MEDIA_CASE_ENUM(TRANSFER_OBTAIN);
1372        MEDIA_CASE_ENUM(TRANSFER_SYNC);
1373        MEDIA_CASE_ENUM(TRANSFER_SHARED);
1374        default:
1375            return "UNRECOGNIZED";
1376    }
1377}
1378
// Creates (or re-creates) the server-side IAudioTrack via AudioFlinger and
// maps its control block and data buffers into this client, then rebuilds the
// client proxy and republishes volume, send level, sample rate, and playback
// rate.  On any failure, jumps to 'exit' where mStatus records the error.
// NOTE(review): the _l suffix implies the caller holds mLock — confirm.
status_t AudioTrack::createTrack_l()
{
    status_t status;
    bool callbackAdded = false;

    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        status = NO_INIT;
        goto exit;
    }

    {
    // mFlags (not mOrigFlags) is modified depending on whether fast request is accepted.
    // After fast request is denied, we will request again if IAudioTrack is re-created.
    // Client can only express a preference for FAST.  Server will perform additional tests.
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // either of these use cases:
        // use case 1: shared buffer
        bool sharedBuffer = mSharedBuffer != 0;
        bool transferAllowed =
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN) ||
            // use case 4: synchronous write
            ((mTransfer == TRANSFER_SYNC) && mThreadCanCallJava);

        bool fastAllowed = sharedBuffer || transferAllowed;
        if (!fastAllowed) {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client, not shared buffer and transfer = %s",
                  convertTransferToText(mTransfer));
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }

    // Assemble the creation request for AudioFlinger.
    IAudioFlinger::CreateTrackInput input;
    if (mStreamType != AUDIO_STREAM_DEFAULT) {
        stream_type_to_audio_attributes(mStreamType, &input.attr);
    } else {
        input.attr = mAttributes;
    }
    input.config = AUDIO_CONFIG_INITIALIZER;
    input.config.sample_rate = mSampleRate;
    input.config.channel_mask = mChannelMask;
    input.config.format = mFormat;
    input.config.offload_info = mOffloadInfoCopy;
    input.clientInfo.clientUid = mClientUid;
    input.clientInfo.clientPid = mClientPid;
    input.clientInfo.clientTid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        // It is currently meaningless to request SCHED_FIFO for a Java thread.  Even if the
        // application-level code follows all non-blocking design rules, the language runtime
        // doesn't also follow those rules, so the thread will not benefit overall.
        if (mAudioTrackThread != 0 && !mThreadCanCallJava) {
            input.clientInfo.clientTid = mAudioTrackThread->getTid();
        }
    }
    input.sharedBuffer = mSharedBuffer;
    input.notificationsPerBuffer = mNotificationsPerBufferReq;
    input.speed = 1.0;
    if (audio_has_proportional_frames(mFormat) && mSharedBuffer == 0 &&
            (mFlags & AUDIO_OUTPUT_FLAG_FAST) == 0) {
        input.speed  = !isPurePcmData_l() || isOffloadedOrDirect_l() ? 1.0f :
                        max(mMaxRequiredSpeed, mPlaybackRate.mSpeed);
    }
    input.flags = mFlags;
    input.frameCount = mReqFrameCount;
    input.notificationFrameCount = mNotificationFramesReq;
    input.selectedDeviceId = mSelectedDeviceId;
    input.sessionId = mSessionId;

    IAudioFlinger::CreateTrackOutput output;

    sp<IAudioTrack> track = audioFlinger->createTrack(input,
                                                      output,
                                                      &status);

    if (status != NO_ERROR || output.outputId == AUDIO_IO_HANDLE_NONE) {
        ALOGE("AudioFlinger could not create track, status: %d output %d", status, output.outputId);
        if (status == NO_ERROR) {
            status = NO_INIT;
        }
        goto exit;
    }
    ALOG_ASSERT(track != 0);

    // Adopt the actual parameters chosen by the server.
    mFrameCount = output.frameCount;
    mNotificationFramesAct = (uint32_t)output.notificationFrameCount;
    mRoutedDeviceId = output.selectedDeviceId;
    mSessionId = output.sessionId;

    mSampleRate = output.sampleRate;
    if (mOriginalSampleRate == 0) {
        mOriginalSampleRate = mSampleRate;
    }

    mAfFrameCount = output.afFrameCount;
    mAfSampleRate = output.afSampleRate;
    mAfLatency = output.afLatencyMs;

    mLatency = mAfLatency + (1000LL * mFrameCount) / mSampleRate;

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    // FIXME compare to AudioRecord
    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        status = NO_INIT;
        goto exit;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        status = NO_INIT;
        goto exit;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        // Stop watching the old track for binder death before replacing it.
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (output.flags & AUDIO_OUTPUT_FLAG_FAST) {
            ALOGI("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu -> %zu",
                  mReqFrameCount, mFrameCount);
            if (!mThreadCanCallJava) {
                mAwaitBoost = true;
            }
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu -> %zu", mReqFrameCount,
                  mFrameCount);
        }
    }
    mFlags = output.flags;

    //mOutput != output includes the case where mOutput == AUDIO_IO_HANDLE_NONE for first creation
    if (mDeviceCallback != 0 && mOutput != output.outputId) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            AudioSystem::removeAudioDeviceCallback(this, mOutput);
        }
        AudioSystem::addAudioDeviceCallback(this, output.outputId);
        callbackAdded = true;
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output.outputId;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = cblk + 1;
    } else {
        buffers = mSharedBuffer->pointer();
        if (buffers == NULL) {
            ALOGE("Could not get buffer pointer");
            status = NO_INIT;
            goto exit;
        }
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);

    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (mFrameCount > mReqFrameCount) {
        mReqFrameCount = mFrameCount;
    }

    // reset server position to 0 as we have new cblk.
    mServer = 0;

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, mFrameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    // Republish client-side settings to the new shared-memory control block.
    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    const uint32_t effectiveSampleRate = adjustSampleRate(mSampleRate, mPlaybackRate.mPitch);
    const float effectiveSpeed = adjustSpeed(mPlaybackRate.mSpeed, mPlaybackRate.mPitch);
    const float effectivePitch = adjustPitch(mPlaybackRate.mPitch);
    mProxy->setSampleRate(effectiveSampleRate);

    AudioPlaybackRate playbackRateTemp = mPlaybackRate;
    playbackRateTemp.mSpeed = effectiveSpeed;
    playbackRateTemp.mPitch = effectivePitch;
    mProxy->setPlaybackRate(playbackRateTemp);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    }

exit:
    if (status != NO_ERROR && callbackAdded) {
        // note: mOutput is always valid if callbackAdded is true
        AudioSystem::removeAudioDeviceCallback(this, mOutput);
    }

    mStatus = status;

    // sp<IAudioTrack> track destructor will cause releaseOutput() to be called by AudioFlinger
    return status;
}
1606
1607status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
1608{
1609    if (audioBuffer == NULL) {
1610        if (nonContig != NULL) {
1611            *nonContig = 0;
1612        }
1613        return BAD_VALUE;
1614    }
1615    if (mTransfer != TRANSFER_OBTAIN) {
1616        audioBuffer->frameCount = 0;
1617        audioBuffer->size = 0;
1618        audioBuffer->raw = NULL;
1619        if (nonContig != NULL) {
1620            *nonContig = 0;
1621        }
1622        return INVALID_OPERATION;
1623    }
1624
1625    const struct timespec *requested;
1626    struct timespec timeout;
1627    if (waitCount == -1) {
1628        requested = &ClientProxy::kForever;
1629    } else if (waitCount == 0) {
1630        requested = &ClientProxy::kNonBlocking;
1631    } else if (waitCount > 0) {
1632        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
1633        timeout.tv_sec = ms / 1000;
1634        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
1635        requested = &timeout;
1636    } else {
1637        ALOGE("%s invalid waitCount %d", __func__, waitCount);
1638        requested = NULL;
1639    }
1640    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
1641}
1642
// Core buffer-acquisition loop.  Retries up to kMaxTries times across track
// re-creation (DEAD_OBJECT) and underrun-disable (NOT_ENOUGH_DATA) events.
// mLock is taken only briefly per iteration; the proxy wait itself runs
// unlocked, with local sp<> copies keeping proxy/memory alive meanwhile.
status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
        struct timespec *elapsed, size_t *nonContig)
{
    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
    uint32_t oldSequence = 0;
    uint32_t newSequence;

    Proxy::Buffer buffer;
    status_t status = NO_ERROR;

    static const int32_t kMaxTries = 5;
    int32_t tryCounter = kMaxTries;

    do {
        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
        // keep them from going away if another thread re-creates the track during obtainBuffer()
        sp<AudioTrackClientProxy> proxy;
        sp<IMemory> iMem;

        {   // start of lock scope
            AutoMutex lock(mLock);

            newSequence = mSequence;
            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
            if (status == DEAD_OBJECT) {
                // re-create track, unless someone else has already done so
                if (newSequence == oldSequence) {
                    status = restoreTrack_l("obtainBuffer");
                    if (status != NO_ERROR) {
                        buffer.mFrameCount = 0;
                        buffer.mRaw = NULL;
                        buffer.mNonContig = 0;
                        break;
                    }
                }
            }
            oldSequence = newSequence;

            if (status == NOT_ENOUGH_DATA) {
                restartIfDisabled();
            }

            // Keep the extra references
            proxy = mProxy;
            iMem = mCblkMemory;

            if (mState == STATE_STOPPING) {
                // Stop in progress: abort the wait and report interruption.
                status = -EINTR;
                buffer.mFrameCount = 0;
                buffer.mRaw = NULL;
                buffer.mNonContig = 0;
                break;
            }

            // Non-blocking if track is stopped or paused
            if (mState != STATE_ACTIVE) {
                requested = &ClientProxy::kNonBlocking;
            }

        }   // end of lock scope

        buffer.mFrameCount = audioBuffer->frameCount;
        // FIXME starts the requested timeout and elapsed over from scratch
        status = proxy->obtainBuffer(&buffer, requested, elapsed);
    } while (((status == DEAD_OBJECT) || (status == NOT_ENOUGH_DATA)) && (tryCounter-- > 0));

    // Publish whatever the proxy returned (possibly an empty buffer on error).
    audioBuffer->frameCount = buffer.mFrameCount;
    audioBuffer->size = buffer.mFrameCount * mFrameSize;
    audioBuffer->raw = buffer.mRaw;
    if (nonContig != NULL) {
        *nonContig = buffer.mNonContig;
    }
    return status;
}
1717
1718void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1719{
1720    // FIXME add error checking on mode, by adding an internal version
1721    if (mTransfer == TRANSFER_SHARED) {
1722        return;
1723    }
1724
1725    size_t stepCount = audioBuffer->size / mFrameSize;
1726    if (stepCount == 0) {
1727        return;
1728    }
1729
1730    Proxy::Buffer buffer;
1731    buffer.mFrameCount = stepCount;
1732    buffer.mRaw = audioBuffer->raw;
1733
1734    AutoMutex lock(mLock);
1735    mReleased += stepCount;
1736    mInUnderrun = false;
1737    mProxy->releaseBuffer(&buffer);
1738
1739    // restart track if it was disabled by audioflinger due to previous underrun
1740    restartIfDisabled();
1741}
1742
1743void AudioTrack::restartIfDisabled()
1744{
1745    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);
1746    if ((mState == STATE_ACTIVE) && (flags & CBLK_DISABLED)) {
1747        ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1748        // FIXME ignoring status
1749        mAudioTrack->start();
1750    }
1751}
1752
1753// -------------------------------------------------------------------------
1754
1755ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1756{
1757    if (mTransfer != TRANSFER_SYNC) {
1758        return INVALID_OPERATION;
1759    }
1760
1761    if (isDirect()) {
1762        AutoMutex lock(mLock);
1763        int32_t flags = android_atomic_and(
1764                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1765                            &mCblk->mFlags);
1766        if (flags & CBLK_INVALID) {
1767            return DEAD_OBJECT;
1768        }
1769    }
1770
1771    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1772        // Sanity-check: user is most-likely passing an error code, and it would
1773        // make the return value ambiguous (actualSize vs error).
1774        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd)", buffer, userSize, userSize);
1775        return BAD_VALUE;
1776    }
1777
1778    size_t written = 0;
1779    Buffer audioBuffer;
1780
1781    while (userSize >= mFrameSize) {
1782        audioBuffer.frameCount = userSize / mFrameSize;
1783
1784        status_t err = obtainBuffer(&audioBuffer,
1785                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1786        if (err < 0) {
1787            if (written > 0) {
1788                break;
1789            }
1790            if (err == TIMED_OUT || err == -EINTR) {
1791                err = WOULD_BLOCK;
1792            }
1793            return ssize_t(err);
1794        }
1795
1796        size_t toWrite = audioBuffer.size;
1797        memcpy(audioBuffer.i8, buffer, toWrite);
1798        buffer = ((const char *) buffer) + toWrite;
1799        userSize -= toWrite;
1800        written += toWrite;
1801
1802        releaseBuffer(&audioBuffer);
1803    }
1804
1805    if (written > 0) {
1806        mFramesWritten += written / mFrameSize;
1807    }
1808    return written;
1809}
1810
1811// -------------------------------------------------------------------------
1812
// Body of the AudioTrackThread loop: dispatches callback events (underrun, marker,
// new position, loop, buffer end, stream end, new IAudioTrack) and, for
// TRANSFER_CALLBACK tracks, pulls data from the client via EVENT_MORE_DATA.
// Returns the number of nanoseconds until it should be called again:
//   0           - run again immediately,
//   NS_INACTIVE - do not run again until the track is re-started,
//   NS_NEVER    - permanent error, stop servicing,
//   NS_WHENEVER - no particular deadline.
nsecs_t AudioTrack::processAudioBuffer()
{
    // Currently the AudioTrack thread is not created if there are no callbacks.
    // Would it ever make sense to run the thread, even without callbacks?
    // If so, then replace this by checks at each use for mCbf != NULL.
    LOG_ALWAYS_FATAL_IF(mCblk == NULL);

    mLock.lock();
    if (mAwaitBoost) {
        mAwaitBoost = false;
        mLock.unlock();
        static const int32_t kMaxTries = 5;
        int32_t tryCounter = kMaxTries;
        // Initial poll interval; doubled on each retry (exponential backoff).
        uint32_t pollUs = 10000;
        do {
            int policy = sched_getscheduler(0) & ~SCHED_RESET_ON_FORK;
            if (policy == SCHED_FIFO || policy == SCHED_RR) {
                break;
            }
            usleep(pollUs);
            pollUs <<= 1;
        } while (tryCounter-- > 0);
        if (tryCounter < 0) {
            ALOGE("did not receive expected priority boost on time");
        }
        // Run again immediately
        return 0;
    }

    // Can only reference mCblk while locked
    int32_t flags = android_atomic_and(
        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);

    // Check for track invalidation
    if (flags & CBLK_INVALID) {
        // for offloaded tracks restoreTrack_l() will just update the sequence and clear
        // AudioSystem cache. We should not exit here but after calling the callback so
        // that the upper layers can recreate the track
        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
            status_t status __unused = restoreTrack_l("processAudioBuffer");
            // FIXME unused status
            // after restoration, continue below to make sure that the loop and buffer events
            // are notified because they have been cleared from mCblk->mFlags above.
        }
    }

    bool waitStreamEnd = mState == STATE_STOPPING;
    bool active = mState == STATE_ACTIVE;

    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
    bool newUnderrun = false;
    if (flags & CBLK_UNDERRUN) {
#if 0
        // Currently in shared buffer mode, when the server reaches the end of buffer,
        // the track stays active in continuous underrun state.  It's up to the application
        // to pause or stop the track, or set the position to a new offset within buffer.
        // This was some experimental code to auto-pause on underrun.   Keeping it here
        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
        if (mTransfer == TRANSFER_SHARED) {
            mState = STATE_PAUSED;
            active = false;
        }
#endif
        if (!mInUnderrun) {
            mInUnderrun = true;
            newUnderrun = true;
        }
    }

    // Get current position of server
    Modulo<uint32_t> position(updateAndGetPosition_l());

    // Manage marker callback
    bool markerReached = false;
    Modulo<uint32_t> markerPosition(mMarkerPosition);
    // uses 32 bit wraparound for comparison with position.
    if (!mMarkerReached && markerPosition.value() > 0 && position >= markerPosition) {
        mMarkerReached = markerReached = true;
    }

    // Determine number of new position callback(s) that will be needed, while locked
    size_t newPosCount = 0;
    Modulo<uint32_t> newPosition(mNewPosition);
    uint32_t updatePeriod = mUpdatePeriod;
    // FIXME fails for wraparound, need 64 bits
    if (updatePeriod > 0 && position >= newPosition) {
        newPosCount = ((position - newPosition).value() / updatePeriod) + 1;
        mNewPosition += updatePeriod * newPosCount;
    }

    // Cache other fields that will be needed soon, so they can be used after unlocking.
    uint32_t sampleRate = mSampleRate;
    float speed = mPlaybackRate.mSpeed;
    const uint32_t notificationFrames = mNotificationFramesAct;
    if (mRefreshRemaining) {
        mRefreshRemaining = false;
        mRemainingFrames = notificationFrames;
        mRetryOnPartialBuffer = false;
    }
    size_t misalignment = mProxy->getMisalignment();
    uint32_t sequence = mSequence;
    sp<AudioTrackClientProxy> proxy = mProxy;

    // Determine the number of new loop callback(s) that will be needed, while locked.
    int loopCountNotifications = 0;
    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END

    if (mLoopCount > 0) {
        int loopCount;
        size_t bufferPosition;
        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
        // Cap the burst of notifications at kMaxLoopCountNotifications per pass.
        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
        mLoopCountNotified = loopCount; // discard any excess notifications
    } else if (mLoopCount < 0) {
        // FIXME: We're not accurate with notification count and position with infinite looping
        // since loopCount from server side will always return -1 (we could decrement it).
        size_t bufferPosition = mStaticProxy->getBufferPosition();
        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
        loopPeriod = mLoopEnd - bufferPosition;
    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
        size_t bufferPosition = mStaticProxy->getBufferPosition();
        loopPeriod = mFrameCount - bufferPosition;
    }

    // These fields don't need to be cached, because they are assigned only by set():
    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
    // mFlags is also assigned by createTrack_l(), but not the bit we care about.

    mLock.unlock();

    // get anchor time to account for callbacks.
    const nsecs_t timeBeforeCallbacks = systemTime();

    if (waitStreamEnd) {
        // FIXME:  Instead of blocking in proxy->waitStreamEndDone(), Callback thread
        // should wait on proxy futex and handle CBLK_STREAM_END_DONE within this function
        // (and make sure we don't callback for more data while we're stopping).
        // This helps with position, marker notifications, and track invalidation.
        struct timespec timeout;
        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
        timeout.tv_nsec = 0;

        status_t status = proxy->waitStreamEndDone(&timeout);
        switch (status) {
        case NO_ERROR:
        case DEAD_OBJECT:
        case TIMED_OUT:
            if (status != DEAD_OBJECT) {
                // for DEAD_OBJECT, we do not send a EVENT_STREAM_END after stop();
                // instead, the application should handle the EVENT_NEW_IAUDIOTRACK.
                mCbf(EVENT_STREAM_END, mUserData, NULL);
            }
            {
                AutoMutex lock(mLock);
                // The previously assigned value of waitStreamEnd is no longer valid,
                // since the mutex has been unlocked and either the callback handler
                // or another thread could have re-started the AudioTrack during that time.
                waitStreamEnd = mState == STATE_STOPPING;
                if (waitStreamEnd) {
                    mState = STATE_STOPPED;
                    mReleased = 0;
                }
            }
            if (waitStreamEnd && status != DEAD_OBJECT) {
               return NS_INACTIVE;
            }
            break;
        }
        return 0;
    }

    // perform callbacks while unlocked
    if (newUnderrun) {
        mCbf(EVENT_UNDERRUN, mUserData, NULL);
    }
    while (loopCountNotifications > 0) {
        mCbf(EVENT_LOOP_END, mUserData, NULL);
        --loopCountNotifications;
    }
    if (flags & CBLK_BUFFER_END) {
        mCbf(EVENT_BUFFER_END, mUserData, NULL);
    }
    if (markerReached) {
        mCbf(EVENT_MARKER, mUserData, &markerPosition);
    }
    while (newPosCount > 0) {
        size_t temp = newPosition.value(); // FIXME size_t != uint32_t
        mCbf(EVENT_NEW_POS, mUserData, &temp);
        newPosition += updatePeriod;
        newPosCount--;
    }

    if (mObservedSequence != sequence) {
        mObservedSequence = sequence;
        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
        // for offloaded tracks, just wait for the upper layers to recreate the track
        if (isOffloadedOrDirect()) {
            return NS_INACTIVE;
        }
    }

    // if inactive, then don't run me again until re-started
    if (!active) {
        return NS_INACTIVE;
    }

    // Compute the estimated time until the next timed event (position, markers, loops)
    // FIXME only for non-compressed audio
    uint32_t minFrames = ~0;
    if (!markerReached && position < markerPosition) {
        minFrames = (markerPosition - position).value();
    }
    if (loopPeriod > 0 && loopPeriod < minFrames) {
        // loopPeriod is already adjusted for actual position.
        minFrames = loopPeriod;
    }
    if (updatePeriod > 0) {
        minFrames = min(minFrames, (newPosition - position).value());
    }

    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
    static const uint32_t kPoll = 0;
    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
        minFrames = kPoll * notificationFrames;
    }

    // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
    static const nsecs_t kWaitPeriodNs = WAIT_PERIOD_MS * 1000000LL;
    const nsecs_t timeAfterCallbacks = systemTime();

    // Convert frame units to time units
    nsecs_t ns = NS_WHENEVER;
    if (minFrames != (uint32_t) ~0) {
        // AudioFlinger consumption of client data may be irregular when coming out of device
        // standby since the kernel buffers require filling. This is throttled to no more than 2x
        // the expected rate in the MixerThread. Hence, we reduce the estimated time to wait by one
        // half (but no more than half a second) to improve callback accuracy during these temporary
        // data surges.
        const nsecs_t estimatedNs = framesToNanoseconds(minFrames, sampleRate, speed);
        constexpr nsecs_t maxThrottleCompensationNs = 500000000LL;
        ns = estimatedNs - min(estimatedNs / 2, maxThrottleCompensationNs) + kWaitPeriodNs;
        ns -= (timeAfterCallbacks - timeBeforeCallbacks);  // account for callback time
        // TODO: Should we warn if the callback time is too long?
        if (ns < 0) ns = 0;
    }

    // If not supplying data by EVENT_MORE_DATA, then we're done
    if (mTransfer != TRANSFER_CALLBACK) {
        return ns;
    }

    // EVENT_MORE_DATA callback handling.
    // Timing for linear pcm audio data formats can be derived directly from the
    // buffer fill level.
    // Timing for compressed data is not directly available from the buffer fill level,
    // rather indirectly from waiting for blocking mode callbacks or waiting for obtain()
    // to return a certain fill level.

    struct timespec timeout;
    const struct timespec *requested = &ClientProxy::kForever;
    if (ns != NS_WHENEVER) {
        timeout.tv_sec = ns / 1000000000LL;
        timeout.tv_nsec = ns % 1000000000LL;
        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
        requested = &timeout;
    }

    size_t writtenFrames = 0;
    while (mRemainingFrames > 0) {

        Buffer audioBuffer;
        audioBuffer.frameCount = mRemainingFrames;
        size_t nonContig;
        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
        // Only the first obtainBuffer() in this loop may block; later ones must not.
        requested = &ClientProxy::kNonBlocking;
        size_t avail = audioBuffer.frameCount + nonContig;
        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
        if (err != NO_ERROR) {
            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
                    (isOffloaded() && (err == DEAD_OBJECT))) {
                // FIXME bug 25195759
                // Retry shortly (1 ms) rather than treating this as fatal.
                return 1000000;
            }
            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
            return NS_NEVER;
        }

        if (mRetryOnPartialBuffer && audio_has_proportional_frames(mFormat)) {
            mRetryOnPartialBuffer = false;
            if (avail < mRemainingFrames) {
                if (ns > 0) { // account for obtain time
                    const nsecs_t timeNow = systemTime();
                    ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
                }
                nsecs_t myns = framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
                if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
                    ns = myns;
                }
                return ns;
            }
        }

        size_t reqSize = audioBuffer.size;
        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
        // The callback communicates how much it filled by updating audioBuffer.size.
        size_t writtenSize = audioBuffer.size;

        // Sanity check on returned size
        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
                    reqSize, ssize_t(writtenSize));
            return NS_NEVER;
        }

        if (writtenSize == 0) {
            // The callback is done filling buffers
            // Keep this thread going to handle timed events and
            // still try to get more data in intervals of WAIT_PERIOD_MS
            // but don't just loop and block the CPU, so wait

            // mCbf(EVENT_MORE_DATA, ...) might either
            // (1) Block until it can fill the buffer, returning 0 size on EOS.
            // (2) Block until it can fill the buffer, returning 0 data (silence) on EOS.
            // (3) Return 0 size when no data is available, does not wait for more data.
            //
            // (1) and (2) occurs with AudioPlayer/AwesomePlayer; (3) occurs with NuPlayer.
            // We try to compute the wait time to avoid a tight sleep-wait cycle,
            // especially for case (3).
            //
            // The decision to support (1) and (2) affect the sizing of mRemainingFrames
            // and this loop; whereas for case (3) we could simply check once with the full
            // buffer size and skip the loop entirely.

            nsecs_t myns;
            if (audio_has_proportional_frames(mFormat)) {
                // time to wait based on buffer occupancy
                const nsecs_t datans = mRemainingFrames <= avail ? 0 :
                        framesToNanoseconds(mRemainingFrames - avail, sampleRate, speed);
                // audio flinger thread buffer size (TODO: adjust for fast tracks)
                // FIXME: use mAfFrameCountHAL instead of mAfFrameCount below for fast tracks.
                const nsecs_t afns = framesToNanoseconds(mAfFrameCount, mAfSampleRate, speed);
                // add a half the AudioFlinger buffer time to avoid soaking CPU if datans is 0.
                myns = datans + (afns / 2);
            } else {
                // FIXME: This could ping quite a bit if the buffer isn't full.
                // Note that when mState is stopping we waitStreamEnd, so it never gets here.
                myns = kWaitPeriodNs;
            }
            if (ns > 0) { // account for obtain and callback time
                const nsecs_t timeNow = systemTime();
                ns = max((nsecs_t)0, ns - (timeNow - timeAfterCallbacks));
            }
            if (ns < 0 /* NS_WHENEVER */ || myns < ns) {
                ns = myns;
            }
            return ns;
        }

        size_t releasedFrames = writtenSize / mFrameSize;
        audioBuffer.frameCount = releasedFrames;
        mRemainingFrames -= releasedFrames;
        if (misalignment >= releasedFrames) {
            misalignment -= releasedFrames;
        } else {
            misalignment = 0;
        }

        releaseBuffer(&audioBuffer);
        writtenFrames += releasedFrames;

        // FIXME here is where we would repeat EVENT_MORE_DATA again on same advanced buffer
        // if callback doesn't like to accept the full chunk
        if (writtenSize < reqSize) {
            continue;
        }

        // There could be enough non-contiguous frames available to satisfy the remaining request
        if (mRemainingFrames <= nonContig) {
            continue;
        }

#if 0
        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
        // that total to a sum == notificationFrames.
        if (0 < misalignment && misalignment <= mRemainingFrames) {
            mRemainingFrames = misalignment;
            return ((double)mRemainingFrames * 1100000000) / ((double)sampleRate * speed);
        }
#endif

    }
    if (writtenFrames > 0) {
        AutoMutex lock(mLock);
        mFramesWritten += writtenFrames;
    }
    mRemainingFrames = notificationFrames;
    mRetryOnPartialBuffer = true;

    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
    return 0;
}
2218
// Re-creates the server-side IAudioTrack after invalidation or media server death.
// Must be called with mLock held (per the _l suffix convention used by callers).
// Bumps mSequence so other threads can observe the restore, then attempts to
// recreate the track and restore buffer position, loop state, and volume shapers.
// Returns NO_ERROR on success; DEAD_OBJECT for offloaded/direct tracks or when
// reconnect is disallowed; otherwise the error from createTrack_l()/start().
status_t AudioTrack::restoreTrack_l(const char *from)
{
    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
    ++mSequence;

    // refresh the audio configuration cache in this process to make sure we get new
    // output parameters and new IAudioFlinger in createTrack_l()
    AudioSystem::clearAudioConfigCache();

    if (isOffloadedOrDirect_l() || mDoNotReconnect) {
        // FIXME re-creation of offloaded and direct tracks is not yet implemented;
        // reconsider enabling for linear PCM encodings when position can be preserved.
        return DEAD_OBJECT;
    }

    // Save so we can return count since creation.
    mUnderrunCountOffset = getUnderrunCount_l();

    // save the old static buffer position (must happen before createTrack_l() replaces
    // the proxy)
    uint32_t staticPosition = 0;
    size_t bufferPosition = 0;
    int loopCount = 0;
    if (mStaticProxy != 0) {
        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
        staticPosition = mStaticProxy->getPosition().unsignedValue();
    }

    // Reset mFlags to the originally requested values before re-creating the track.
    mFlags = mOrigFlags;

    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
    // following member variables: mAudioTrack, mCblkMemory and mCblk.
    // It will also delete the strong references on previous IAudioTrack and IMemory.
    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
    status_t result = createTrack_l();

    if (result == NO_ERROR) {
        // take the frames that will be lost by track recreation into account in saved position
        // For streaming tracks, this is the amount we obtained from the user/client
        // (not the number actually consumed at the server - those are already lost).
        if (mStaticProxy == 0) {
            mPosition = mReleased;
        }
        // Continue playback from last known position and restore loop.
        if (mStaticProxy != 0) {
            if (loopCount != 0) {
                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
                        mLoopStart, mLoopEnd, loopCount);
            } else {
                mStaticProxy->setBufferPosition(bufferPosition);
                if (bufferPosition == mFrameCount) {
                    ALOGD("restoring track at end of static buffer");
                }
            }
        }
        // restore volume handler: re-apply every saved VolumeShaper to the new track
        mVolumeHandler->forall([this](const VolumeShaper &shaper) -> VolumeShaper::Status {
            sp<VolumeShaper::Operation> operationToEnd =
                    new VolumeShaper::Operation(shaper.mOperation);
            // TODO: Ideally we would restore to the exact xOffset position
            // as returned by getVolumeShaperState(), but we don't have that
            // information when restoring at the client unless we periodically poll
            // the server or create shared memory state.
            //
            // For now, we simply advance to the end of the VolumeShaper effect
            // if it has been started.
            if (shaper.isStarted()) {
                operationToEnd->setNormalizedTime(1.f);
            }
            return mAudioTrack->applyVolumeShaper(shaper.mConfiguration, operationToEnd);
        });

        if (mState == STATE_ACTIVE) {
            result = mAudioTrack->start();
        }
        // server resets to zero so we offset
        mFramesWrittenServerOffset =
                mStaticProxy.get() != nullptr ? staticPosition : mFramesWritten;
        mFramesWrittenAtRestore = mFramesWrittenServerOffset;
    }
    if (result != NO_ERROR) {
        ALOGW("restoreTrack_l() failed status %d", result);
        mState = STATE_STOPPED;
        mReleased = 0;
    }

    return result;
}
2307
2308Modulo<uint32_t> AudioTrack::updateAndGetPosition_l()
2309{
2310    // This is the sole place to read server consumed frames
2311    Modulo<uint32_t> newServer(mProxy->getPosition());
2312    const int32_t delta = (newServer - mServer).signedValue();
2313    // TODO There is controversy about whether there can be "negative jitter" in server position.
2314    //      This should be investigated further, and if possible, it should be addressed.
2315    //      A more definite failure mode is infrequent polling by client.
2316    //      One could call (void)getPosition_l() in releaseBuffer(),
2317    //      so mReleased and mPosition are always lock-step as best possible.
2318    //      That should ensure delta never goes negative for infrequent polling
2319    //      unless the server has more than 2^31 frames in its buffer,
2320    //      in which case the use of uint32_t for these counters has bigger issues.
2321    ALOGE_IF(delta < 0,
2322            "detected illegal retrograde motion by the server: mServer advanced by %d",
2323            delta);
2324    mServer = newServer;
2325    if (delta > 0) { // avoid retrograde
2326        mPosition += delta;
2327    }
2328    return mPosition;
2329}
2330
2331bool AudioTrack::isSampleRateSpeedAllowed_l(uint32_t sampleRate, float speed)
2332{
2333    updateLatency_l();
2334    // applicable for mixing tracks only (not offloaded or direct)
2335    if (mStaticProxy != 0) {
2336        return true; // static tracks do not have issues with buffer sizing.
2337    }
2338    const size_t minFrameCount =
2339            AudioSystem::calculateMinFrameCount(mAfLatency, mAfFrameCount, mAfSampleRate,
2340                                            sampleRate, speed /*, 0 mNotificationsPerBufferReq*/);
2341    const bool allowed = mFrameCount >= minFrameCount;
2342    ALOGD_IF(!allowed,
2343            "isSampleRateSpeedAllowed_l denied "
2344            "mAfLatency:%u  mAfFrameCount:%zu  mAfSampleRate:%u  sampleRate:%u  speed:%f "
2345            "mFrameCount:%zu < minFrameCount:%zu",
2346            mAfLatency, mAfFrameCount, mAfSampleRate, sampleRate, speed,
2347            mFrameCount, minFrameCount);
2348    return allowed;
2349}
2350
2351status_t AudioTrack::setParameters(const String8& keyValuePairs)
2352{
2353    AutoMutex lock(mLock);
2354    return mAudioTrack->setParameters(keyValuePairs);
2355}
2356
2357VolumeShaper::Status AudioTrack::applyVolumeShaper(
2358        const sp<VolumeShaper::Configuration>& configuration,
2359        const sp<VolumeShaper::Operation>& operation)
2360{
2361    AutoMutex lock(mLock);
2362    mVolumeHandler->setIdIfNecessary(configuration);
2363    VolumeShaper::Status status = mAudioTrack->applyVolumeShaper(configuration, operation);
2364
2365    if (status == DEAD_OBJECT) {
2366        if (restoreTrack_l("applyVolumeShaper") == OK) {
2367            status = mAudioTrack->applyVolumeShaper(configuration, operation);
2368        }
2369    }
2370    if (status >= 0) {
2371        // save VolumeShaper for restore
2372        mVolumeHandler->applyVolumeShaper(configuration, operation);
2373        if (mState == STATE_ACTIVE || mState == STATE_STOPPING) {
2374            mVolumeHandler->setStarted();
2375        }
2376    } else {
2377        // warn only if not an expected restore failure.
2378        ALOGW_IF(!((isOffloadedOrDirect_l() || mDoNotReconnect) && status == DEAD_OBJECT),
2379                "applyVolumeShaper failed: %d", status);
2380    }
2381    return status;
2382}
2383
2384sp<VolumeShaper::State> AudioTrack::getVolumeShaperState(int id)
2385{
2386    AutoMutex lock(mLock);
2387    sp<VolumeShaper::State> state = mAudioTrack->getVolumeShaperState(id);
2388    if (state.get() == nullptr && (mCblk->mFlags & CBLK_INVALID) != 0) {
2389        if (restoreTrack_l("getVolumeShaperState") == OK) {
2390            state = mAudioTrack->getVolumeShaperState(id);
2391        }
2392    }
2393    return state;
2394}
2395
2396status_t AudioTrack::getTimestamp(ExtendedTimestamp *timestamp)
2397{
2398    if (timestamp == nullptr) {
2399        return BAD_VALUE;
2400    }
2401    AutoMutex lock(mLock);
2402    return getTimestamp_l(timestamp);
2403}
2404
2405status_t AudioTrack::getTimestamp_l(ExtendedTimestamp *timestamp)
2406{
2407    if (mCblk->mFlags & CBLK_INVALID) {
2408        const status_t status = restoreTrack_l("getTimestampExtended");
2409        if (status != OK) {
2410            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
2411            // recommending that the track be recreated.
2412            return DEAD_OBJECT;
2413        }
2414    }
2415    // check for offloaded/direct here in case restoring somehow changed those flags.
2416    if (isOffloadedOrDirect_l()) {
2417        return INVALID_OPERATION; // not supported
2418    }
2419    status_t status = mProxy->getTimestamp(timestamp);
2420    LOG_ALWAYS_FATAL_IF(status != OK, "status %d not allowed from proxy getTimestamp", status);
2421    bool found = false;
2422    timestamp->mPosition[ExtendedTimestamp::LOCATION_CLIENT] = mFramesWritten;
2423    timestamp->mTimeNs[ExtendedTimestamp::LOCATION_CLIENT] = 0;
2424    // server side frame offset in case AudioTrack has been restored.
2425    for (int i = ExtendedTimestamp::LOCATION_SERVER;
2426            i < ExtendedTimestamp::LOCATION_MAX; ++i) {
2427        if (timestamp->mTimeNs[i] >= 0) {
2428            // apply server offset (frames flushed is ignored
2429            // so we don't report the jump when the flush occurs).
2430            timestamp->mPosition[i] += mFramesWrittenServerOffset;
2431            found = true;
2432        }
2433    }
2434    return found ? OK : WOULD_BLOCK;
2435}
2436
2437status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
2438{
2439    AutoMutex lock(mLock);
2440    return getTimestamp_l(timestamp);
2441}
2442
// Core timestamp query, called with mLock held.
// Reconciles the raw server/kernel timestamp with client-side state:
// restores an invalidated track, adjusts the position when the best timestamp
// location migrates between kernel and server, freshens stale times while
// paused, detects startup glitches on offloaded tracks, converts server
// positions to the client time base, and suppresses retrograde motion.
// Returns NO_ERROR with a valid timestamp, WOULD_BLOCK when none is available
// yet, DEAD_OBJECT if restore failed, or INVALID_OPERATION where unsupported.
status_t AudioTrack::getTimestamp_l(AudioTimestamp& timestamp)
{
    bool previousTimestampValid = mPreviousTimestampValid;
    // Set false here to cover all the error return cases.
    mPreviousTimestampValid = false;

    // Gate on playback state: only ACTIVE/PAUSED (and, for offload,
    // STOPPING/PAUSED_STOPPING) can yield a meaningful timestamp.
    switch (mState) {
    case STATE_ACTIVE:
    case STATE_PAUSED:
        break; // handle below
    case STATE_FLUSHED:
    case STATE_STOPPED:
        return WOULD_BLOCK;
    case STATE_STOPPING:
    case STATE_PAUSED_STOPPING:
        if (!isOffloaded_l()) {
            return INVALID_OPERATION;
        }
        break; // offloaded tracks handled below
    default:
        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
        break;
    }

    if (mCblk->mFlags & CBLK_INVALID) {
        const status_t status = restoreTrack_l("getTimestamp");
        if (status != OK) {
            // per getTimestamp() API doc in header, we return DEAD_OBJECT here,
            // recommending that the track be recreated.
            return DEAD_OBJECT;
        }
    }

    // The presented frame count must always lag behind the consumed frame count.
    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.

    status_t status;
    if (isOffloadedOrDirect_l()) {
        // use Binder to get timestamp
        status = mAudioTrack->getTimestamp(timestamp);
    } else {
        // read timestamp from shared memory
        ExtendedTimestamp ets;
        status = mProxy->getTimestamp(&ets);
        if (status == OK) {
            ExtendedTimestamp::Location location;
            status = ets.getBestTimestamp(&timestamp, &location);

            if (status == OK) {
                updateLatency_l();
                // It is possible that the best location has moved from the kernel to the server.
                // In this case we adjust the position from the previous computed latency.
                if (location == ExtendedTimestamp::LOCATION_SERVER) {
                    ALOGW_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_KERNEL,
                            "getTimestamp() location moved from kernel to server");
                    // check that the last kernel OK time info exists and the positions
                    // are valid (if they predate the current track, the positions may
                    // be zero or negative).
                    // Fallback when invalid: estimate the pipeline depth from the
                    // audio-flinger output latency at the current playback speed.
                    const int64_t frames =
                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
                            ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0 ||
                            ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] <= 0 ||
                            ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] <= 0)
                            ?
                            int64_t((double)mAfLatency * mSampleRate * mPlaybackRate.mSpeed
                                    / 1000)
                            :
                            (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
                            - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK]);
                    ALOGV("frame adjustment:%lld  timestamp:%s",
                            (long long)frames, ets.toString().c_str());
                    // Clamp at zero: the adjusted position must not go negative.
                    if (frames >= ets.mPosition[location]) {
                        timestamp.mPosition = 0;
                    } else {
                        timestamp.mPosition = (uint32_t)(ets.mPosition[location] - frames);
                    }
                } else if (location == ExtendedTimestamp::LOCATION_KERNEL) {
                    ALOGV_IF(mPreviousLocation == ExtendedTimestamp::LOCATION_SERVER,
                            "getTimestamp() location moved from server to kernel");
                }

                // We update the timestamp time even when paused.
                if (mState == STATE_PAUSED /* not needed: STATE_PAUSED_STOPPING */) {
                    const int64_t now = systemTime();
                    const int64_t at = audio_utils_ns_from_timespec(&timestamp.mTime);
                    // lag: how stale a paused timestamp time may legitimately be,
                    // estimated from the server/kernel pipeline depth (or mAfLatency
                    // as a fallback when the last-kernel-OK info is unavailable).
                    const int64_t lag =
                            (ets.mTimeNs[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK] < 0 ||
                                ets.mTimeNs[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK] < 0)
                            ? int64_t(mAfLatency * 1000000LL)
                            : (ets.mPosition[ExtendedTimestamp::LOCATION_SERVER_LASTKERNELOK]
                             - ets.mPosition[ExtendedTimestamp::LOCATION_KERNEL_LASTKERNELOK])
                             * NANOS_PER_SECOND / mSampleRate;
                    const int64_t limit = now - lag; // no earlier than this limit
                    if (at < limit) {
                        ALOGV("timestamp pause lag:%lld adjusting from %lld to %lld",
                                (long long)lag, (long long)at, (long long)limit);
                        timestamp.mTime = convertNsToTimespec(limit);
                    }
                }
                mPreviousLocation = location;
            } else {
                // right after AudioTrack is started, one may not find a timestamp
                ALOGV("getBestTimestamp did not find timestamp");
            }
        }
        if (status == INVALID_OPERATION) {
            // INVALID_OPERATION occurs when no timestamp has been issued by the server;
            // other failures are signaled by a negative time.
            // If we come out of FLUSHED or STOPPED where the position is known
            // to be zero we convert this to WOULD_BLOCK (with the implicit meaning of
            // "zero" for NuPlayer).  We don't convert for track restoration as position
            // does not reset.
            ALOGV("timestamp server offset:%lld restore frames:%lld",
                    (long long)mFramesWrittenServerOffset, (long long)mFramesWrittenAtRestore);
            if (mFramesWrittenServerOffset != mFramesWrittenAtRestore) {
                status = WOULD_BLOCK;
            }
        }
    }
    if (status != NO_ERROR) {
        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
        return status;
    }
    if (isOffloadedOrDirect_l()) {
        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
            // use cached paused position in case another offloaded track is running.
            timestamp.mPosition = mPausedPosition;
            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
            // TODO: adjust for delay
            return NO_ERROR;
        }

        // Check whether a pending flush or stop has completed, as those commands may
        // be asynchronous or return near finish or exhibit glitchy behavior.
        //
        // Originally this showed up as the first timestamp being a continuation of
        // the previous song under gapless playback.
        // However, we sometimes see zero timestamps, then a glitch of
        // the previous song's position, and then correct timestamps afterwards.
        if (mStartFromZeroUs != 0 && mSampleRate != 0) {
            static const int kTimeJitterUs = 100000; // 100 ms
            static const int k1SecUs = 1000000;

            const int64_t timeNow = getNowUs();

            if (timeNow < mStartFromZeroUs + k1SecUs) { // within first second of starting
                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
                if (timestampTimeUs < mStartFromZeroUs) {
                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
                }
                const int64_t deltaTimeUs = timestampTimeUs - mStartFromZeroUs;
                // Position advance expressed in microseconds at the current speed.
                const int64_t deltaPositionByUs = (double)timestamp.mPosition * 1000000
                        / ((double)mSampleRate * mPlaybackRate.mSpeed);

                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
                    // Verify that the counter can't count faster than the sample rate
                    // since the start time.  If greater, then that means we may have failed
                    // to completely flush or stop the previous playing track.
                    ALOGW_IF(!mTimestampStartupGlitchReported,
                            "getTimestamp startup glitch detected"
                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
                            timestamp.mPosition);
                    mTimestampStartupGlitchReported = true;
                    if (previousTimestampValid
                            && mPreviousTimestamp.mPosition == 0 /* should be true if valid */) {
                        timestamp = mPreviousTimestamp;
                        mPreviousTimestampValid = true;
                        return NO_ERROR;
                    }
                    return WOULD_BLOCK;
                }
                if (deltaPositionByUs != 0) {
                    mStartFromZeroUs = 0; // don't check again, we got valid nonzero position.
                }
            } else {
                mStartFromZeroUs = 0; // don't check again, start time expired.
            }
            mTimestampStartupGlitchReported = false;
        }
    } else {
        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
        (void) updateAndGetPosition_l();
        // Server consumed (mServer) and presented both use the same server time base,
        // and server consumed is always >= presented.
        // The delta between these represents the number of frames in the buffer pipeline.
        // If this delta between these is greater than the client position, it means that
        // actually presented is still stuck at the starting line (figuratively speaking),
        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
        // Note: We explicitly use non-Modulo comparison here - potential wrap issue when
        // mPosition exceeds 32 bits.
        // TODO Remove when timestamp is updated to contain pipeline status info.
        const int32_t pipelineDepthInFrames = (mServer - timestamp.mPosition).signedValue();
        if (pipelineDepthInFrames > 0 /* should be true, but we check anyways */
                && (uint32_t)pipelineDepthInFrames > mPosition.value()) {
            return INVALID_OPERATION;
        }
        // Convert timestamp position from server time base to client time base.
        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
        // But if we change it to 64-bit then this could fail.
        // Use Modulo computation here.
        timestamp.mPosition = (mPosition - mServer + timestamp.mPosition).value();
        // Immediately after a call to getPosition_l(), mPosition and
        // mServer both represent the same frame position.  mPosition is
        // in client's point of view, and mServer is in server's point of
        // view.  So the difference between them is the "fudge factor"
        // between client and server views due to stop() and/or new
        // IAudioTrack.  And timestamp.mPosition is initially in server's
        // point of view, so we need to apply the same fudge factor to it.
    }

    // Prevent retrograde motion in timestamp.
    // This is sometimes caused by erratic reports of the available space in the ALSA drivers.
    if (status == NO_ERROR) {
        // previousTimestampValid is set to false when starting after a stop or flush.
        if (previousTimestampValid) {
            const int64_t previousTimeNanos =
                    audio_utils_ns_from_timespec(&mPreviousTimestamp.mTime);
            int64_t currentTimeNanos = audio_utils_ns_from_timespec(&timestamp.mTime);

            // Fix stale time when checking timestamp right after start().
            //
            // For offload compatibility, use a default lag value here.
            // Any time discrepancy between this update and the pause timestamp is handled
            // by the retrograde check afterwards.
            const int64_t lagNs = int64_t(mAfLatency * 1000000LL);
            const int64_t limitNs = mStartNs - lagNs;
            if (currentTimeNanos < limitNs) {
                ALOGD("correcting timestamp time for pause, "
                        "currentTimeNanos: %lld < limitNs: %lld < mStartNs: %lld",
                        (long long)currentTimeNanos, (long long)limitNs, (long long)mStartNs);
                timestamp.mTime = convertNsToTimespec(limitNs);
                currentTimeNanos = limitNs;
            }

            // retrograde check
            if (currentTimeNanos < previousTimeNanos) {
                ALOGW("retrograde timestamp time corrected, %lld < %lld",
                        (long long)currentTimeNanos, (long long)previousTimeNanos);
                timestamp.mTime = mPreviousTimestamp.mTime;
                // currentTimeNanos not used below.
            }

            // Looking at signed delta will work even when the timestamps
            // are wrapping around.
            int32_t deltaPosition = (Modulo<uint32_t>(timestamp.mPosition)
                    - mPreviousTimestamp.mPosition).signedValue();
            if (deltaPosition < 0) {
                // Only report once per position instead of spamming the log.
                if (!mRetrogradeMotionReported) {
                    ALOGW("retrograde timestamp position corrected, %d = %u - %u",
                            deltaPosition,
                            timestamp.mPosition,
                            mPreviousTimestamp.mPosition);
                    mRetrogradeMotionReported = true;
                }
            } else {
                mRetrogradeMotionReported = false;
            }
            // Clamp the position so it never moves backwards.
            if (deltaPosition < 0) {
                timestamp.mPosition = mPreviousTimestamp.mPosition;
                deltaPosition = 0;
            }
#if 0
            // Uncomment this to verify audio timestamp rate.
            const int64_t deltaTime =
                    audio_utils_ns_from_timespec(&timestamp.mTime) - previousTimeNanos;
            if (deltaTime != 0) {
                const int64_t computedSampleRate =
                        deltaPosition * (long long)NANOS_PER_SECOND / deltaTime;
                ALOGD("computedSampleRate:%u  sampleRate:%u",
                        (unsigned)computedSampleRate, mSampleRate);
            }
#endif
        }
        mPreviousTimestamp = timestamp;
        mPreviousTimestampValid = true;
    }

    return status;
}
2724
2725String8 AudioTrack::getParameters(const String8& keys)
2726{
2727    audio_io_handle_t output = getOutput();
2728    if (output != AUDIO_IO_HANDLE_NONE) {
2729        return AudioSystem::getParameters(output, keys);
2730    } else {
2731        return String8::empty();
2732    }
2733}
2734
2735bool AudioTrack::isOffloaded() const
2736{
2737    AutoMutex lock(mLock);
2738    return isOffloaded_l();
2739}
2740
2741bool AudioTrack::isDirect() const
2742{
2743    AutoMutex lock(mLock);
2744    return isDirect_l();
2745}
2746
2747bool AudioTrack::isOffloadedOrDirect() const
2748{
2749    AutoMutex lock(mLock);
2750    return isOffloadedOrDirect_l();
2751}
2752
2753
// Writes a human-readable snapshot of the track configuration and state to fd.
// NOTE(review): members are read without holding mLock, so concurrently
// changing values may appear transiently inconsistent — dump is best-effort.
status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
{
    String8 result;

    result.append(" AudioTrack::dump\n");
    result.appendFormat("  status(%d), state(%d), session Id(%d), flags(%#x)\n",
                        mStatus, mState, mSessionId, mFlags);
    // When no explicit stream type was set, derive one from the attributes.
    result.appendFormat("  stream type(%d), left - right volume(%f, %f)\n",
                        (mStreamType == AUDIO_STREAM_DEFAULT) ?
                                audio_attributes_to_stream_type(&mAttributes) : mStreamType,
                        mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
    result.appendFormat("  format(%#x), channel mask(%#x), channel count(%u)\n",
                  mFormat, mChannelMask, mChannelCount);
    result.appendFormat("  sample rate(%u), original sample rate(%u), speed(%f)\n",
                  mSampleRate, mOriginalSampleRate, mPlaybackRate.mSpeed);
    result.appendFormat("  frame count(%zu), req. frame count(%zu)\n",
                  mFrameCount, mReqFrameCount);
    result.appendFormat("  notif. frame count(%u), req. notif. frame count(%u),"
            " req. notif. per buff(%u)\n",
             mNotificationFramesAct, mNotificationFramesReq, mNotificationsPerBufferReq);
    result.appendFormat("  latency (%d), selected device Id(%d), routed device Id(%d)\n",
                        mLatency, mSelectedDeviceId, mRoutedDeviceId);
    result.appendFormat("  output(%d) AF latency (%u) AF frame count(%zu) AF SampleRate(%u)\n",
                        mOutput, mAfLatency, mAfFrameCount, mAfSampleRate);
    ::write(fd, result.string(), result.size());
    return NO_ERROR;
}
2781
2782uint32_t AudioTrack::getUnderrunCount() const
2783{
2784    AutoMutex lock(mLock);
2785    return getUnderrunCount_l();
2786}
2787
2788uint32_t AudioTrack::getUnderrunCount_l() const
2789{
2790    return mProxy->getUnderrunCount() + mUnderrunCountOffset;
2791}
2792
2793uint32_t AudioTrack::getUnderrunFrames() const
2794{
2795    AutoMutex lock(mLock);
2796    return mProxy->getUnderrunFrames();
2797}
2798
2799status_t AudioTrack::addAudioDeviceCallback(const sp<AudioSystem::AudioDeviceCallback>& callback)
2800{
2801    if (callback == 0) {
2802        ALOGW("%s adding NULL callback!", __FUNCTION__);
2803        return BAD_VALUE;
2804    }
2805    AutoMutex lock(mLock);
2806    if (mDeviceCallback.unsafe_get() == callback.get()) {
2807        ALOGW("%s adding same callback!", __FUNCTION__);
2808        return INVALID_OPERATION;
2809    }
2810    status_t status = NO_ERROR;
2811    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2812        if (mDeviceCallback != 0) {
2813            ALOGW("%s callback already present!", __FUNCTION__);
2814            AudioSystem::removeAudioDeviceCallback(this, mOutput);
2815        }
2816        status = AudioSystem::addAudioDeviceCallback(this, mOutput);
2817    }
2818    mDeviceCallback = callback;
2819    return status;
2820}
2821
2822status_t AudioTrack::removeAudioDeviceCallback(
2823        const sp<AudioSystem::AudioDeviceCallback>& callback)
2824{
2825    if (callback == 0) {
2826        ALOGW("%s removing NULL callback!", __FUNCTION__);
2827        return BAD_VALUE;
2828    }
2829    AutoMutex lock(mLock);
2830    if (mDeviceCallback.unsafe_get() != callback.get()) {
2831        ALOGW("%s removing different callback!", __FUNCTION__);
2832        return INVALID_OPERATION;
2833    }
2834    mDeviceCallback.clear();
2835    if (mOutput != AUDIO_IO_HANDLE_NONE) {
2836        AudioSystem::removeAudioDeviceCallback(this, mOutput);
2837    }
2838    return NO_ERROR;
2839}
2840
2841
2842void AudioTrack::onAudioDeviceUpdate(audio_io_handle_t audioIo,
2843                                 audio_port_handle_t deviceId)
2844{
2845    sp<AudioSystem::AudioDeviceCallback> callback;
2846    {
2847        AutoMutex lock(mLock);
2848        if (audioIo != mOutput) {
2849            return;
2850        }
2851        callback = mDeviceCallback.promote();
2852        // only update device if the track is active as route changes due to other use cases are
2853        // irrelevant for this client
2854        if (mState == STATE_ACTIVE) {
2855            mRoutedDeviceId = deviceId;
2856        }
2857    }
2858    if (callback.get() != nullptr) {
2859        callback->onAudioDeviceUpdate(mOutput, mRoutedDeviceId);
2860    }
2861}
2862
// Estimates how many milliseconds of written audio have not yet reached the
// given pipeline location (SERVER or KERNEL), writing the result to *msec.
// Returns BAD_VALUE for bad args, INVALID_OPERATION for non-PCM tracks or
// when the requested location is unavailable, NO_ERROR otherwise.
status_t AudioTrack::pendingDuration(int32_t *msec, ExtendedTimestamp::Location location)
{
    if (msec == nullptr ||
            (location != ExtendedTimestamp::LOCATION_SERVER
                    && location != ExtendedTimestamp::LOCATION_KERNEL)) {
        return BAD_VALUE;
    }
    AutoMutex lock(mLock);
    // inclusive of offloaded and direct tracks.
    //
    // It is possible, but not enabled, to allow duration computation for non-pcm
    // audio_has_proportional_frames() formats because currently they have
    // the drain rate equivalent to the pcm sample rate * framesize.
    if (!isPurePcmData_l()) {
        return INVALID_OPERATION;
    }
    // Preferred path: use the extended timestamp when the requested location
    // has valid time info.
    ExtendedTimestamp ets;
    if (getTimestamp_l(&ets) == OK
            && ets.mTimeNs[location] > 0) {
        // diff: frames written by the client but not yet past `location`.
        int64_t diff = ets.mPosition[ExtendedTimestamp::LOCATION_CLIENT]
                - ets.mPosition[location];
        if (diff < 0) {
            *msec = 0;
        } else {
            // ms is the playback time by frames
            int64_t ms = (int64_t)((double)diff * 1000 /
                    ((double)mSampleRate * mPlaybackRate.mSpeed));
            // clockdiff is the timestamp age (negative)
            int64_t clockdiff = (mState != STATE_ACTIVE) ? 0 :
                    ets.mTimeNs[location]
                    + ets.mTimebaseOffset[ExtendedTimestamp::TIMEBASE_MONOTONIC]
                    - systemTime(SYSTEM_TIME_MONOTONIC);

            //ALOGV("ms: %lld  clockdiff: %lld", (long long)ms, (long long)clockdiff);
            static const int NANOS_PER_MILLIS = 1000000;
            // Age-adjust: subtract how long ago the timestamp was taken.
            *msec = (int32_t)(ms + clockdiff / NANOS_PER_MILLIS);
        }
        return NO_ERROR;
    }
    if (location != ExtendedTimestamp::LOCATION_SERVER) {
        return INVALID_OPERATION; // LOCATION_KERNEL is not available
    }
    // use server position directly (offloaded and direct arrive here)
    updateAndGetPosition_l();
    // Modulo subtraction handles 32-bit counter wraparound.
    int32_t diff = (Modulo<uint32_t>(mFramesWritten) - mPosition).signedValue();
    *msec = (diff <= 0) ? 0
            : (int32_t)((double)diff * 1000 / ((double)mSampleRate * mPlaybackRate.mSpeed));
    return NO_ERROR;
}
2912
// Returns true once the track has demonstrably begun rendering after start():
// i.e. a timestamp position has advanced past the position captured at start.
// Used to distinguish "started but no frames presented yet" from real playback.
bool AudioTrack::hasStarted()
{
    AutoMutex lock(mLock);
    switch (mState) {
    case STATE_STOPPED:
        if (isOffloadedOrDirect_l()) {
            // check if we have started in the past to return true.
            return mStartFromZeroUs > 0;
        }
        // A normal audio track may still be draining, so
        // check if stream has ended.  This covers fasttrack position
        // instability and start/stop without any data written.
        if (mProxy->getStreamEndDone()) {
            return true;
        }
        // fall through
    case STATE_ACTIVE:
    case STATE_STOPPING:
        break;
    case STATE_PAUSED:
    case STATE_PAUSED_STOPPING:
    case STATE_FLUSHED:
        return false;  // we're not active
    default:
        LOG_ALWAYS_FATAL("Invalid mState in hasStarted(): %d", mState);
        break;
    }

    // wait indicates whether we need to wait for a timestamp.
    // This is conservatively figured - if we encounter an unexpected error
    // then we will not wait.
    bool wait = false;
    if (isOffloadedOrDirect_l()) {
        // Offloaded/direct: compare the single binder timestamp against the
        // position recorded at start().
        AudioTimestamp ts;
        status_t status = getTimestamp_l(ts);
        if (status == WOULD_BLOCK) {
            wait = true;
        } else if (status == OK) {
            wait = (ts.mPosition == 0 || ts.mPosition == mStartTs.mPosition);
        }
        ALOGV("hasStarted wait:%d  ts:%u  start position:%lld",
                (int)wait,
                ts.mPosition,
                (long long)mStartTs.mPosition);
    } else {
        // PCM: prefer the kernel location, falling back to server, comparing
        // against the extended timestamp recorded at start().
        int location = ExtendedTimestamp::LOCATION_SERVER; // for ALOG
        ExtendedTimestamp ets;
        status_t status = getTimestamp_l(&ets);
        if (status == WOULD_BLOCK) {  // no SERVER or KERNEL frame info in ets
            wait = true;
        } else if (status == OK) {
            for (location = ExtendedTimestamp::LOCATION_KERNEL;
                    location >= ExtendedTimestamp::LOCATION_SERVER; --location) {
                if (ets.mTimeNs[location] < 0 || mStartEts.mTimeNs[location] < 0) {
                    continue;
                }
                wait = ets.mPosition[location] == 0
                        || ets.mPosition[location] == mStartEts.mPosition[location];
                break;
            }
        }
        ALOGV("hasStarted wait:%d  ets:%lld  start position:%lld",
                (int)wait,
                (long long)ets.mPosition[location],
                (long long)mStartEts.mPosition[location]);
    }
    return !wait;
}
2981
2982// =========================================================================
2983
2984void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2985{
2986    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2987    if (audioTrack != 0) {
2988        AutoMutex lock(audioTrack->mLock);
2989        audioTrack->mProxy->binderDied();
2990    }
2991}
2992
2993// =========================================================================
2994
// Constructs the callback thread in the externally-paused state
// (mPaused == true); resume() must be called before threadLoop() does work.
AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
      mIgnoreNextPausedInt(false)
{
}
3000
// No resources beyond the base Thread; nothing to clean up explicitly.
AudioTrack::AudioTrackThread::~AudioTrackThread()
{
}
3004
3005bool AudioTrack::AudioTrackThread::threadLoop()
3006{
3007    {
3008        AutoMutex _l(mMyLock);
3009        if (mPaused) {
3010            // TODO check return value and handle or log
3011            mMyCond.wait(mMyLock);
3012            // caller will check for exitPending()
3013            return true;
3014        }
3015        if (mIgnoreNextPausedInt) {
3016            mIgnoreNextPausedInt = false;
3017            mPausedInt = false;
3018        }
3019        if (mPausedInt) {
3020            // TODO use futex instead of condition, for event flag "or"
3021            if (mPausedNs > 0) {
3022                // TODO check return value and handle or log
3023                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
3024            } else {
3025                // TODO check return value and handle or log
3026                mMyCond.wait(mMyLock);
3027            }
3028            mPausedInt = false;
3029            return true;
3030        }
3031    }
3032    if (exitPending()) {
3033        return false;
3034    }
3035    nsecs_t ns = mReceiver.processAudioBuffer();
3036    switch (ns) {
3037    case 0:
3038        return true;
3039    case NS_INACTIVE:
3040        pauseInternal();
3041        return true;
3042    case NS_NEVER:
3043        return false;
3044    case NS_WHENEVER:
3045        // Event driven: call wake() when callback notifications conditions change.
3046        ns = INT64_MAX;
3047        // fall through
3048    default:
3049        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
3050        pauseInternal(ns);
3051        return true;
3052    }
3053}
3054
// Ask the thread to exit: set the base-class exit flag first, then wake the
// loop so a thread blocked in wait() observes exitPending().
void AudioTrack::AudioTrackThread::requestExit()
{
    // must be in this order to avoid a race condition
    Thread::requestExit();
    resume();
}
3061
3062void AudioTrack::AudioTrackThread::pause()
3063{
3064    AutoMutex _l(mMyLock);
3065    mPaused = true;
3066}
3067
3068void AudioTrack::AudioTrackThread::resume()
3069{
3070    AutoMutex _l(mMyLock);
3071    mIgnoreNextPausedInt = true;
3072    if (mPaused || mPausedInt) {
3073        mPaused = false;
3074        mPausedInt = false;
3075        mMyCond.signal();
3076    }
3077}
3078
3079void AudioTrack::AudioTrackThread::wake()
3080{
3081    AutoMutex _l(mMyLock);
3082    if (!mPaused) {
3083        // wake() might be called while servicing a callback - ignore the next
3084        // pause time and call processAudioBuffer.
3085        mIgnoreNextPausedInt = true;
3086        if (mPausedInt && mPausedNs > 0) {
3087            // audio track is active and internally paused with timeout.
3088            mPausedInt = false;
3089            mMyCond.signal();
3090        }
3091    }
3092}
3093
3094void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
3095{
3096    AutoMutex _l(mMyLock);
3097    mPausedInt = true;
3098    mPausedNs = ns;
3099}
3100
3101} // namespace android
3102