AudioTrack.cpp revision 40bc906252974d0b389ae4a147232d0c9a97193f
/*
**
** Copyright 2007, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/

//#define LOG_NDEBUG 0
#define LOG_TAG "AudioTrack"

#include <inttypes.h>
#include <math.h>
#include <sys/resource.h>

#include <audio_utils/primitives.h>
#include <binder/IPCThreadState.h>
#include <media/AudioTrack.h>
#include <utils/Log.h>
#include <private/media/AudioTrackShared.h>
#include <media/IAudioFlinger.h>
#include <media/AudioPolicyHelper.h>
#include <media/AudioResamplerPublic.h>

#define WAIT_PERIOD_MS                  10
#define WAIT_STREAM_END_TIMEOUT_SEC     120
static const int kMaxLoopCountNotifications = 32;

namespace android {
// ---------------------------------------------------------------------------

template <typename T>
const T &min(const T &x, const T &y) {
    return x < y ? x : y;
}

static int64_t convertTimespecToUs(const struct timespec &tv)
{
    return tv.tv_sec * 1000000ll + tv.tv_nsec / 1000;
}

// current monotonic time in microseconds.
static int64_t getNowUs()
{
    struct timespec tv;
    (void) clock_gettime(CLOCK_MONOTONIC, &tv);
    return convertTimespecToUs(tv);
}

// static
status_t AudioTrack::getMinFrameCount(
        size_t* frameCount,
        audio_stream_type_t streamType,
        uint32_t sampleRate)
{
    if (frameCount == NULL) {
        return BAD_VALUE;
    }

    // FIXME handle in server, like createTrack_l(), possible missing info:
    //          audio_io_handle_t output
    //          audio_format_t format
    //          audio_channel_mask_t channelMask
    //          audio_output_flags_t flags (FAST)
    uint32_t afSampleRate;
    status_t status;
    status = AudioSystem::getOutputSamplingRate(&afSampleRate, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output sample rate for stream type %d; status %d",
                streamType, status);
        return status;
    }
    size_t afFrameCount;
    status = AudioSystem::getOutputFrameCount(&afFrameCount, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output frame count for stream type %d; status %d",
                streamType, status);
        return status;
    }
    uint32_t afLatency;
    status = AudioSystem::getOutputLatency(&afLatency, streamType);
    if (status != NO_ERROR) {
        ALOGE("Unable to query output latency for stream type %d; status %d",
                streamType, status);
        return status;
    }

    // Ensure that buffer depth covers at least audio hardware latency
    uint32_t minBufCount = afLatency / ((1000 * afFrameCount) / afSampleRate);
    if (minBufCount < 2) {
        minBufCount = 2;
    }
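    // Worked example with illustrative numbers only: afFrameCount = 960 at
    // afSampleRate = 48000 gives a mixer period of (1000 * 960) / 48000 = 20 ms,
    // so an afLatency of 90 ms yields minBufCount = 4, while 30 ms would yield 1
    // and be clamped to the minimum of 2 above.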

    *frameCount = minBufCount * sourceFramesNeeded(sampleRate, afFrameCount, afSampleRate);
    // The formula above should always produce a non-zero value under normal circumstances:
    // AudioTrack.SAMPLE_RATE_HZ_MIN <= sampleRate <= AudioTrack.SAMPLE_RATE_HZ_MAX.
    // Return error in the unlikely event that it does not, as that's part of the API contract.
    if (*frameCount == 0) {
        ALOGE("AudioTrack::getMinFrameCount failed for streamType %d, sampleRate %u",
                streamType, sampleRate);
        return BAD_VALUE;
    }
    ALOGV("getMinFrameCount=%zu: afFrameCount=%zu, minBufCount=%u, afSampleRate=%u, afLatency=%u",
            *frameCount, afFrameCount, minBufCount, afSampleRate, afLatency);
    return NO_ERROR;
}
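
// Illustrative sketch (not part of this implementation): a typical streaming
// client queries the minimum frame count before constructing the track.  The
// stream type, sample rate and format below are assumptions for the example.
//
//     size_t minFrameCount = 0;
//     if (AudioTrack::getMinFrameCount(&minFrameCount, AUDIO_STREAM_MUSIC,
//             44100) != NO_ERROR) {
//         return;  // output configuration could not be queried
//     }
//     sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
//             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_STEREO, minFrameCount);
//     if (track->initCheck() == NO_ERROR) {
//         track->start();
//     }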

// ---------------------------------------------------------------------------

AudioTrack::AudioTrack()
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mAttributes.content_type = AUDIO_CONTENT_TYPE_UNKNOWN;
    mAttributes.usage = AUDIO_USAGE_UNKNOWN;
    mAttributes.flags = 0x0;
    strcpy(mAttributes.tags, "");
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            frameCount, flags, cbf, user, notificationFrames,
            0 /*sharedBuffer*/, false /*threadCanCallJava*/, sessionId, transferType,
            offloadInfo, uid, pid, pAttributes);
}

AudioTrack::AudioTrack(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        const sp<IMemory>& sharedBuffer,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
    : mStatus(NO_INIT),
      mIsTimed(false),
      mPreviousPriority(ANDROID_PRIORITY_NORMAL),
      mPreviousSchedulingGroup(SP_DEFAULT),
      mPausedPosition(0)
{
    mStatus = set(streamType, sampleRate, format, channelMask,
            0 /*frameCount*/, flags, cbf, user, notificationFrames,
            sharedBuffer, false /*threadCanCallJava*/, sessionId, transferType, offloadInfo,
            uid, pid, pAttributes);
}

AudioTrack::~AudioTrack()
{
    if (mStatus == NO_ERROR) {
        // Make sure that callback function exits in the case where
        // it is looping on buffer full condition in obtainBuffer().
        // Otherwise the callback thread will never exit.
        stop();
        if (mAudioTrackThread != 0) {
            mProxy->interrupt();
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mAudioTrack.clear();
        mCblkMemory.clear();
        mSharedBuffer.clear();
        IPCThreadState::self()->flushCommands();
        ALOGV("~AudioTrack, releasing session id from %d on behalf of %d",
                IPCThreadState::self()->getCallingPid(), mClientPid);
        AudioSystem::releaseAudioSessionId(mSessionId, mClientPid);
    }
}

status_t AudioTrack::set(
        audio_stream_type_t streamType,
        uint32_t sampleRate,
        audio_format_t format,
        audio_channel_mask_t channelMask,
        size_t frameCount,
        audio_output_flags_t flags,
        callback_t cbf,
        void* user,
        uint32_t notificationFrames,
        const sp<IMemory>& sharedBuffer,
        bool threadCanCallJava,
        int sessionId,
        transfer_type transferType,
        const audio_offload_info_t *offloadInfo,
        int uid,
        pid_t pid,
        const audio_attributes_t* pAttributes)
{
    ALOGV("set(): streamType %d, sampleRate %u, format %#x, channelMask %#x, frameCount %zu, "
          "flags #%x, notificationFrames %u, sessionId %d, transferType %d",
          streamType, sampleRate, format, channelMask, frameCount, flags, notificationFrames,
          sessionId, transferType);

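    // Resolve TRANSFER_DEFAULT to a concrete transfer mode and validate the
    // explicit modes against the other arguments: a shared buffer selects
    // TRANSFER_SHARED, a callback that may not call into Java selects
    // TRANSFER_CALLBACK, and everything else falls back to TRANSFER_SYNC
    // (blocking write()).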
    switch (transferType) {
    case TRANSFER_DEFAULT:
        if (sharedBuffer != 0) {
            transferType = TRANSFER_SHARED;
        } else if (cbf == NULL || threadCanCallJava) {
            transferType = TRANSFER_SYNC;
        } else {
            transferType = TRANSFER_CALLBACK;
        }
        break;
    case TRANSFER_CALLBACK:
        if (cbf == NULL || sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_CALLBACK but cbf == NULL || sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_OBTAIN:
    case TRANSFER_SYNC:
        if (sharedBuffer != 0) {
            ALOGE("Transfer type TRANSFER_OBTAIN but sharedBuffer != 0");
            return BAD_VALUE;
        }
        break;
    case TRANSFER_SHARED:
        if (sharedBuffer == 0) {
            ALOGE("Transfer type TRANSFER_SHARED but sharedBuffer == 0");
            return BAD_VALUE;
        }
        break;
    default:
        ALOGE("Invalid transfer type %d", transferType);
        return BAD_VALUE;
    }
    mSharedBuffer = sharedBuffer;
    mTransfer = transferType;

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    ALOGV("set() streamType %d frameCount %zu flags %04x", streamType, frameCount, flags);

    AutoMutex lock(mLock);

    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        ALOGE("Track already in use");
        return INVALID_OPERATION;
    }

    // handle default values first.
    if (streamType == AUDIO_STREAM_DEFAULT) {
        streamType = AUDIO_STREAM_MUSIC;
    }
    if (pAttributes == NULL) {
        if (uint32_t(streamType) >= AUDIO_STREAM_PUBLIC_CNT) {
            ALOGE("Invalid stream type %d", streamType);
            return BAD_VALUE;
        }
        mStreamType = streamType;

    } else {
        // stream type shouldn't be looked at, this track has audio attributes
        memcpy(&mAttributes, pAttributes, sizeof(audio_attributes_t));
        ALOGV("Building AudioTrack with attributes: usage=%d content=%d flags=0x%x tags=[%s]",
                mAttributes.usage, mAttributes.content_type, mAttributes.flags, mAttributes.tags);
        mStreamType = AUDIO_STREAM_DEFAULT;
        if ((mAttributes.flags & AUDIO_FLAG_HW_AV_SYNC) != 0) {
            flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_HW_AV_SYNC);
        }
    }

    // these below should probably come from the audioFlinger too...
    if (format == AUDIO_FORMAT_DEFAULT) {
        format = AUDIO_FORMAT_PCM_16_BIT;
    }

    // validate parameters
    if (!audio_is_valid_format(format)) {
        ALOGE("Invalid format %#x", format);
        return BAD_VALUE;
    }
    mFormat = format;

    if (!audio_is_output_channel(channelMask)) {
        ALOGE("Invalid channel mask %#x", channelMask);
        return BAD_VALUE;
    }
    mChannelMask = channelMask;
    uint32_t channelCount = audio_channel_count_from_out_mask(channelMask);
    mChannelCount = channelCount;

    // force direct flag if format is not linear PCM
    // or offload was requested
    if ((flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
            || !audio_is_linear_pcm(format)) {
        ALOGV( (flags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD)
                    ? "Offload request, forcing to Direct Output"
                    : "Not linear PCM, forcing to Direct Output");
        flags = (audio_output_flags_t)
                // FIXME why can't we allow direct AND fast?
                ((flags | AUDIO_OUTPUT_FLAG_DIRECT) & ~AUDIO_OUTPUT_FLAG_FAST);
    }

    // force direct flag if HW A/V sync requested
    if ((flags & AUDIO_OUTPUT_FLAG_HW_AV_SYNC) != 0) {
        flags = (audio_output_flags_t)(flags | AUDIO_OUTPUT_FLAG_DIRECT);
    }

    if (flags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (audio_is_linear_pcm(format)) {
            mFrameSize = channelCount * audio_bytes_per_sample(format);
        } else {
            mFrameSize = sizeof(uint8_t);
        }
    } else {
        ALOG_ASSERT(audio_is_linear_pcm(format));
        mFrameSize = channelCount * audio_bytes_per_sample(format);
        // createTrack will return an error if PCM format is not supported by server,
        // so no need to check for specific PCM formats here
    }

    // sampling rate must be specified for direct outputs
    if (sampleRate == 0 && (flags & AUDIO_OUTPUT_FLAG_DIRECT) != 0) {
        return BAD_VALUE;
    }
    mSampleRate = sampleRate;

    // Make copy of input parameter offloadInfo so that in the future:
    //  (a) createTrack_l doesn't need it as an input parameter
    //  (b) we can support re-creation of offloaded tracks
    if (offloadInfo != NULL) {
        mOffloadInfoCopy = *offloadInfo;
        mOffloadInfo = &mOffloadInfoCopy;
    } else {
        mOffloadInfo = NULL;
    }

    mVolume[AUDIO_INTERLEAVE_LEFT] = 1.0f;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = 1.0f;
    mSendLevel = 0.0f;
    // mFrameCount is initialized in createTrack_l
    mReqFrameCount = frameCount;
    mNotificationFramesReq = notificationFrames;
    mNotificationFramesAct = 0;
    if (sessionId == AUDIO_SESSION_ALLOCATE) {
        mSessionId = AudioSystem::newAudioUniqueId();
    } else {
        mSessionId = sessionId;
    }
    int callingpid = IPCThreadState::self()->getCallingPid();
    int mypid = getpid();
    if (uid == -1 || (callingpid != mypid)) {
        mClientUid = IPCThreadState::self()->getCallingUid();
    } else {
        mClientUid = uid;
    }
    if (pid == -1 || (callingpid != mypid)) {
        mClientPid = callingpid;
    } else {
        mClientPid = pid;
    }
    mAuxEffectId = 0;
    mFlags = flags;
    mCbf = cbf;

    if (cbf != NULL) {
        mAudioTrackThread = new AudioTrackThread(*this, threadCanCallJava);
        mAudioTrackThread->run("AudioTrack", ANDROID_PRIORITY_AUDIO, 0 /*stack*/);
    }

    // create the IAudioTrack
    status_t status = createTrack_l();

    if (status != NO_ERROR) {
        if (mAudioTrackThread != 0) {
            mAudioTrackThread->requestExit();   // see comment in AudioTrack.h
            mAudioTrackThread->requestExitAndWait();
            mAudioTrackThread.clear();
        }
        return status;
    }

    mStatus = NO_ERROR;
    mState = STATE_STOPPED;
    mUserData = user;
    mLoopCount = 0;
    mLoopStart = 0;
    mLoopEnd = 0;
    mLoopCountNotified = 0;
    mMarkerPosition = 0;
    mMarkerReached = false;
    mNewPosition = 0;
    mUpdatePeriod = 0;
    mServer = 0;
    mPosition = 0;
    mReleased = 0;
    mStartUs = 0;
    AudioSystem::acquireAudioSessionId(mSessionId, mClientPid);
    mSequence = 1;
    mObservedSequence = mSequence;
    mInUnderrun = false;

    return NO_ERROR;
}

// -------------------------------------------------------------------------

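// Starts or resumes playback.  From STATE_STOPPED or STATE_FLUSHED the
// client-visible position is reset to zero; from STATE_PAUSED playback resumes
// where it left off.  If the server-side track has been invalidated (for
// example after a media server restart), it is re-created via restoreTrack_l()
// before being started.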
status_t AudioTrack::start()
{
    AutoMutex lock(mLock);

    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }

    mInUnderrun = true;

    State previousState = mState;
    if (previousState == STATE_PAUSED_STOPPING) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_ACTIVE;
    }
    (void) updateAndGetPosition_l();
    if (previousState == STATE_STOPPED || previousState == STATE_FLUSHED) {
        // reset current position as seen by client to 0
        mPosition = 0;
        // For offloaded tracks, we don't know if the hardware counters are really zero here,
        // since the flush is asynchronous and stop may not fully drain.
        // We save the time when the track is started to later verify whether
        // the counters are realistic (i.e. start from zero after this time).
        mStartUs = getNowUs();

        // force refresh of remaining frames by processAudioBuffer() as last
        // write before stop could be partial.
        mRefreshRemaining = true;
    }
    mNewPosition = mPosition + mUpdatePeriod;
    int32_t flags = android_atomic_and(~CBLK_DISABLED, &mCblk->mFlags);

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (previousState == STATE_STOPPING) {
            mProxy->interrupt();
        } else {
            t->resume();
        }
    } else {
        mPreviousPriority = getpriority(PRIO_PROCESS, 0);
        get_sched_policy(0, &mPreviousSchedulingGroup);
        androidSetThreadPriority(0, ANDROID_PRIORITY_AUDIO);
    }

    status_t status = NO_ERROR;
    if (!(flags & CBLK_INVALID)) {
        status = mAudioTrack->start();
        if (status == DEAD_OBJECT) {
            flags |= CBLK_INVALID;
        }
    }
    if (flags & CBLK_INVALID) {
        status = restoreTrack_l("start");
    }

    if (status != NO_ERROR) {
        ALOGE("start() status %d", status);
        mState = previousState;
        if (t != 0) {
            if (previousState != STATE_STOPPING) {
                t->pause();
            }
        } else {
            setpriority(PRIO_PROCESS, 0, mPreviousPriority);
            set_sched_policy(0, mPreviousSchedulingGroup);
        }
    }

    return status;
}

void AudioTrack::stop()
{
    AutoMutex lock(mLock);
    if (mState != STATE_ACTIVE && mState != STATE_PAUSED) {
        return;
    }

    if (isOffloaded_l()) {
        mState = STATE_STOPPING;
    } else {
        mState = STATE_STOPPED;
        mReleased = 0;
    }

    mProxy->interrupt();
    mAudioTrack->stop();
    // the playback head position will reset to 0, so if a marker is set, we need
    // to activate it again
    mMarkerReached = false;

    if (mSharedBuffer != 0) {
        // clear buffer position and loop count.
        mStaticProxy->setBufferPositionAndLoop(0 /* position */,
                0 /* loopStart */, 0 /* loopEnd */, 0 /* loopCount */);
    }

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        if (!isOffloaded_l()) {
            t->pause();
        }
    } else {
        setpriority(PRIO_PROCESS, 0, mPreviousPriority);
        set_sched_policy(0, mPreviousSchedulingGroup);
    }
}

bool AudioTrack::stopped() const
{
    AutoMutex lock(mLock);
    return mState != STATE_ACTIVE;
}

void AudioTrack::flush()
{
    if (mSharedBuffer != 0) {
        return;
    }
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE || mState == STATE_FLUSHED) {
        return;
    }
    flush_l();
}

void AudioTrack::flush_l()
{
    ALOG_ASSERT(mState != STATE_ACTIVE);

    // clear playback marker and periodic update counter
    mMarkerPosition = 0;
    mMarkerReached = false;
    mUpdatePeriod = 0;
    mRefreshRemaining = true;

    mState = STATE_FLUSHED;
    mReleased = 0;
    if (isOffloaded_l()) {
        mProxy->interrupt();
    }
    mProxy->flush();
    mAudioTrack->flush();
}

void AudioTrack::pause()
{
    AutoMutex lock(mLock);
    if (mState == STATE_ACTIVE) {
        mState = STATE_PAUSED;
    } else if (mState == STATE_STOPPING) {
        mState = STATE_PAUSED_STOPPING;
    } else {
        return;
    }
    mProxy->interrupt();
    mAudioTrack->pause();

    if (isOffloaded_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            // An offload output can be re-used between two audio tracks having
            // the same configuration. A timestamp query for a paused track
            // while the other is running would return an incorrect time.
            // To fix this, cache the playback position on a pause() and return
            // this time when requested until the track is resumed.

            // OffloadThread sends HAL pause in its threadLoop. Time saved
            // here can be slightly off.

            // TODO: check return code for getRenderPosition.

            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &mPausedPosition);
            ALOGV("AudioTrack::pause for offload, cache current position %u", mPausedPosition);
        }
    }
}

status_t AudioTrack::setVolume(float left, float right)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(left) || left < GAIN_FLOAT_ZERO || left > GAIN_FLOAT_UNITY ||
            isnanf(right) || right < GAIN_FLOAT_ZERO || right > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mVolume[AUDIO_INTERLEAVE_LEFT] = left;
    mVolume[AUDIO_INTERLEAVE_RIGHT] = right;

    mProxy->setVolumeLR(gain_minifloat_pack(gain_from_float(left), gain_from_float(right)));

    if (isOffloaded_l()) {
        mAudioTrack->signal();
    }
    return NO_ERROR;
}

status_t AudioTrack::setVolume(float volume)
{
    return setVolume(volume, volume);
}

status_t AudioTrack::setAuxEffectSendLevel(float level)
{
    // This duplicates a test by AudioTrack JNI, but that is not the only caller
    if (isnanf(level) || level < GAIN_FLOAT_ZERO || level > GAIN_FLOAT_UNITY) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    mSendLevel = level;
    mProxy->setSendLevel(level);

    return NO_ERROR;
}

void AudioTrack::getAuxEffectSendLevel(float* level) const
{
    if (level != NULL) {
        *level = mSendLevel;
    }
}

status_t AudioTrack::setSampleRate(uint32_t rate)
{
    if (mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    if (mOutput == AUDIO_IO_HANDLE_NONE) {
        return NO_INIT;
    }
    uint32_t afSamplingRate;
    if (AudioSystem::getSamplingRate(mOutput, &afSamplingRate) != NO_ERROR) {
        return NO_INIT;
    }
    if (rate == 0 || rate > afSamplingRate * AUDIO_RESAMPLER_DOWN_RATIO_MAX) {
        return BAD_VALUE;
    }

    mSampleRate = rate;
    mProxy->setSampleRate(rate);

    return NO_ERROR;
}

uint32_t AudioTrack::getSampleRate() const
{
    if (mIsTimed) {
        return 0;
    }

    AutoMutex lock(mLock);

    // sample rate can be updated during playback by the offloaded decoder so we need to
    // query the HAL and update if needed.
// FIXME use Proxy return channel to update the rate from server and avoid polling here
    if (isOffloadedOrDirect_l()) {
        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t sampleRate = 0;
            status_t status = AudioSystem::getSamplingRate(mOutput, &sampleRate);
            if (status == NO_ERROR) {
                mSampleRate = sampleRate;
            }
        }
    }
    return mSampleRate;
}

status_t AudioTrack::setLoop(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    if (loopCount == 0) {
        ;
    } else if (loopCount >= -1 && loopStart < loopEnd && loopEnd <= mFrameCount &&
            loopEnd - loopStart >= MIN_LOOP) {
        ;
    } else {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    setLoop_l(loopStart, loopEnd, loopCount);
    return NO_ERROR;
}
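
// Illustrative sketch (not part of this implementation): looping only applies
// to static tracks created with a shared buffer.  The buffer contents and the
// loop points below are assumptions for the example; loopEnd must not exceed
// the track's frame count.
//
//     sp<IMemory> sharedBuffer = ...;  // PCM data filled in elsewhere
//     sp<AudioTrack> track = new AudioTrack(AUDIO_STREAM_MUSIC, 44100,
//             AUDIO_FORMAT_PCM_16_BIT, AUDIO_CHANNEL_OUT_MONO, sharedBuffer);
//     track->setLoop(0 /*loopStart*/, 44100 /*loopEnd*/, 3 /*loopCount*/);
//     track->start();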

void AudioTrack::setLoop_l(uint32_t loopStart, uint32_t loopEnd, int loopCount)
{
    // We do not update the periodic notification point.
    // mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mLoopCount = loopCount;
    mLoopEnd = loopEnd;
    mLoopStart = loopStart;
    mLoopCountNotified = loopCount;
    mStaticProxy->setLoop(loopStart, loopEnd, loopCount);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
}

status_t AudioTrack::setMarkerPosition(uint32_t marker)
{
    // The only purpose of setting marker position is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mMarkerPosition = marker;
    mMarkerReached = false;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getMarkerPosition(uint32_t *marker) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (marker == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *marker = mMarkerPosition;

    return NO_ERROR;
}

status_t AudioTrack::setPositionUpdatePeriod(uint32_t updatePeriod)
{
    // The only purpose of setting position update period is to get a callback
    if (mCbf == NULL || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    mNewPosition = updateAndGetPosition_l() + updatePeriod;
    mUpdatePeriod = updatePeriod;

    sp<AudioTrackThread> t = mAudioTrackThread;
    if (t != 0) {
        t->wake();
    }
    return NO_ERROR;
}

status_t AudioTrack::getPositionUpdatePeriod(uint32_t *updatePeriod) const
{
    if (isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (updatePeriod == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *updatePeriod = mUpdatePeriod;

    return NO_ERROR;
}

status_t AudioTrack::setPosition(uint32_t position)
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }
    if (position > mFrameCount) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    // Currently we require that the player is inactive before setting parameters such as position
    // or loop points.  Otherwise, there could be a race condition: the application could read the
    // current position, compute a new position or loop parameters, and then set that position or
    // loop parameters but it would do the "wrong" thing since the position has continued to advance
    // in the mean time.  If we ever provide a sequencer in server, we could allow a way for the app
    // to specify how it wants to handle such scenarios.
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    // After setting the position, use full update period before notification.
    mNewPosition = updateAndGetPosition_l() + mUpdatePeriod;
    mStaticProxy->setBufferPosition(position);

    // Waking the AudioTrackThread is not needed as this cannot be called when active.
    return NO_ERROR;
}

status_t AudioTrack::getPosition(uint32_t *position)
{
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    if (isOffloadedOrDirect_l()) {
        uint32_t dspFrames = 0;

        if (isOffloaded_l() && ((mState == STATE_PAUSED) || (mState == STATE_PAUSED_STOPPING))) {
            ALOGV("getPosition called in paused state, return cached position %u", mPausedPosition);
            *position = mPausedPosition;
            return NO_ERROR;
        }

        if (mOutput != AUDIO_IO_HANDLE_NONE) {
            uint32_t halFrames;
            AudioSystem::getRenderPosition(mOutput, &halFrames, &dspFrames);
        }
        // FIXME: dspFrames may not be zero in (mState == STATE_STOPPED || mState == STATE_FLUSHED)
        // due to hardware latency. We leave this behavior for now.
        *position = dspFrames;
    } else {
        if (mCblk->mFlags & CBLK_INVALID) {
            restoreTrack_l("getPosition");
        }

        // IAudioTrack::stop() isn't synchronous; we don't know when presentation completes
        *position = (mState == STATE_STOPPED || mState == STATE_FLUSHED) ?
                0 : updateAndGetPosition_l();
    }
    return NO_ERROR;
}

status_t AudioTrack::getBufferPosition(uint32_t *position)
{
    if (mSharedBuffer == 0 || mIsTimed) {
        return INVALID_OPERATION;
    }
    if (position == NULL) {
        return BAD_VALUE;
    }

    AutoMutex lock(mLock);
    *position = mStaticProxy->getBufferPosition();
    return NO_ERROR;
}

status_t AudioTrack::reload()
{
    if (mSharedBuffer == 0 || mIsTimed || isOffloadedOrDirect()) {
        return INVALID_OPERATION;
    }

    AutoMutex lock(mLock);
    // See setPosition() regarding setting parameters such as loop points or position while active
    if (mState == STATE_ACTIVE) {
        return INVALID_OPERATION;
    }
    mNewPosition = mUpdatePeriod;
    (void) updateAndGetPosition_l();
    mPosition = 0;
#if 0
    // The documentation is not clear on the behavior of reload() and the restoration
    // of loop count. Historically we have not restored loop count, start, end,
    // but it makes sense if one desires to repeat playing a particular sound.
    if (mLoopCount != 0) {
        mLoopCountNotified = mLoopCount;
        mStaticProxy->setLoop(mLoopStart, mLoopEnd, mLoopCount);
    }
#endif
    mStaticProxy->setBufferPosition(0);
    return NO_ERROR;
}

audio_io_handle_t AudioTrack::getOutput() const
{
    AutoMutex lock(mLock);
    return mOutput;
}

status_t AudioTrack::attachAuxEffect(int effectId)
{
    AutoMutex lock(mLock);
    status_t status = mAudioTrack->attachAuxEffect(effectId);
    if (status == NO_ERROR) {
        mAuxEffectId = effectId;
    }
    return status;
}

audio_stream_type_t AudioTrack::streamType() const
{
    if (mStreamType == AUDIO_STREAM_DEFAULT) {
        return audio_attributes_to_stream_type(&mAttributes);
    }
    return mStreamType;
}

// -------------------------------------------------------------------------

// must be called with mLock held
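// Binds this AudioTrack to an output and a server-side IAudioTrack: selects an
// output via getOutputForAttr(), queries its latency, frame count and sample
// rate, decides whether the FAST/OFFLOAD/DIRECT requests can still be honored,
// asks AudioFlinger to create the track, then maps the control block and
// configures the client proxy.  If anything fails before AudioFlinger takes
// ownership of the output, the output is released again (see the "release"
// label below).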
status_t AudioTrack::createTrack_l()
{
    const sp<IAudioFlinger>& audioFlinger = AudioSystem::get_audio_flinger();
    if (audioFlinger == 0) {
        ALOGE("Could not get audioflinger");
        return NO_INIT;
    }

    audio_io_handle_t output;
    audio_stream_type_t streamType = mStreamType;
    audio_attributes_t *attr = (mStreamType == AUDIO_STREAM_DEFAULT) ? &mAttributes : NULL;
    status_t status = AudioSystem::getOutputForAttr(attr, &output,
                                                    (audio_session_t)mSessionId, &streamType,
                                                    mSampleRate, mFormat, mChannelMask,
                                                    mFlags, mOffloadInfo);


    if (status != NO_ERROR || output == AUDIO_IO_HANDLE_NONE) {
        ALOGE("Could not get audio output for stream type %d, usage %d, sample rate %u, format %#x,"
              " channel mask %#x, flags %#x",
              streamType, mAttributes.usage, mSampleRate, mFormat, mChannelMask, mFlags);
        return BAD_VALUE;
    }
    {
    // Now that we have a reference to an I/O handle and have not yet handed it off to AudioFlinger,
    // we must release it ourselves if anything goes wrong.

    // Not all of these values are needed under all conditions, but it is easier to get them all

    uint32_t afLatency;
    status = AudioSystem::getLatency(output, &afLatency);
    if (status != NO_ERROR) {
        ALOGE("getLatency(%d) failed status %d", output, status);
        goto release;
    }

    size_t afFrameCount;
    status = AudioSystem::getFrameCount(output, &afFrameCount);
    if (status != NO_ERROR) {
        ALOGE("getFrameCount(output=%d) status %d", output, status);
        goto release;
    }

    uint32_t afSampleRate;
    status = AudioSystem::getSamplingRate(output, &afSampleRate);
    if (status != NO_ERROR) {
        ALOGE("getSamplingRate(output=%d) status %d", output, status);
        goto release;
    }
    if (mSampleRate == 0) {
        mSampleRate = afSampleRate;
    }
    // Client decides whether the track is TIMED (see below), but can only express a preference
    // for FAST.  Server will perform additional tests.
    if ((mFlags & AUDIO_OUTPUT_FLAG_FAST) && !((
            // either of these use cases:
            // use case 1: shared buffer
            (mSharedBuffer != 0) ||
            // use case 2: callback transfer mode
            (mTransfer == TRANSFER_CALLBACK) ||
            // use case 3: obtain/release mode
            (mTransfer == TRANSFER_OBTAIN)) &&
            // matching sample rate
            (mSampleRate == afSampleRate))) {
        ALOGW("AUDIO_OUTPUT_FLAG_FAST denied by client");
        // once denied, do not request again if IAudioTrack is re-created
        mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
    }
    ALOGV("createTrack_l() output %d afLatency %d", output, afLatency);

    // The client's AudioTrack buffer is divided into n parts for purpose of wakeup by server, where
    //  n = 1   fast track with single buffering; nBuffering is ignored
    //  n = 2   fast track with double buffering
    //  n = 2   normal track, (including those with sample rate conversion)
    //  n >= 3  very high latency or very small notification interval (unused).
    const uint32_t nBuffering = 2;

    mNotificationFramesAct = mNotificationFramesReq;

    size_t frameCount = mReqFrameCount;
    if (!audio_is_linear_pcm(mFormat)) {

        if (mSharedBuffer != 0) {
            // Same comment as below about ignoring frameCount parameter for set()
            frameCount = mSharedBuffer->size();
        } else if (frameCount == 0) {
            frameCount = afFrameCount;
        }
        if (mNotificationFramesAct != frameCount) {
            mNotificationFramesAct = frameCount;
        }
    } else if (mSharedBuffer != 0) {
        // FIXME: Ensure client side memory buffers need
        // not have additional alignment beyond sample
        // (e.g. 16 bit stereo accessed as 32 bit frame).
        size_t alignment = audio_bytes_per_sample(mFormat);
        if (alignment & 1) {
            // for AUDIO_FORMAT_PCM_24_BIT_PACKED (not exposed through Java).
            alignment = 1;
        }
        if (mChannelCount > 1) {
            // More than 2 channels does not require stronger alignment than stereo
            alignment <<= 1;
        }
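        // Examples (illustrative): 16-bit stereo PCM requires 2 << 1 = 4 byte
        // alignment, while 24-bit packed PCM falls back to byte alignment;
        // more than two channels never needs stronger alignment than stereo.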
        if (((uintptr_t)mSharedBuffer->pointer() & (alignment - 1)) != 0) {
            ALOGE("Invalid buffer alignment: address %p, channel count %u",
                    mSharedBuffer->pointer(), mChannelCount);
            status = BAD_VALUE;
            goto release;
        }

        // When initializing a shared buffer AudioTrack via constructors,
        // there's no frameCount parameter.
        // But when initializing a shared buffer AudioTrack via set(),
        // there _is_ a frameCount parameter.  We silently ignore it.
        frameCount = mSharedBuffer->size() / mFrameSize;
    } else {
        // For fast and normal streaming tracks,
        // the frame count calculations and checks are done by server
    }

    IAudioFlinger::track_flags_t trackFlags = IAudioFlinger::TRACK_DEFAULT;
    if (mIsTimed) {
        trackFlags |= IAudioFlinger::TRACK_TIMED;
    }

    pid_t tid = -1;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        trackFlags |= IAudioFlinger::TRACK_FAST;
        if (mAudioTrackThread != 0) {
            tid = mAudioTrackThread->getTid();
        }
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        trackFlags |= IAudioFlinger::TRACK_OFFLOAD;
    }

    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        trackFlags |= IAudioFlinger::TRACK_DIRECT;
    }

    size_t temp = frameCount;   // temp may be replaced by a revised value of frameCount,
                                // but we will still need the original value also
    sp<IAudioTrack> track = audioFlinger->createTrack(streamType,
                                                      mSampleRate,
                                                      mFormat,
                                                      mChannelMask,
                                                      &temp,
                                                      &trackFlags,
                                                      mSharedBuffer,
                                                      output,
                                                      tid,
                                                      &mSessionId,
                                                      mClientUid,
                                                      &status);

    if (status != NO_ERROR) {
        ALOGE("AudioFlinger could not create track, status: %d", status);
        goto release;
    }
    ALOG_ASSERT(track != 0);

    // AudioFlinger now owns the reference to the I/O handle,
    // so we are no longer responsible for releasing it.

    sp<IMemory> iMem = track->getCblk();
    if (iMem == 0) {
        ALOGE("Could not get control block");
        return NO_INIT;
    }
    void *iMemPointer = iMem->pointer();
    if (iMemPointer == NULL) {
        ALOGE("Could not get control block pointer");
        return NO_INIT;
    }
    // invariant that mAudioTrack != 0 is true only after set() returns successfully
    if (mAudioTrack != 0) {
        IInterface::asBinder(mAudioTrack)->unlinkToDeath(mDeathNotifier, this);
        mDeathNotifier.clear();
    }
    mAudioTrack = track;
    mCblkMemory = iMem;
    IPCThreadState::self()->flushCommands();

    audio_track_cblk_t* cblk = static_cast<audio_track_cblk_t*>(iMemPointer);
    mCblk = cblk;
    // note that temp is the (possibly revised) value of frameCount
    if (temp < frameCount || (frameCount == 0 && temp == 0)) {
        // In current design, AudioTrack client checks and ensures frame count validity before
        // passing it to AudioFlinger so AudioFlinger should not return a different value except
        // for fast track as it uses a special method of assigning frame count.
        ALOGW("Requested frameCount %zu but received frameCount %zu", frameCount, temp);
    }
    frameCount = temp;

    mAwaitBoost = false;
    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
        if (trackFlags & IAudioFlinger::TRACK_FAST) {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST successful; frameCount %zu", frameCount);
            mAwaitBoost = true;
        } else {
            ALOGV("AUDIO_OUTPUT_FLAG_FAST denied by server; frameCount %zu", frameCount);
            // once denied, do not request again if IAudioTrack is re-created
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_FAST);
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD) {
        if (trackFlags & IAudioFlinger::TRACK_OFFLOAD) {
            ALOGV("AUDIO_OUTPUT_FLAG_OFFLOAD successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_OFFLOAD denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }
    if (mFlags & AUDIO_OUTPUT_FLAG_DIRECT) {
        if (trackFlags & IAudioFlinger::TRACK_DIRECT) {
            ALOGV("AUDIO_OUTPUT_FLAG_DIRECT successful");
        } else {
            ALOGW("AUDIO_OUTPUT_FLAG_DIRECT denied by server");
            mFlags = (audio_output_flags_t) (mFlags & ~AUDIO_OUTPUT_FLAG_DIRECT);
            // FIXME This is a warning, not an error, so don't return error status
            //return NO_INIT;
        }
    }
    // Make sure that application is notified with sufficient margin before underrun
    if (mSharedBuffer == 0 && audio_is_linear_pcm(mFormat)) {
        // Theoretically double-buffering is not required for fast tracks,
        // due to tighter scheduling.  But in practice, to accommodate kernels with
        // scheduling jitter, and apps with computation jitter, we use double-buffering
        // for fast tracks just like normal streaming tracks.
        if (mNotificationFramesAct == 0 || mNotificationFramesAct > frameCount / nBuffering) {
            mNotificationFramesAct = frameCount / nBuffering;
        }
    }

    // We retain a copy of the I/O handle, but don't own the reference
    mOutput = output;
    mRefreshRemaining = true;

    // Starting address of buffers in shared memory.  If there is a shared buffer, buffers
    // is the value of pointer() for the shared buffer, otherwise buffers points
    // immediately after the control block.  This address is for the mapping within client
    // address space.  AudioFlinger::TrackBase::mBuffer is for the server address space.
    void* buffers;
    if (mSharedBuffer == 0) {
        buffers = (char*)cblk + sizeof(audio_track_cblk_t);
    } else {
        buffers = mSharedBuffer->pointer();
    }

    mAudioTrack->attachAuxEffect(mAuxEffectId);
    // FIXME don't believe this lie
    mLatency = afLatency + (1000*frameCount) / mSampleRate;
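    // Example with illustrative numbers only: afLatency = 50 ms, frameCount = 2400
    // and mSampleRate = 48000 report a latency of 50 + (1000 * 2400) / 48000 = 100 ms.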

    mFrameCount = frameCount;
    // If IAudioTrack is re-created, don't let the requested frameCount
    // decrease.  This can confuse clients that cache frameCount().
    if (frameCount > mReqFrameCount) {
        mReqFrameCount = frameCount;
    }

    // update proxy
    if (mSharedBuffer == 0) {
        mStaticProxy.clear();
        mProxy = new AudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
    } else {
        mStaticProxy = new StaticAudioTrackClientProxy(cblk, buffers, frameCount, mFrameSize);
        mProxy = mStaticProxy;
    }

    mProxy->setVolumeLR(gain_minifloat_pack(
            gain_from_float(mVolume[AUDIO_INTERLEAVE_LEFT]),
            gain_from_float(mVolume[AUDIO_INTERLEAVE_RIGHT])));

    mProxy->setSendLevel(mSendLevel);
    mProxy->setSampleRate(mSampleRate);
    mProxy->setMinimum(mNotificationFramesAct);

    mDeathNotifier = new DeathNotifier(this);
    IInterface::asBinder(mAudioTrack)->linkToDeath(mDeathNotifier, this);

    return NO_ERROR;
    }

release:
    AudioSystem::releaseOutput(output, streamType, (audio_session_t)mSessionId);
    if (status == NO_ERROR) {
        status = NO_INIT;
    }
    return status;
}

status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, int32_t waitCount, size_t *nonContig)
{
    if (audioBuffer == NULL) {
        return BAD_VALUE;
    }
    if (mTransfer != TRANSFER_OBTAIN) {
        audioBuffer->frameCount = 0;
        audioBuffer->size = 0;
        audioBuffer->raw = NULL;
        return INVALID_OPERATION;
    }

    const struct timespec *requested;
    struct timespec timeout;
    if (waitCount == -1) {
        requested = &ClientProxy::kForever;
    } else if (waitCount == 0) {
        requested = &ClientProxy::kNonBlocking;
    } else if (waitCount > 0) {
        long long ms = WAIT_PERIOD_MS * (long long) waitCount;
        timeout.tv_sec = ms / 1000;
        timeout.tv_nsec = (int) (ms % 1000) * 1000000;
        requested = &timeout;
    } else {
        ALOGE("%s invalid waitCount %d", __func__, waitCount);
        requested = NULL;
    }
    return obtainBuffer(audioBuffer, requested, NULL /*elapsed*/, nonContig);
}
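
// Illustrative sketch (not part of this implementation): in TRANSFER_OBTAIN
// mode a client fills the track by pairing obtainBuffer() with releaseBuffer().
// The fillPcm() helper below is a hypothetical placeholder.
//
//     AudioTrack::Buffer buffer;
//     buffer.frameCount = framesToWrite;
//     status_t err = track->obtainBuffer(&buffer, 1 /*waitCount*/, NULL /*nonContig*/);
//     if (err == NO_ERROR) {
//         fillPcm(buffer.raw, buffer.size);   // hypothetical helper
//         track->releaseBuffer(&buffer);
//     }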
1273
1274status_t AudioTrack::obtainBuffer(Buffer* audioBuffer, const struct timespec *requested,
1275        struct timespec *elapsed, size_t *nonContig)
1276{
1277    // previous and new IAudioTrack sequence numbers are used to detect track re-creation
1278    uint32_t oldSequence = 0;
1279    uint32_t newSequence;
1280
1281    Proxy::Buffer buffer;
1282    status_t status = NO_ERROR;
1283
1284    static const int32_t kMaxTries = 5;
1285    int32_t tryCounter = kMaxTries;
1286
1287    do {
1288        // obtainBuffer() is called with mutex unlocked, so keep extra references to these fields to
1289        // keep them from going away if another thread re-creates the track during obtainBuffer()
1290        sp<AudioTrackClientProxy> proxy;
1291        sp<IMemory> iMem;
1292
1293        {   // start of lock scope
1294            AutoMutex lock(mLock);
1295
1296            newSequence = mSequence;
1297            // did previous obtainBuffer() fail due to media server death or voluntary invalidation?
1298            if (status == DEAD_OBJECT) {
1299                // re-create track, unless someone else has already done so
1300                if (newSequence == oldSequence) {
1301                    status = restoreTrack_l("obtainBuffer");
1302                    if (status != NO_ERROR) {
1303                        buffer.mFrameCount = 0;
1304                        buffer.mRaw = NULL;
1305                        buffer.mNonContig = 0;
1306                        break;
1307                    }
1308                }
1309            }
1310            oldSequence = newSequence;
1311
1312            // Keep the extra references
1313            proxy = mProxy;
1314            iMem = mCblkMemory;
1315
1316            if (mState == STATE_STOPPING) {
1317                status = -EINTR;
1318                buffer.mFrameCount = 0;
1319                buffer.mRaw = NULL;
1320                buffer.mNonContig = 0;
1321                break;
1322            }
1323
1324            // Non-blocking if track is stopped or paused
1325            if (mState != STATE_ACTIVE) {
1326                requested = &ClientProxy::kNonBlocking;
1327            }
1328
1329        }   // end of lock scope
1330
1331        buffer.mFrameCount = audioBuffer->frameCount;
1332        // FIXME each retry restarts the requested timeout and elapsed time from scratch
1333        status = proxy->obtainBuffer(&buffer, requested, elapsed);
1334
1335    } while ((status == DEAD_OBJECT) && (tryCounter-- > 0));
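    // tryCounter is only decremented while the proxy keeps reporting DEAD_OBJECT, so at most
    // kMaxTries re-creation attempts follow the initial obtainBuffer() before giving up.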
1336
1337    audioBuffer->frameCount = buffer.mFrameCount;
1338    audioBuffer->size = buffer.mFrameCount * mFrameSize;
1339    audioBuffer->raw = buffer.mRaw;
1340    if (nonContig != NULL) {
1341        *nonContig = buffer.mNonContig;
1342    }
1343    return status;
1344}
1345
1346void AudioTrack::releaseBuffer(const Buffer* audioBuffer)
1347{
1348    // FIXME add error checking on mode, by adding an internal version
1349    if (mTransfer == TRANSFER_SHARED) {
1350        return;
1351    }
1352
1353    size_t stepCount = audioBuffer->size / mFrameSize;
1354    if (stepCount == 0) {
1355        return;
1356    }
1357
1358    Proxy::Buffer buffer;
1359    buffer.mFrameCount = stepCount;
1360    buffer.mRaw = audioBuffer->raw;
1361
1362    AutoMutex lock(mLock);
1363    mReleased += stepCount;
1364    mInUnderrun = false;
1365    mProxy->releaseBuffer(&buffer);
1366
1367    // restart track if it was disabled by audioflinger due to previous underrun
1368    if (mState == STATE_ACTIVE) {
1369        audio_track_cblk_t* cblk = mCblk;
1370        if (android_atomic_and(~CBLK_DISABLED, &cblk->mFlags) & CBLK_DISABLED) {
1371            ALOGW("releaseBuffer() track %p disabled due to previous underrun, restarting", this);
1372            // FIXME ignoring status
1373            mAudioTrack->start();
1374        }
1375    }
1376}
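// Minimal sketch of the TRANSFER_OBTAIN client pattern served by obtainBuffer()/releaseBuffer()
// above.  Illustrative only: "track", "framesWanted" and "pcmSource" are placeholders, and a real
// client must also handle partial grants and error codes.
//
//     AudioTrack::Buffer buf;
//     buf.frameCount = framesWanted;
//     if (track->obtainBuffer(&buf, -1 /*wait forever*/, NULL) == NO_ERROR) {
//         memcpy(buf.raw, pcmSource, buf.size);   // buf.size may be less than requested
//         track->releaseBuffer(&buf);             // hand the filled frames back to the mixer
//     }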
1377
1378// -------------------------------------------------------------------------
1379
1380ssize_t AudioTrack::write(const void* buffer, size_t userSize, bool blocking)
1381{
1382    if (mTransfer != TRANSFER_SYNC || mIsTimed) {
1383        return INVALID_OPERATION;
1384    }
1385
1386    if (isDirect()) {
1387        AutoMutex lock(mLock);
1388        int32_t flags = android_atomic_and(
1389                            ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END),
1390                            &mCblk->mFlags);
1391        if (flags & CBLK_INVALID) {
1392            return DEAD_OBJECT;
1393        }
1394    }
1395
1396    if (ssize_t(userSize) < 0 || (buffer == NULL && userSize != 0)) {
1397        // Sanity-check: the user is most likely passing an error code, and it would
1398        // make the return value ambiguous (actualSize vs error).
1399        ALOGE("AudioTrack::write(buffer=%p, size=%zu (%zd))", buffer, userSize, userSize);
1400        return BAD_VALUE;
1401    }
1402
1403    size_t written = 0;
1404    Buffer audioBuffer;
1405
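    // Only whole frames are transferred below: any trailing bytes of userSize that do not form a
    // complete frame are left unwritten, and the returned byte count reflects that.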
1406    while (userSize >= mFrameSize) {
1407        audioBuffer.frameCount = userSize / mFrameSize;
1408
1409        status_t err = obtainBuffer(&audioBuffer,
1410                blocking ? &ClientProxy::kForever : &ClientProxy::kNonBlocking);
1411        if (err < 0) {
1412            if (written > 0) {
1413                break;
1414            }
1415            return ssize_t(err);
1416        }
1417
1418        size_t toWrite = audioBuffer.size;
1419        memcpy(audioBuffer.i8, buffer, toWrite);
1420        buffer = ((const char *) buffer) + toWrite;
1421        userSize -= toWrite;
1422        written += toWrite;
1423
1424        releaseBuffer(&audioBuffer);
1425    }
1426
1427    return written;
1428}
1429
1430// -------------------------------------------------------------------------
1431
1432TimedAudioTrack::TimedAudioTrack() {
1433    mIsTimed = true;
1434}
1435
1436status_t TimedAudioTrack::allocateTimedBuffer(size_t size, sp<IMemory>* buffer)
1437{
1438    AutoMutex lock(mLock);
1439    status_t result = UNKNOWN_ERROR;
1440
1441#if 1
1442    // acquire a strong reference on the IMemory and IAudioTrack so that they cannot be destroyed
1443    // while we are accessing the cblk
1444    sp<IAudioTrack> audioTrack = mAudioTrack;
1445    sp<IMemory> iMem = mCblkMemory;
1446#endif
1447
1448    // If the track is not invalid already, try to allocate a buffer.  If the
1449    // allocation fails, indicating that the server is dead, flag the track as
1450    // invalid so that we can attempt to restore it in just a bit.
1451    audio_track_cblk_t* cblk = mCblk;
1452    if (!(cblk->mFlags & CBLK_INVALID)) {
1453        result = mAudioTrack->allocateTimedBuffer(size, buffer);
1454        if (result == DEAD_OBJECT) {
1455            android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1456        }
1457    }
1458
1459    // If the track is invalid at this point, attempt to restore it and try the
1460    // allocation one more time.
1461    if (cblk->mFlags & CBLK_INVALID) {
1462        result = restoreTrack_l("allocateTimedBuffer");
1463
1464        if (result == NO_ERROR) {
1465            result = mAudioTrack->allocateTimedBuffer(size, buffer);
1466        }
1467    }
1468
1469    return result;
1470}
1471
1472status_t TimedAudioTrack::queueTimedBuffer(const sp<IMemory>& buffer,
1473                                           int64_t pts)
1474{
1475    status_t status = mAudioTrack->queueTimedBuffer(buffer, pts);
1476    {
1477        AutoMutex lock(mLock);
1478        audio_track_cblk_t* cblk = mCblk;
1479        // restart track if it was disabled by audioflinger due to previous underrun
1480        if (buffer->size() != 0 && status == NO_ERROR &&
1481                (mState == STATE_ACTIVE) && (cblk->mFlags & CBLK_DISABLED)) {
1482            android_atomic_and(~CBLK_DISABLED, &cblk->mFlags);
1483            ALOGW("queueTimedBuffer() track %p disabled, restarting", this);
1484            // FIXME ignoring status
1485            mAudioTrack->start();
1486        }
1487    }
1488    return status;
1489}
1490
1491status_t TimedAudioTrack::setMediaTimeTransform(const LinearTransform& xform,
1492                                                TargetTimeline target)
1493{
1494    return mAudioTrack->setMediaTimeTransform(xform, target);
1495}
1496
1497// -------------------------------------------------------------------------
1498
1499nsecs_t AudioTrack::processAudioBuffer()
1500{
1501    // Currently the AudioTrack thread is not created if there are no callbacks.
1502    // Would it ever make sense to run the thread, even without callbacks?
1503    // If so, then replace this by checks at each use for mCbf != NULL.
1504    LOG_ALWAYS_FATAL_IF(mCblk == NULL);
1505
1506    mLock.lock();
1507    if (mAwaitBoost) {
1508        mAwaitBoost = false;
1509        mLock.unlock();
1510        static const int32_t kMaxTries = 5;
1511        int32_t tryCounter = kMaxTries;
1512        uint32_t pollUs = 10000;
1513        do {
1514            int policy = sched_getscheduler(0);
1515            if (policy == SCHED_FIFO || policy == SCHED_RR) {
1516                break;
1517            }
1518            usleep(pollUs);
1519            pollUs <<= 1;
1520        } while (tryCounter-- > 0);
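        // The poll interval doubles on each pass (10, 20, 40, ... ms), so with kMaxTries == 5 the
        // loop waits roughly 0.6 seconds in total before giving up and logging the error below.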
1521        if (tryCounter < 0) {
1522            ALOGE("did not receive expected priority boost on time");
1523        }
1524        // Run again immediately
1525        return 0;
1526    }
1527
1528    // Can only reference mCblk while locked
1529    int32_t flags = android_atomic_and(
1530        ~(CBLK_UNDERRUN | CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL | CBLK_BUFFER_END), &mCblk->mFlags);
1531
1532    // Check for track invalidation
1533    if (flags & CBLK_INVALID) {
1534        // For offloaded tracks, restoreTrack_l() will just update the sequence and clear
1535        // the AudioSystem cache. We should not exit here, but only after calling the
1536        // callback, so that the upper layers can recreate the track.
1537        if (!isOffloadedOrDirect_l() || (mSequence == mObservedSequence)) {
1538            status_t status = restoreTrack_l("processAudioBuffer");
1539            // after restoration, continue below to make sure that the loop and buffer events
1540            // are notified because they have been cleared from mCblk->mFlags above.
1541        }
1542    }
1543
1544    bool waitStreamEnd = mState == STATE_STOPPING;
1545    bool active = mState == STATE_ACTIVE;
1546
1547    // Manage underrun callback, must be done under lock to avoid race with releaseBuffer()
1548    bool newUnderrun = false;
1549    if (flags & CBLK_UNDERRUN) {
1550#if 0
1551        // Currently in shared buffer mode, when the server reaches the end of buffer,
1552        // the track stays active in continuous underrun state.  It's up to the application
1553        // to pause or stop the track, or set the position to a new offset within buffer.
1554        // This was some experimental code to auto-pause on underrun.   Keeping it here
1555        // in "if 0" so we can re-visit this if we add a real sequencer for shared memory content.
1556        if (mTransfer == TRANSFER_SHARED) {
1557            mState = STATE_PAUSED;
1558            active = false;
1559        }
1560#endif
1561        if (!mInUnderrun) {
1562            mInUnderrun = true;
1563            newUnderrun = true;
1564        }
1565    }
1566
1567    // Get current position of server
1568    size_t position = updateAndGetPosition_l();
1569
1570    // Manage marker callback
1571    bool markerReached = false;
1572    size_t markerPosition = mMarkerPosition;
1573    // FIXME fails for wraparound, need 64 bits
1574    if (!mMarkerReached && (markerPosition > 0) && (position >= markerPosition)) {
1575        mMarkerReached = markerReached = true;
1576    }
1577
1578    // Determine number of new position callback(s) that will be needed, while locked
1579    size_t newPosCount = 0;
1580    size_t newPosition = mNewPosition;
1581    size_t updatePeriod = mUpdatePeriod;
1582    // FIXME fails for wraparound, need 64 bits
1583    if (updatePeriod > 0 && position >= newPosition) {
1584        newPosCount = ((position - newPosition) / updatePeriod) + 1;
1585        mNewPosition += updatePeriod * newPosCount;
1586    }
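    // Example: with updatePeriod == 1000, mNewPosition == 5000 and position == 7500, three
    // EVENT_NEW_POS callbacks are due (for 5000, 6000 and 7000) and mNewPosition advances to 8000.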
1587
1588    // Cache other fields that will be needed soon
1589    uint32_t sampleRate = mSampleRate;
1590    uint32_t notificationFrames = mNotificationFramesAct;
1591    if (mRefreshRemaining) {
1592        mRefreshRemaining = false;
1593        mRemainingFrames = notificationFrames;
1594        mRetryOnPartialBuffer = false;
1595    }
1596    size_t misalignment = mProxy->getMisalignment();
1597    uint32_t sequence = mSequence;
1598    sp<AudioTrackClientProxy> proxy = mProxy;
1599
1600    // Determine the number of new loop callback(s) that will be needed, while locked.
1601    int loopCountNotifications = 0;
1602    uint32_t loopPeriod = 0; // time in frames for next EVENT_LOOP_END or EVENT_BUFFER_END
1603
1604    if (mLoopCount > 0) {
1605        int loopCount;
1606        size_t bufferPosition;
1607        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1608        loopPeriod = ((loopCount > 0) ? mLoopEnd : mFrameCount) - bufferPosition;
1609        loopCountNotifications = min(mLoopCountNotified - loopCount, kMaxLoopCountNotifications);
1610        mLoopCountNotified = loopCount; // discard any excess notifications
1611    } else if (mLoopCount < 0) {
1612        // FIXME: We're not accurate with notification count and position with infinite looping
1613        // since loopCount from server side will always return -1 (we could decrement it).
1614        size_t bufferPosition = mStaticProxy->getBufferPosition();
1615        loopCountNotifications = int((flags & (CBLK_LOOP_CYCLE | CBLK_LOOP_FINAL)) != 0);
1616        loopPeriod = mLoopEnd - bufferPosition;
1617    } else if (/* mLoopCount == 0 && */ mSharedBuffer != 0) {
1618        size_t bufferPosition = mStaticProxy->getBufferPosition();
1619        loopPeriod = mFrameCount - bufferPosition;
1620    }
1621
1622    // These fields don't need to be cached, because they are assigned only by set():
1623    //     mTransfer, mCbf, mUserData, mFormat, mFrameSize, mFlags
1624    // mFlags is also assigned by createTrack_l(), but not the bit we care about.
1625
1626    mLock.unlock();
1627
1628    if (waitStreamEnd) {
1629        struct timespec timeout;
1630        timeout.tv_sec = WAIT_STREAM_END_TIMEOUT_SEC;
1631        timeout.tv_nsec = 0;
1632
1633        status_t status = proxy->waitStreamEndDone(&timeout);
1634        switch (status) {
1635        case NO_ERROR:
1636        case DEAD_OBJECT:
1637        case TIMED_OUT:
1638            mCbf(EVENT_STREAM_END, mUserData, NULL);
1639            {
1640                AutoMutex lock(mLock);
1641                // The previously assigned value of waitStreamEnd is no longer valid,
1642                // since the mutex has been unlocked and either the callback handler
1643                // or another thread could have re-started the AudioTrack during that time.
1644                waitStreamEnd = mState == STATE_STOPPING;
1645                if (waitStreamEnd) {
1646                    mState = STATE_STOPPED;
1647                    mReleased = 0;
1648                }
1649            }
1650            if (waitStreamEnd && status != DEAD_OBJECT) {
1651               return NS_INACTIVE;
1652            }
1653            break;
1654        }
1655        return 0;
1656    }
1657
1658    // perform callbacks while unlocked
1659    if (newUnderrun) {
1660        mCbf(EVENT_UNDERRUN, mUserData, NULL);
1661    }
1662    while (loopCountNotifications > 0) {
1663        mCbf(EVENT_LOOP_END, mUserData, NULL);
1664        --loopCountNotifications;
1665    }
1666    if (flags & CBLK_BUFFER_END) {
1667        mCbf(EVENT_BUFFER_END, mUserData, NULL);
1668    }
1669    if (markerReached) {
1670        mCbf(EVENT_MARKER, mUserData, &markerPosition);
1671    }
1672    while (newPosCount > 0) {
1673        size_t temp = newPosition;
1674        mCbf(EVENT_NEW_POS, mUserData, &temp);
1675        newPosition += updatePeriod;
1676        newPosCount--;
1677    }
1678
1679    if (mObservedSequence != sequence) {
1680        mObservedSequence = sequence;
1681        mCbf(EVENT_NEW_IAUDIOTRACK, mUserData, NULL);
1682        // for offloaded tracks, just wait for the upper layers to recreate the track
1683        if (isOffloadedOrDirect()) {
1684            return NS_INACTIVE;
1685        }
1686    }
1687
1688    // if inactive, then don't run me again until re-started
1689    if (!active) {
1690        return NS_INACTIVE;
1691    }
1692
1693    // Compute the estimated time until the next timed event (position, markers, loops)
1694    // FIXME only for non-compressed audio
1695    uint32_t minFrames = ~0;
1696    if (!markerReached && position < markerPosition) {
1697        minFrames = markerPosition - position;
1698    }
1699    if (loopPeriod > 0 && loopPeriod < minFrames) {
1700        // loopPeriod is already adjusted for actual position.
1701        minFrames = loopPeriod;
1702    }
1703    if (updatePeriod > 0) {
1704        minFrames = min(minFrames, uint32_t(newPosition - position));
1705    }
1706
1707    // If > 0, poll periodically to recover from a stuck server.  A good value is 2.
1708    static const uint32_t kPoll = 0;
1709    if (kPoll > 0 && mTransfer == TRANSFER_CALLBACK && kPoll * notificationFrames < minFrames) {
1710        minFrames = kPoll * notificationFrames;
1711    }
1712
1713    // Convert frame units to time units
1714    nsecs_t ns = NS_WHENEVER;
1715    if (minFrames != (uint32_t) ~0) {
1716        // This "fudge factor" avoids soaking CPU, and compensates for late progress by server
1717        static const nsecs_t kFudgeNs = 10000000LL; // 10 ms
1718        ns = ((minFrames * 1000000000LL) / sampleRate) + kFudgeNs;
1719    }
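    // Example: minFrames == 480 at sampleRate == 48000 is 10 ms of audio; with the 10 ms fudge
    // factor the thread is asked to sleep for about 20 ms.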
1720
1721    // If not supplying data by EVENT_MORE_DATA, then we're done
1722    if (mTransfer != TRANSFER_CALLBACK) {
1723        return ns;
1724    }
1725
1726    struct timespec timeout;
1727    const struct timespec *requested = &ClientProxy::kForever;
1728    if (ns != NS_WHENEVER) {
1729        timeout.tv_sec = ns / 1000000000LL;
1730        timeout.tv_nsec = ns % 1000000000LL;
1731        ALOGV("timeout %ld.%03d", timeout.tv_sec, (int) timeout.tv_nsec / 1000000);
1732        requested = &timeout;
1733    }
1734
1735    while (mRemainingFrames > 0) {
1736
1737        Buffer audioBuffer;
1738        audioBuffer.frameCount = mRemainingFrames;
1739        size_t nonContig;
1740        status_t err = obtainBuffer(&audioBuffer, requested, NULL, &nonContig);
1741        LOG_ALWAYS_FATAL_IF((err != NO_ERROR) != (audioBuffer.frameCount == 0),
1742                "obtainBuffer() err=%d frameCount=%zu", err, audioBuffer.frameCount);
1743        requested = &ClientProxy::kNonBlocking;
1744        size_t avail = audioBuffer.frameCount + nonContig;
1745        ALOGV("obtainBuffer(%u) returned %zu = %zu + %zu err %d",
1746                mRemainingFrames, avail, audioBuffer.frameCount, nonContig, err);
1747        if (err != NO_ERROR) {
1748            if (err == TIMED_OUT || err == WOULD_BLOCK || err == -EINTR ||
1749                    (isOffloaded() && (err == DEAD_OBJECT))) {
1750                return 0;
1751            }
1752            ALOGE("Error %d obtaining an audio buffer, giving up.", err);
1753            return NS_NEVER;
1754        }
1755
1756        if (mRetryOnPartialBuffer && !isOffloaded()) {
1757            mRetryOnPartialBuffer = false;
1758            if (avail < mRemainingFrames) {
1759                int64_t myns = ((mRemainingFrames - avail) * 1100000000LL) / sampleRate;
1760                if (ns < 0 || myns < ns) {
1761                    ns = myns;
1762                }
1763                return ns;
1764            }
1765        }
1766
1767        size_t reqSize = audioBuffer.size;
1768        mCbf(EVENT_MORE_DATA, mUserData, &audioBuffer);
1769        size_t writtenSize = audioBuffer.size;
1770
1771        // Sanity check on returned size
1772        if (ssize_t(writtenSize) < 0 || writtenSize > reqSize) {
1773            ALOGE("EVENT_MORE_DATA requested %zu bytes but callback returned %zd bytes",
1774                    reqSize, ssize_t(writtenSize));
1775            return NS_NEVER;
1776        }
1777
1778        if (writtenSize == 0) {
1779            // The callback is done filling buffers.
1780            // Keep this thread going to handle timed events and to keep trying to get
1781            // more data at intervals of WAIT_PERIOD_MS, but don't busy-loop and hog the
1782            // CPU, so ask the thread loop to wait before running again.
1783            return WAIT_PERIOD_MS * 1000000LL;
1784        }
1785
1786        size_t releasedFrames = audioBuffer.size / mFrameSize;
1787        audioBuffer.frameCount = releasedFrames;
1788        mRemainingFrames -= releasedFrames;
1789        if (misalignment >= releasedFrames) {
1790            misalignment -= releasedFrames;
1791        } else {
1792            misalignment = 0;
1793        }
1794
1795        releaseBuffer(&audioBuffer);
1796
1797        // FIXME here is where we would repeat EVENT_MORE_DATA again on the same advanced buffer
1798        // if the callback does not accept the full chunk
1799        if (writtenSize < reqSize) {
1800            continue;
1801        }
1802
1803        // There could be enough non-contiguous frames available to satisfy the remaining request
1804        if (mRemainingFrames <= nonContig) {
1805            continue;
1806        }
1807
1808#if 0
1809        // This heuristic tries to collapse a series of EVENT_MORE_DATA that would total to a
1810        // sum <= notificationFrames.  It replaces that series by at most two EVENT_MORE_DATA
1811        // that total to a sum == notificationFrames.
1812        if (0 < misalignment && misalignment <= mRemainingFrames) {
1813            mRemainingFrames = misalignment;
1814            return (mRemainingFrames * 1100000000LL) / sampleRate;
1815        }
1816#endif
1817
1818    }
1819    mRemainingFrames = notificationFrames;
1820    mRetryOnPartialBuffer = true;
1821
1822    // A lot has transpired since ns was calculated, so run again immediately and re-calculate
1823    return 0;
1824}
1825
1826status_t AudioTrack::restoreTrack_l(const char *from)
1827{
1828    ALOGW("dead IAudioTrack, %s, creating a new one from %s()",
1829          isOffloadedOrDirect_l() ? "Offloaded or Direct" : "PCM", from);
1830    ++mSequence;
1831
1832    // refresh the audio configuration cache in this process to make sure we get new
1833    // output parameters and new IAudioFlinger in createTrack_l()
1834    AudioSystem::clearAudioConfigCache();
1835
1836    if (isOffloadedOrDirect_l()) {
1837        // FIXME re-creation of offloaded tracks is not yet implemented
1838        return DEAD_OBJECT;
1839    }
1840
1841    // save the old static buffer position
1842    size_t bufferPosition = 0;
1843    int loopCount = 0;
1844    if (mStaticProxy != 0) {
1845        mStaticProxy->getBufferPositionAndLoopCount(&bufferPosition, &loopCount);
1846    }
1847
1848    // If a new IAudioTrack is successfully created, createTrack_l() will modify the
1849    // following member variables: mAudioTrack, mCblkMemory and mCblk.
1850    // It will also delete the strong references on previous IAudioTrack and IMemory.
1851    // If a new IAudioTrack cannot be created, the previous (dead) instance will be left intact.
1852    status_t result = createTrack_l();
1853
1854    // Take the frames that will be lost by track re-creation into account in the saved position.
1855    // For streaming tracks, this is the amount we obtained from the user/client
1856    // (not the number actually consumed at the server - those are already lost).
1857    (void) updateAndGetPosition_l();
1858    if (mStaticProxy != 0) {
1859        mPosition = mReleased;
1860    }
1861
1862    if (result == NO_ERROR) {
1863        // Continue playback from last known position and restore loop.
1864        if (mStaticProxy != 0) {
1865            if (loopCount != 0) {
1866                mStaticProxy->setBufferPositionAndLoop(bufferPosition,
1867                        mLoopStart, mLoopEnd, loopCount);
1868            } else {
1869                mStaticProxy->setBufferPosition(bufferPosition);
1870                if (bufferPosition == mFrameCount) {
1871                    ALOGD("restoring track at end of static buffer");
1872                }
1873            }
1874        }
1875        if (mState == STATE_ACTIVE) {
1876            result = mAudioTrack->start();
1877        }
1878    }
1879    if (result != NO_ERROR) {
1880        ALOGW("restoreTrack_l() failed status %d", result);
1881        mState = STATE_STOPPED;
1882        mReleased = 0;
1883    }
1884
1885    return result;
1886}
1887
1888uint32_t AudioTrack::updateAndGetPosition_l()
1889{
1890    // This is the sole place to read server consumed frames
1891    uint32_t newServer = mProxy->getPosition();
1892    int32_t delta = newServer - mServer;
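    // The unsigned subtraction handles 32-bit wraparound: e.g. mServer == 0xFFFFFFF8 and
    // newServer == 0x00000010 yield delta == 24 frames rather than a large negative value.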
1893    mServer = newServer;
1894    // TODO There is controversy about whether there can be "negative jitter" in server position.
1895    //      This should be investigated further, and if possible, it should be addressed.
1896    //      A more definite failure mode is infrequent polling by client.
1897    //      One could call (void)getPosition_l() in releaseBuffer(),
1898    //      so mReleased and mPosition are always lock-step as best possible.
1899    //      That should ensure delta never goes negative for infrequent polling
1900    //      unless the server has more than 2^31 frames in its buffer,
1901    //      in which case the use of uint32_t for these counters has bigger issues.
1902    if (delta < 0) {
1903        ALOGE("detected illegal retrograde motion by the server: mServer advanced by %d", delta);
1904        delta = 0;
1905    }
1906    return mPosition += (uint32_t) delta;
1907}
1908
1909status_t AudioTrack::setParameters(const String8& keyValuePairs)
1910{
1911    AutoMutex lock(mLock);
1912    return mAudioTrack->setParameters(keyValuePairs);
1913}
1914
1915status_t AudioTrack::getTimestamp(AudioTimestamp& timestamp)
1916{
1917    AutoMutex lock(mLock);
1918    // FIXME not implemented for fast tracks; should use proxy and SSQ
1919    if (mFlags & AUDIO_OUTPUT_FLAG_FAST) {
1920        return INVALID_OPERATION;
1921    }
1922
1923    switch (mState) {
1924    case STATE_ACTIVE:
1925    case STATE_PAUSED:
1926        break; // handle below
1927    case STATE_FLUSHED:
1928    case STATE_STOPPED:
1929        return WOULD_BLOCK;
1930    case STATE_STOPPING:
1931    case STATE_PAUSED_STOPPING:
1932        if (!isOffloaded_l()) {
1933            return INVALID_OPERATION;
1934        }
1935        break; // offloaded tracks handled below
1936    default:
1937        LOG_ALWAYS_FATAL("Invalid mState in getTimestamp(): %d", mState);
1938        break;
1939    }
1940
1941    if (mCblk->mFlags & CBLK_INVALID) {
1942        restoreTrack_l("getTimestamp");
1943    }
1944
1945    // The presented frame count must always lag behind the consumed frame count.
1946    // To avoid a race, read the presented frames first.  This ensures that presented <= consumed.
1947    status_t status = mAudioTrack->getTimestamp(timestamp);
1948    if (status != NO_ERROR) {
1949        ALOGV_IF(status != WOULD_BLOCK, "getTimestamp error:%#x", status);
1950        return status;
1951    }
1952    if (isOffloadedOrDirect_l()) {
1953        if (isOffloaded_l() && (mState == STATE_PAUSED || mState == STATE_PAUSED_STOPPING)) {
1954            // use cached paused position in case another offloaded track is running.
1955            timestamp.mPosition = mPausedPosition;
1956            clock_gettime(CLOCK_MONOTONIC, &timestamp.mTime);
1957            return NO_ERROR;
1958        }
1959
1960        // Check whether a pending flush or stop has completed, as those commands may
1961        // be asynchronous or may return slightly before they have fully taken effect.
1962        if (mStartUs != 0 && mSampleRate != 0) {
1963            static const int kTimeJitterUs = 100000; // 100 ms
1964            static const int k1SecUs = 1000000;
1965
1966            const int64_t timeNow = getNowUs();
1967
1968            if (timeNow < mStartUs + k1SecUs) { // within first second of starting
1969                const int64_t timestampTimeUs = convertTimespecToUs(timestamp.mTime);
1970                if (timestampTimeUs < mStartUs) {
1971                    return WOULD_BLOCK;  // stale timestamp time, occurs before start.
1972                }
1973                const int64_t deltaTimeUs = timestampTimeUs - mStartUs;
1974                const int64_t deltaPositionByUs = timestamp.mPosition * 1000000LL / mSampleRate;
1975
1976                if (deltaPositionByUs > deltaTimeUs + kTimeJitterUs) {
1977                    // Verify that the counter can't count faster than the sample rate
1978                    // since the start time.  If greater, then that means we have failed
1979                    // to completely flush or stop the previous playing track.
1980                    ALOGW("incomplete flush or stop:"
1981                            " deltaTimeUs(%lld) deltaPositionUs(%lld) tsmPosition(%u)",
1982                            (long long)deltaTimeUs, (long long)deltaPositionByUs,
1983                            timestamp.mPosition);
1984                    return WOULD_BLOCK;
1985                }
1986            }
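            // Example of the check above: a track started 300 ms ago whose timestamp already
            // claims 500 ms worth of frames exceeds the 100 ms jitter allowance, so the data is
            // treated as stale output from the previous track and WOULD_BLOCK is returned.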
1987            mStartUs = 0; // no need to check again; the start timestamp has either expired or is no longer needed.
1988        }
1989    } else {
1990        // Update the mapping between local consumed (mPosition) and server consumed (mServer)
1991        (void) updateAndGetPosition_l();
1992        // Server consumed (mServer) and presented both use the same server time base,
1993        // and server consumed is always >= presented.
1994        // The delta between these represents the number of frames in the buffer pipeline.
1995        // If this delta is greater than the client position, it means that the presented
1996        // position is still stuck at the starting line (figuratively speaking),
1997        // waiting for the first frame to go by.  So we can't report a valid timestamp yet.
1998        if ((uint32_t) (mServer - timestamp.mPosition) > mPosition) {
1999            return INVALID_OPERATION;
2000        }
2001        // Convert timestamp position from server time base to client time base.
2002        // TODO The following code should work OK now because timestamp.mPosition is 32-bit.
2003        // But if we change it to 64-bit then this could fail.
2004        // If (mPosition - mServer) can be negative then should use:
2005        //   (int32_t)(mPosition - mServer)
2006        timestamp.mPosition += mPosition - mServer;
2007        // Immediately after a call to getPosition_l(), mPosition and
2008        // mServer both represent the same frame position.  mPosition is
2009        // in client's point of view, and mServer is in server's point of
2010        // view.  So the difference between them is the "fudge factor"
2011        // between client and server views due to stop() and/or new
2012        // IAudioTrack.  And timestamp.mPosition is initially in server's
2013        // point of view, so we need to apply the same fudge factor to it.
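        // Example of that mapping: after a restore, mServer may have restarted at 0 and advanced
        // to 200 while the client-continuous mPosition is 1000; a server-side timestamp.mPosition
        // of 150 then maps to 150 + (1000 - 200) = 950 frames from the client's point of view.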
2014    }
2015    return status;
2016}
2017
2018String8 AudioTrack::getParameters(const String8& keys)
2019{
2020    audio_io_handle_t output = getOutput();
2021    if (output != AUDIO_IO_HANDLE_NONE) {
2022        return AudioSystem::getParameters(output, keys);
2023    } else {
2024        return String8::empty();
2025    }
2026}
2027
2028bool AudioTrack::isOffloaded() const
2029{
2030    AutoMutex lock(mLock);
2031    return isOffloaded_l();
2032}
2033
2034bool AudioTrack::isDirect() const
2035{
2036    AutoMutex lock(mLock);
2037    return isDirect_l();
2038}
2039
2040bool AudioTrack::isOffloadedOrDirect() const
2041{
2042    AutoMutex lock(mLock);
2043    return isOffloadedOrDirect_l();
2044}
2045
2046
2047status_t AudioTrack::dump(int fd, const Vector<String16>& args __unused) const
2048{
2049
2050    const size_t SIZE = 256;
2051    char buffer[SIZE];
2052    String8 result;
2053
2054    result.append(" AudioTrack::dump\n");
2055    snprintf(buffer, 255, "  stream type(%d), left - right volume(%f, %f)\n", mStreamType,
2056            mVolume[AUDIO_INTERLEAVE_LEFT], mVolume[AUDIO_INTERLEAVE_RIGHT]);
2057    result.append(buffer);
2058    snprintf(buffer, 255, "  format(%d), channel count(%d), frame count(%zu)\n", mFormat,
2059            mChannelCount, mFrameCount);
2060    result.append(buffer);
2061    snprintf(buffer, 255, "  sample rate(%u), status(%d)\n", mSampleRate, mStatus);
2062    result.append(buffer);
2063    snprintf(buffer, 255, "  state(%d), latency (%d)\n", mState, mLatency);
2064    result.append(buffer);
2065    ::write(fd, result.string(), result.size());
2066    return NO_ERROR;
2067}
2068
2069uint32_t AudioTrack::getUnderrunFrames() const
2070{
2071    AutoMutex lock(mLock);
2072    return mProxy->getUnderrunFrames();
2073}
2074
2075// =========================================================================
2076
2077void AudioTrack::DeathNotifier::binderDied(const wp<IBinder>& who __unused)
2078{
2079    sp<AudioTrack> audioTrack = mAudioTrack.promote();
2080    if (audioTrack != 0) {
2081        AutoMutex lock(audioTrack->mLock);
2082        audioTrack->mProxy->binderDied();
2083    }
2084}
2085
2086// =========================================================================
2087
2088AudioTrack::AudioTrackThread::AudioTrackThread(AudioTrack& receiver, bool bCanCallJava)
2089    : Thread(bCanCallJava), mReceiver(receiver), mPaused(true), mPausedInt(false), mPausedNs(0LL),
2090      mIgnoreNextPausedInt(false)
2091{
2092}
2093
2094AudioTrack::AudioTrackThread::~AudioTrackThread()
2095{
2096}
2097
2098bool AudioTrack::AudioTrackThread::threadLoop()
2099{
2100    {
2101        AutoMutex _l(mMyLock);
2102        if (mPaused) {
2103            mMyCond.wait(mMyLock);
2104            // caller will check for exitPending()
2105            return true;
2106        }
2107        if (mIgnoreNextPausedInt) {
2108            mIgnoreNextPausedInt = false;
2109            mPausedInt = false;
2110        }
2111        if (mPausedInt) {
2112            if (mPausedNs > 0) {
2113                (void) mMyCond.waitRelative(mMyLock, mPausedNs);
2114            } else {
2115                mMyCond.wait(mMyLock);
2116            }
2117            mPausedInt = false;
2118            return true;
2119        }
2120    }
2121    if (exitPending()) {
2122        return false;
2123    }
2124    nsecs_t ns = mReceiver.processAudioBuffer();
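    // The switch below applies the processAudioBuffer() contract: 0 means run again immediately,
    // NS_INACTIVE pauses the thread until it is resumed, NS_NEVER ends the thread, NS_WHENEVER
    // parks it until wake() is called, and any other positive value is a sleep time in ns.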
2125    switch (ns) {
2126    case 0:
2127        return true;
2128    case NS_INACTIVE:
2129        pauseInternal();
2130        return true;
2131    case NS_NEVER:
2132        return false;
2133    case NS_WHENEVER:
2134        // Event driven: call wake() when callback notification conditions change.
2135        ns = INT64_MAX;
2136        // fall through
2137    default:
2138        LOG_ALWAYS_FATAL_IF(ns < 0, "processAudioBuffer() returned %" PRId64, ns);
2139        pauseInternal(ns);
2140        return true;
2141    }
2142}
2143
2144void AudioTrack::AudioTrackThread::requestExit()
2145{
2146    // must be in this order to avoid a race condition
2147    Thread::requestExit();
2148    resume();
2149}
2150
2151void AudioTrack::AudioTrackThread::pause()
2152{
2153    AutoMutex _l(mMyLock);
2154    mPaused = true;
2155}
2156
2157void AudioTrack::AudioTrackThread::resume()
2158{
2159    AutoMutex _l(mMyLock);
2160    mIgnoreNextPausedInt = true;
2161    if (mPaused || mPausedInt) {
2162        mPaused = false;
2163        mPausedInt = false;
2164        mMyCond.signal();
2165    }
2166}
2167
2168void AudioTrack::AudioTrackThread::wake()
2169{
2170    AutoMutex _l(mMyLock);
2171    if (!mPaused && mPausedInt && mPausedNs > 0) {
2172        // audio track is active and internally paused with timeout.
2173        mIgnoreNextPausedInt = true;
2174        mPausedInt = false;
2175        mMyCond.signal();
2176    }
2177}
2178
2179void AudioTrack::AudioTrackThread::pauseInternal(nsecs_t ns)
2180{
2181    AutoMutex _l(mMyLock);
2182    mPausedInt = true;
2183    mPausedNs = ns;
2184}
2185
2186} // namespace android
2187