// Tracks.cpp revision 462fd2fa9eef642b0574aa7409de0bde3fec8d43
/*
**
** Copyright 2012, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/


#define LOG_TAG "AudioFlinger"
//#define LOG_NDEBUG 0

#include "Configuration.h"
#include <math.h>
#include <utils/Log.h>

#include <private/media/AudioTrackShared.h>

#include <common_time/cc_helper.h>
#include <common_time/local_clock.h>

#include "AudioMixer.h"
#include "AudioFlinger.h"
#include "ServiceUtilities.h"

#include <media/nbaio/Pipe.h>
#include <media/nbaio/PipeReader.h>

// ----------------------------------------------------------------------------

// Note: the following macro is used for extremely verbose logging messages.  In
// order to run with ALOG_ASSERT turned on, we need to have LOG_NDEBUG set to
// 0; but one side effect of this is to turn on all LOGV's as well.  Some messages
// are so verbose that we want to suppress them even when we have ALOG_ASSERT
// turned on.  Do not uncomment the #define below unless you really know what you
// are doing and want to see all of the extremely verbose messages.
//#define VERY_VERY_VERBOSE_LOGGING
#ifdef VERY_VERY_VERBOSE_LOGGING
#define ALOGVV ALOGV
#else
#define ALOGVV(a...) do { } while(0)
#endif

namespace android {

// ----------------------------------------------------------------------------
//      TrackBase
// ----------------------------------------------------------------------------

static volatile int32_t nextTrackId = 55;

// TrackBase constructor must be called with AudioFlinger::mLock held
AudioFlinger::ThreadBase::TrackBase::TrackBase(
            ThreadBase *thread,
            const sp<Client>& client,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int clientUid,
            bool isOut)
    :   RefBase(),
        mThread(thread),
        mClient(client),
        mCblk(NULL),
        // mBuffer
        mState(IDLE),
        mSampleRate(sampleRate),
        mFormat(format),
        mChannelMask(channelMask),
        mChannelCount(popcount(channelMask)),
        mFrameSize(audio_is_linear_pcm(format) ?
                mChannelCount * audio_bytes_per_sample(format) : sizeof(int8_t)),
        mFrameCount(frameCount),
        mSessionId(sessionId),
        mIsOut(isOut),
        mServerProxy(NULL),
        mId(android_atomic_inc(&nextTrackId)),
        mTerminated(false)
{
    // if the caller is us, trust the specified uid
    if (IPCThreadState::self()->getCallingPid() != getpid_cached || clientUid == -1) {
        int newclientUid = IPCThreadState::self()->getCallingUid();
        if (clientUid != -1 && clientUid != newclientUid) {
            ALOGW("uid %d tried to pass itself off as %d", newclientUid, clientUid);
        }
        clientUid = newclientUid;
    }
    // clientUid contains the uid of the app that is responsible for this track, so we can blame
    // battery usage on it.
    mUid = clientUid;

    // client == 0 implies sharedBuffer == 0
    ALOG_ASSERT(!(client == 0 && sharedBuffer != 0));

    ALOGV_IF(sharedBuffer != 0, "sharedBuffer: %p, size: %d", sharedBuffer->pointer(),
            sharedBuffer->size());

    // ALOGD("Creating track with %d buffers @ %d bytes", bufferCount, bufferSize);
    size_t size = sizeof(audio_track_cblk_t);
    size_t bufferSize = (sharedBuffer == 0 ? roundup(frameCount) : frameCount) * mFrameSize;
    if (sharedBuffer == 0) {
        size += bufferSize;
    }

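    // Layout note: for a streaming (non-shared-buffer) track, the control block and the audio
    // data share one allocation: an audio_track_cblk_t header immediately followed by bufferSize
    // bytes of audio (see the mBuffer assignment below).  roundup() is expected to round the
    // frame count up to a power of two so the client/server proxies can use cheap index math.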
    if (client != 0) {
        mCblkMemory = client->heap()->allocate(size);
        if (mCblkMemory != 0) {
            mCblk = static_cast<audio_track_cblk_t *>(mCblkMemory->pointer());
            // can't assume mCblk != NULL
        } else {
            ALOGE("not enough memory for AudioTrack size=%u", size);
            client->heap()->dump("AudioTrack");
            return;
        }
    } else {
        // this syntax avoids calling the audio_track_cblk_t constructor twice
        mCblk = (audio_track_cblk_t *) new uint8_t[size];
        // assume mCblk != NULL
    }

    // construct the shared structure in-place.
    if (mCblk != NULL) {
        new(mCblk) audio_track_cblk_t();
        // clear all buffers
        mCblk->frameCount_ = frameCount;
        if (sharedBuffer == 0) {
            mBuffer = (char*)mCblk + sizeof(audio_track_cblk_t);
            memset(mBuffer, 0, bufferSize);
        } else {
            mBuffer = sharedBuffer->pointer();
#if 0
            mCblk->mFlags = CBLK_FORCEREADY;    // FIXME hack, need to fix the track ready logic
#endif
        }

#ifdef TEE_SINK
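        // When the per-track tee is enabled, every buffer released by this track is also written
        // to a local Pipe (see releaseBuffer() below); the matching PipeReader is kept in
        // mTeeSource and dumped by dumpTee() from the destructor for debugging.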
        if (mTeeSinkTrackEnabled) {
            NBAIO_Format pipeFormat = Format_from_SR_C(mSampleRate, mChannelCount);
            if (pipeFormat != Format_Invalid) {
                Pipe *pipe = new Pipe(mTeeSinkTrackFrames, pipeFormat);
                size_t numCounterOffers = 0;
                const NBAIO_Format offers[1] = {pipeFormat};
                ssize_t index = pipe->negotiate(offers, 1, NULL, numCounterOffers);
                ALOG_ASSERT(index == 0);
                PipeReader *pipeReader = new PipeReader(*pipe);
                numCounterOffers = 0;
                index = pipeReader->negotiate(offers, 1, NULL, numCounterOffers);
                ALOG_ASSERT(index == 0);
                mTeeSink = pipe;
                mTeeSource = pipeReader;
            }
        }
#endif

    }
}

AudioFlinger::ThreadBase::TrackBase::~TrackBase()
{
#ifdef TEE_SINK
    dumpTee(-1, mTeeSource, mId);
#endif
    // delete the proxy before deleting the shared memory it refers to, to avoid dangling reference
    delete mServerProxy;
    if (mCblk != NULL) {
        if (mClient == 0) {
            delete mCblk;
        } else {
            mCblk->~audio_track_cblk_t();   // destroy our shared-structure.
        }
    }
    mCblkMemory.clear();    // free the shared memory before releasing the heap it belongs to
    if (mClient != 0) {
        // Client destructor must run with AudioFlinger mutex locked
        Mutex::Autolock _l(mClient->audioFlinger()->mLock);
        // If the client's reference count drops to zero, the associated destructor
        // must run with AudioFlinger lock held. Thus the explicit clear() rather than
        // relying on the automatic clear() at end of scope.
        mClient.clear();
    }
}

// AudioBufferProvider interface
// getNextBuffer() = 0;
// This implementation of releaseBuffer() is used by Track and RecordTrack, but not TimedTrack
void AudioFlinger::ThreadBase::TrackBase::releaseBuffer(AudioBufferProvider::Buffer* buffer)
{
#ifdef TEE_SINK
    if (mTeeSink != 0) {
        (void) mTeeSink->write(buffer->raw, buffer->frameCount);
    }
#endif

    ServerProxy::Buffer buf;
    buf.mFrameCount = buffer->frameCount;
    buf.mRaw = buffer->raw;
    buffer->frameCount = 0;
    buffer->raw = NULL;
    mServerProxy->releaseBuffer(&buf);
}

status_t AudioFlinger::ThreadBase::TrackBase::setSyncEvent(const sp<SyncEvent>& event)
{
    mSyncEvents.add(event);
    return NO_ERROR;
}

// ----------------------------------------------------------------------------
//      Playback
// ----------------------------------------------------------------------------

AudioFlinger::TrackHandle::TrackHandle(const sp<AudioFlinger::PlaybackThread::Track>& track)
    : BnAudioTrack(),
      mTrack(track)
{
}

AudioFlinger::TrackHandle::~TrackHandle() {
    // just stop the track on deletion, associated resources
    // will be freed from the main thread once all pending buffers have
    // been played. Unless it's not in the active track list, in which
    // case we free everything now...
    mTrack->destroy();
}

sp<IMemory> AudioFlinger::TrackHandle::getCblk() const {
    return mTrack->getCblk();
}

status_t AudioFlinger::TrackHandle::start() {
    return mTrack->start();
}

void AudioFlinger::TrackHandle::stop() {
    mTrack->stop();
}

void AudioFlinger::TrackHandle::flush() {
    mTrack->flush();
}

void AudioFlinger::TrackHandle::pause() {
    mTrack->pause();
}

status_t AudioFlinger::TrackHandle::attachAuxEffect(int EffectId)
{
    return mTrack->attachAuxEffect(EffectId);
}

status_t AudioFlinger::TrackHandle::allocateTimedBuffer(size_t size,
                                                         sp<IMemory>* buffer) {
    if (!mTrack->isTimedTrack())
        return INVALID_OPERATION;

    PlaybackThread::TimedTrack* tt =
            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    return tt->allocateTimedBuffer(size, buffer);
}

status_t AudioFlinger::TrackHandle::queueTimedBuffer(const sp<IMemory>& buffer,
                                                     int64_t pts) {
    if (!mTrack->isTimedTrack())
        return INVALID_OPERATION;

    PlaybackThread::TimedTrack* tt =
            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    return tt->queueTimedBuffer(buffer, pts);
}

status_t AudioFlinger::TrackHandle::setMediaTimeTransform(
    const LinearTransform& xform, int target) {

    if (!mTrack->isTimedTrack())
        return INVALID_OPERATION;

    PlaybackThread::TimedTrack* tt =
            reinterpret_cast<PlaybackThread::TimedTrack*>(mTrack.get());
    return tt->setMediaTimeTransform(
        xform, static_cast<TimedAudioTrack::TargetTimeline>(target));
}

status_t AudioFlinger::TrackHandle::setParameters(const String8& keyValuePairs) {
    return mTrack->setParameters(keyValuePairs);
}

status_t AudioFlinger::TrackHandle::getTimestamp(AudioTimestamp& timestamp)
{
    return mTrack->getTimestamp(timestamp);
}


void AudioFlinger::TrackHandle::signal()
{
    return mTrack->signal();
}

status_t AudioFlinger::TrackHandle::onTransact(
    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
{
    return BnAudioTrack::onTransact(code, data, reply, flags);
}

// ----------------------------------------------------------------------------

// Track constructor must be called with AudioFlinger::mLock and ThreadBase::mLock held
AudioFlinger::PlaybackThread::Track::Track(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid,
            IAudioFlinger::track_flags_t flags)
    :   TrackBase(thread, client, sampleRate, format, channelMask, frameCount, sharedBuffer,
            sessionId, uid, true /*isOut*/),
    mFillingUpStatus(FS_INVALID),
    // mRetryCount initialized later when needed
    mSharedBuffer(sharedBuffer),
    mStreamType(streamType),
    mName(-1),  // see note below
    mMainBuffer(thread->mixBuffer()),
    mAuxBuffer(NULL),
    mAuxEffectId(0), mHasVolumeController(false),
    mPresentationCompleteFrames(0),
    mFlags(flags),
    mFastIndex(-1),
    mCachedVolume(1.0),
    mIsInvalid(false),
    mAudioTrackServerProxy(NULL),
    mResumeToStopping(false)
{
    if (mCblk != NULL) {
        if (sharedBuffer == 0) {
            mAudioTrackServerProxy = new AudioTrackServerProxy(mCblk, mBuffer, frameCount,
                    mFrameSize);
        } else {
            mAudioTrackServerProxy = new StaticAudioTrackServerProxy(mCblk, mBuffer, frameCount,
                    mFrameSize);
        }
        mServerProxy = mAudioTrackServerProxy;
        // to avoid leaking a track name, do not allocate one unless there is an mCblk
        mName = thread->getTrackName_l(channelMask, sessionId);
        if (mName < 0) {
            ALOGE("no more track names available");
            return;
        }
        // only allocate a fast track index if we were able to allocate a normal track name
        if (flags & IAudioFlinger::TRACK_FAST) {
            mAudioTrackServerProxy->framesReadyIsCalledByMultipleThreads();
            ALOG_ASSERT(thread->mFastTrackAvailMask != 0);
            int i = __builtin_ctz(thread->mFastTrackAvailMask);
            ALOG_ASSERT(0 < i && i < (int)FastMixerState::kMaxFastTracks);
            // FIXME This is too eager.  We allocate a fast track index before the
            //       fast track becomes active.  Since fast tracks are a scarce resource,
            //       this means we are potentially denying other more important fast tracks from
            //       being created.  It would be better to allocate the index dynamically.
            mFastIndex = i;
            // Read the initial underruns because this field is never cleared by the fast mixer
            mObservedUnderruns = thread->getFastTrackUnderruns(i);
            thread->mFastTrackAvailMask &= ~(1 << i);
        }
    }
    ALOGV("Track constructor name %d, calling pid %d", mName,
            IPCThreadState::self()->getCallingPid());
}

AudioFlinger::PlaybackThread::Track::~Track()
{
    ALOGV("PlaybackThread::Track destructor");

    // The destructor would clear mSharedBuffer,
    // but it will not push the decremented reference count,
    // leaving the client's IMemory dangling indefinitely.
    // This prevents that leak.
    if (mSharedBuffer != 0) {
        mSharedBuffer.clear();
        // flush the binder command buffer
        IPCThreadState::self()->flushCommands();
    }
}

status_t AudioFlinger::PlaybackThread::Track::initCheck() const
{
    status_t status = TrackBase::initCheck();
    if (status == NO_ERROR && mName < 0) {
        status = NO_MEMORY;
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::destroy()
{
    // NOTE: destroyTrack_l() can remove a strong reference to this Track
    // by removing it from mTracks vector, so there is a risk that this Track's
    // destructor is called. As the destructor needs to lock mLock,
    // we must acquire a strong reference on this Track before locking mLock
    // here so that the destructor is called only when exiting this function.
    // On the other hand, as long as Track::destroy() is only called by
    // TrackHandle destructor, the TrackHandle still holds a strong ref on
    // this Track with its member mTrack.
    sp<Track> keep(this);
    { // scope for mLock
        sp<ThreadBase> thread = mThread.promote();
        if (thread != 0) {
            Mutex::Autolock _l(thread->mLock);
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            bool wasActive = playbackThread->destroyTrack_l(this);
            if (!isOutputTrack() && !wasActive) {
                AudioSystem::releaseOutput(thread->id());
            }
        }
    }
}

/*static*/ void AudioFlinger::PlaybackThread::Track::appendDumpHeader(String8& result)
{
    result.append("   Name Client Type      Fmt Chn mask Session fCount S F SRate  "
                  "L dB  R dB    Server Main buf  Aux Buf Flags UndFrmCnt\n");
}

void AudioFlinger::PlaybackThread::Track::dump(char* buffer, size_t size)
{
    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
    if (isFastTrack()) {
        sprintf(buffer, "   F %2d", mFastIndex);
    } else {
        sprintf(buffer, "   %4d", mName - AudioMixer::TRACK0);
    }
    track_state state = mState;
    char stateChar;
    if (isTerminated()) {
        stateChar = 'T';
    } else {
        switch (state) {
        case IDLE:
            stateChar = 'I';
            break;
        case STOPPING_1:
            stateChar = 's';
            break;
        case STOPPING_2:
            stateChar = '5';
            break;
        case STOPPED:
            stateChar = 'S';
            break;
        case RESUMING:
            stateChar = 'R';
            break;
        case ACTIVE:
            stateChar = 'A';
            break;
        case PAUSING:
            stateChar = 'p';
            break;
        case PAUSED:
            stateChar = 'P';
            break;
        case FLUSHED:
            stateChar = 'F';
            break;
        default:
            stateChar = '?';
            break;
        }
    }
    char nowInUnderrun;
    switch (mObservedUnderruns.mBitFields.mMostRecent) {
    case UNDERRUN_FULL:
        nowInUnderrun = ' ';
        break;
    case UNDERRUN_PARTIAL:
        nowInUnderrun = '<';
        break;
    case UNDERRUN_EMPTY:
        nowInUnderrun = '*';
        break;
    default:
        nowInUnderrun = '?';
        break;
    }
    snprintf(&buffer[7], size-7, " %6u %4u %08X %08X %7u %6u %1c %1d %5u %5.2g %5.2g  "
                                 "%08X %08X %08X 0x%03X %9u%c\n",
            (mClient == 0) ? getpid_cached : mClient->pid(),
            mStreamType,
            mFormat,
            mChannelMask,
            mSessionId,
            mFrameCount,
            stateChar,
            mFillingUpStatus,
            mAudioTrackServerProxy->getSampleRate(),
            20.0 * log10((vlr & 0xFFFF) / 4096.0),
            20.0 * log10((vlr >> 16) / 4096.0),
            mCblk->mServer,
            (int)mMainBuffer,
            (int)mAuxBuffer,
            mCblk->mFlags,
            mAudioTrackServerProxy->getUnderrunFrames(),
            nowInUnderrun);
}

uint32_t AudioFlinger::PlaybackThread::Track::sampleRate() const {
    return mAudioTrackServerProxy->getSampleRate();
}

// AudioBufferProvider interface
status_t AudioFlinger::PlaybackThread::Track::getNextBuffer(
        AudioBufferProvider::Buffer* buffer, int64_t pts)
{
    ServerProxy::Buffer buf;
    size_t desiredFrames = buffer->frameCount;
    buf.mFrameCount = desiredFrames;
    status_t status = mServerProxy->obtainBuffer(&buf);
    buffer->frameCount = buf.mFrameCount;
    buffer->raw = buf.mRaw;
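    // an empty buffer from obtainBuffer() means the client has underrun; charge the entire
    // requested frame count to the underrun counter reported via dump()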
    if (buf.mFrameCount == 0) {
        mAudioTrackServerProxy->tallyUnderrunFrames(desiredFrames);
    }
    return status;
}

// releaseBuffer() is not overridden

// ExtendedAudioBufferProvider interface

// Note that framesReady() takes a mutex on the control block using tryLock().
// This could result in priority inversion if framesReady() is called by the normal mixer,
// as the normal mixer thread runs at lower
// priority than the client's callback thread:  there is a short window within framesReady()
// during which the normal mixer could be preempted, and the client callback would block.
// Another problem can occur if framesReady() is called by the fast mixer:
// the tryLock() could block for up to 1 ms, and a sequence of these could delay fast mixer.
// FIXME Replace AudioTrackShared control block implementation by a non-blocking FIFO queue.
size_t AudioFlinger::PlaybackThread::Track::framesReady() const {
    return mAudioTrackServerProxy->framesReady();
}

size_t AudioFlinger::PlaybackThread::Track::framesReleased() const
{
    return mAudioTrackServerProxy->framesReleased();
}

// Don't call for fast tracks; the framesReady() could result in priority inversion
bool AudioFlinger::PlaybackThread::Track::isReady() const {
    if (mFillingUpStatus != FS_FILLING || isStopped() || isPausing()) {
        return true;
    }

    if (framesReady() >= mFrameCount ||
            (mCblk->mFlags & CBLK_FORCEREADY)) {
        mFillingUpStatus = FS_FILLED;
        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
        return true;
    }
    return false;
}

status_t AudioFlinger::PlaybackThread::Track::start(AudioSystem::sync_event_t event,
                                                    int triggerSession)
{
    status_t status = NO_ERROR;
    ALOGV("start(%d), calling pid %d session %d",
            mName, IPCThreadState::self()->getCallingPid(), mSessionId);

    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        if (isOffloaded()) {
            Mutex::Autolock _laf(thread->mAudioFlinger->mLock);
            Mutex::Autolock _lth(thread->mLock);
            sp<EffectChain> ec = thread->getEffectChain_l(mSessionId);
            if (thread->mAudioFlinger->isNonOffloadableGlobalEffectEnabled_l() ||
                    (ec != 0 && ec->isNonOffloadableEnabled())) {
                invalidate();
                return PERMISSION_DENIED;
            }
        }
        Mutex::Autolock _lth(thread->mLock);
        track_state state = mState;
        // here the track could be either new, or restarted
        // in both cases "unstop" the track

        if (state == PAUSED) {
            if (mResumeToStopping) {
                // a stop was interrupted by pause(); resume draining via STOPPING_1
                mState = TrackBase::STOPPING_1;
                ALOGV("PAUSED => STOPPING_1 (%d) on thread %p", mName, this);
            } else {
                mState = TrackBase::RESUMING;
                ALOGV("PAUSED => RESUMING (%d) on thread %p", mName, this);
            }
        } else {
            mState = TrackBase::ACTIVE;
            ALOGV("? => ACTIVE (%d) on thread %p", mName, this);
        }

        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        status = playbackThread->addTrack_l(this);
        if (status == INVALID_OPERATION || status == PERMISSION_DENIED) {
            triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
            //  restore previous state if start was rejected by policy manager
            if (status == PERMISSION_DENIED) {
                mState = state;
            }
        }
        // track was already in the active list, not a problem
        if (status == ALREADY_EXISTS) {
            status = NO_ERROR;
        } else {
            // Acknowledge any pending flush(), so that subsequent new data isn't discarded.
            // It is usually unsafe to access the server proxy from a binder thread.
            // But in this case we know the mixer thread (whether normal mixer or fast mixer)
            // isn't looking at this track yet:  we still hold the normal mixer thread lock,
            // and for fast tracks the track is not yet in the fast mixer thread's active set.
            ServerProxy::Buffer buffer;
            buffer.mFrameCount = 1;
            (void) mAudioTrackServerProxy->obtainBuffer(&buffer, true /*ackFlush*/);
        }
    } else {
        status = BAD_VALUE;
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::stop()
{
    ALOGV("stop(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        track_state state = mState;
        if (state == RESUMING || state == ACTIVE || state == PAUSING || state == PAUSED) {
            // If the track is not active (PAUSED and buffers full), flush buffers
            PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                reset();
                mState = STOPPED;
            } else if (!isFastTrack() && !isOffloaded()) {
                mState = STOPPED;
            } else {
                // For fast tracks prepareTracks_l() will set state to STOPPING_2
                // when presentation is complete
                // For an offloaded track this starts a drain and state will
                // move to STOPPING_2 when drain completes and then STOPPED
                mState = STOPPING_1;
            }
            ALOGV("not stopping/stopped => stopping/stopped (%d) on thread %p", mName,
                    playbackThread);
        }
    }
}

void AudioFlinger::PlaybackThread::Track::pause()
{
    ALOGV("pause(%d), calling pid %d", mName, IPCThreadState::self()->getCallingPid());
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        switch (mState) {
        case STOPPING_1:
        case STOPPING_2:
            if (!isOffloaded()) {
                /* nothing to do if track is not offloaded */
                break;
            }

            // Offloaded track was draining, we need to carry on draining when resumed
            mResumeToStopping = true;
            // fall through...
        case ACTIVE:
        case RESUMING:
            mState = PAUSING;
            ALOGV("ACTIVE/RESUMING => PAUSING (%d) on thread %p", mName, thread.get());
            playbackThread->broadcast_l();
            break;

        default:
            break;
        }
    }
}

void AudioFlinger::PlaybackThread::Track::flush()
{
    ALOGV("flush(%d)", mName);
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        Mutex::Autolock _l(thread->mLock);
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();

        if (isOffloaded()) {
            // If offloaded we allow flush during any state except terminated
            // and keep the track active to avoid problems if user is seeking
            // rapidly and underlying hardware has a significant delay handling
            // a pause
            if (isTerminated()) {
                return;
            }

            ALOGV("flush: offload flush");
            reset();

            if (mState == STOPPING_1 || mState == STOPPING_2) {
                ALOGV("flushed in STOPPING_1 or 2 state, change state to ACTIVE");
                mState = ACTIVE;
            }

            if (mState == ACTIVE) {
                ALOGV("flush called in active state, resetting buffer time out retry count");
                mRetryCount = PlaybackThread::kMaxTrackRetriesOffload;
            }

            mResumeToStopping = false;
        } else {
            if (mState != STOPPING_1 && mState != STOPPING_2 && mState != STOPPED &&
                    mState != PAUSED && mState != PAUSING && mState != IDLE && mState != FLUSHED) {
                return;
            }
            // No point remaining in PAUSED state after a flush => go to
            // FLUSHED state
            mState = FLUSHED;
            // do not reset the track if it is still in the process of being stopped or paused.
            // this will be done by prepareTracks_l() when the track is stopped.
            // prepareTracks_l() will see mState == FLUSHED, then
            // remove from active track list, reset(), and trigger presentation complete
            if (playbackThread->mActiveTracks.indexOf(this) < 0) {
                reset();
            }
        }
        // Prevent flush being lost if the track is flushed and then resumed
        // before mixer thread can run. This is important when offloading
        // because the hardware buffer could hold a large amount of audio
        playbackThread->flushOutput_l();
        playbackThread->broadcast_l();
    }
}

void AudioFlinger::PlaybackThread::Track::reset()
{
    // Do not reset twice to avoid discarding data written just after a flush and before
    // the audioflinger thread detects the track is stopped.
    if (!mResetDone) {
        // Force underrun condition to avoid false underrun callback until first data is
        // written to buffer
        android_atomic_and(~CBLK_FORCEREADY, &mCblk->mFlags);
        mFillingUpStatus = FS_FILLING;
        mResetDone = true;
        if (mState == FLUSHED) {
            mState = IDLE;
        }
    }
}

status_t AudioFlinger::PlaybackThread::Track::setParameters(const String8& keyValuePairs)
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread == 0) {
        ALOGE("thread is dead");
        return FAILED_TRANSACTION;
    } else if ((thread->type() == ThreadBase::DIRECT) ||
                    (thread->type() == ThreadBase::OFFLOAD)) {
        return thread->setParameters(keyValuePairs);
    } else {
        return PERMISSION_DENIED;
    }
}

status_t AudioFlinger::PlaybackThread::Track::getTimestamp(AudioTimestamp& timestamp)
{
    // Client should implement this using SSQ; the unpresented frame count in latch is irrelevant
    if (isFastTrack()) {
        return INVALID_OPERATION;
    }
    sp<ThreadBase> thread = mThread.promote();
    if (thread == 0) {
        return INVALID_OPERATION;
    }
    Mutex::Autolock _l(thread->mLock);
    PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
    if (!isOffloaded()) {
        if (!playbackThread->mLatchQValid) {
            return INVALID_OPERATION;
        }
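        // mLatchQ.mUnpresentedFrames is counted at the sink (thread) sample rate; rescale it to
        // this track's sample rate before subtracting it from the track's released frame count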
        uint32_t unpresentedFrames =
                ((int64_t) playbackThread->mLatchQ.mUnpresentedFrames * mSampleRate) /
                playbackThread->mSampleRate;
        uint32_t framesWritten = mAudioTrackServerProxy->framesReleased();
        if (framesWritten < unpresentedFrames) {
            return INVALID_OPERATION;
        }
        timestamp.mPosition = framesWritten - unpresentedFrames;
        timestamp.mTime = playbackThread->mLatchQ.mTimestamp.mTime;
        return NO_ERROR;
    }

    return playbackThread->getTimestamp_l(timestamp);
}

status_t AudioFlinger::PlaybackThread::Track::attachAuxEffect(int EffectId)
{
    status_t status = DEAD_OBJECT;
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        PlaybackThread *playbackThread = (PlaybackThread *)thread.get();
        sp<AudioFlinger> af = mClient->audioFlinger();

        Mutex::Autolock _l(af->mLock);

        sp<PlaybackThread> srcThread = af->getEffectThread_l(AUDIO_SESSION_OUTPUT_MIX, EffectId);

        if (EffectId != 0 && srcThread != 0 && playbackThread != srcThread.get()) {
            Mutex::Autolock _dl(playbackThread->mLock);
            Mutex::Autolock _sl(srcThread->mLock);
            sp<EffectChain> chain = srcThread->getEffectChain_l(AUDIO_SESSION_OUTPUT_MIX);
            if (chain == 0) {
                return INVALID_OPERATION;
            }

            sp<EffectModule> effect = chain->getEffectFromId_l(EffectId);
            if (effect == 0) {
                return INVALID_OPERATION;
            }
            srcThread->removeEffect_l(effect);
            status = playbackThread->addEffect_l(effect);
            if (status != NO_ERROR) {
                srcThread->addEffect_l(effect);
                return INVALID_OPERATION;
            }
            // removeEffect_l() has stopped the effect if it was active so it must be restarted
            if (effect->state() == EffectModule::ACTIVE ||
                    effect->state() == EffectModule::STOPPING) {
                effect->start();
            }

            sp<EffectChain> dstChain = effect->chain().promote();
            if (dstChain == 0) {
                srcThread->addEffect_l(effect);
                return INVALID_OPERATION;
            }
            AudioSystem::unregisterEffect(effect->id());
            AudioSystem::registerEffect(&effect->desc(),
                                        srcThread->id(),
                                        dstChain->strategy(),
                                        AUDIO_SESSION_OUTPUT_MIX,
                                        effect->id());
        }
        status = playbackThread->attachAuxEffect(this, EffectId);
    }
    return status;
}

void AudioFlinger::PlaybackThread::Track::setAuxBuffer(int EffectId, int32_t *buffer)
{
    mAuxEffectId = EffectId;
    mAuxBuffer = buffer;
}

bool AudioFlinger::PlaybackThread::Track::presentationComplete(size_t framesWritten,
                                                         size_t audioHalFrames)
{
    // a track is considered presented when the total number of frames written to audio HAL
    // corresponds to the number of frames written when presentationComplete() is called for the
    // first time (mPresentationCompleteFrames == 0) plus the buffer filling status at that time.
    // For an offloaded track the HAL+h/w delay is variable so a HAL drain() is used
    // to detect when all frames have been played. In this case framesWritten isn't
    // useful because it doesn't always reflect whether there is data in the h/w
    // buffers, particularly if a track has been paused and resumed during draining
    ALOGV("presentationComplete() mPresentationCompleteFrames %d framesWritten %d",
                      mPresentationCompleteFrames, framesWritten);
    if (mPresentationCompleteFrames == 0) {
        mPresentationCompleteFrames = framesWritten + audioHalFrames;
        ALOGV("presentationComplete() reset: mPresentationCompleteFrames %d audioHalFrames %d",
                  mPresentationCompleteFrames, audioHalFrames);
    }

    if (framesWritten >= mPresentationCompleteFrames || isOffloaded()) {
        ALOGV("presentationComplete() session %d complete: framesWritten %d",
                  mSessionId, framesWritten);
        triggerEvents(AudioSystem::SYNC_EVENT_PRESENTATION_COMPLETE);
        mAudioTrackServerProxy->setStreamEndDone();
        return true;
    }
    return false;
}

void AudioFlinger::PlaybackThread::Track::triggerEvents(AudioSystem::sync_event_t type)
{
    for (int i = 0; i < (int)mSyncEvents.size(); i++) {
        if (mSyncEvents[i]->type() == type) {
            mSyncEvents[i]->trigger();
            mSyncEvents.removeAt(i);
            i--;
        }
    }
}

// implement VolumeBufferProvider interface

uint32_t AudioFlinger::PlaybackThread::Track::getVolumeLR()
{
    // called by FastMixer, so not allowed to take any locks, block, or do I/O including logs
    ALOG_ASSERT(isFastTrack() && (mCblk != NULL));
    uint32_t vlr = mAudioTrackServerProxy->getVolumeLR();
    uint32_t vl = vlr & 0xFFFF;
    uint32_t vr = vlr >> 16;
    // track volumes come from shared memory, so can't be trusted and must be clamped
    if (vl > MAX_GAIN_INT) {
        vl = MAX_GAIN_INT;
    }
    if (vr > MAX_GAIN_INT) {
        vr = MAX_GAIN_INT;
    }
    // now apply the cached master volume and stream type volume;
    // this is trusted but lacks any synchronization or barrier so may be stale
    float v = mCachedVolume;
    vl *= v;
    vr *= v;
    // re-combine into U4.16
    vlr = (vr << 16) | (vl & 0xFFFF);
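    // e.g. unity gain on both channels packs to 0x10001000: each 16-bit half holds a 4.12
    // fixed-point gain where 0x1000 (4096) corresponds to 1.0 (cf. the /4096.0 scaling in dump())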
    // FIXME look at mute, pause, and stop flags
    return vlr;
}

status_t AudioFlinger::PlaybackThread::Track::setSyncEvent(const sp<SyncEvent>& event)
{
    if (isTerminated() || mState == PAUSED ||
            ((framesReady() == 0) && ((mSharedBuffer != 0) ||
                                      (mState == STOPPED)))) {
        ALOGW("Track::setSyncEvent() in invalid state %d on session %d %s mode, framesReady %d ",
              mState, mSessionId, (mSharedBuffer != 0) ? "static" : "stream", framesReady());
        event->cancel();
        return INVALID_OPERATION;
    }
    (void) TrackBase::setSyncEvent(event);
    return NO_ERROR;
}

void AudioFlinger::PlaybackThread::Track::invalidate()
{
    // FIXME should use proxy, and needs work
    audio_track_cblk_t* cblk = mCblk;
    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
    android_atomic_release_store(0x40000000, &cblk->mFutex);
    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
    (void) __futex_syscall3(&cblk->mFutex, FUTEX_WAKE, INT_MAX);
    mIsInvalid = true;
}

void AudioFlinger::PlaybackThread::Track::signal()
{
    sp<ThreadBase> thread = mThread.promote();
    if (thread != 0) {
        PlaybackThread *t = (PlaybackThread *)thread.get();
        Mutex::Autolock _l(t->mLock);
        t->broadcast_l();
    }
}

// ----------------------------------------------------------------------------

sp<AudioFlinger::PlaybackThread::TimedTrack>
AudioFlinger::PlaybackThread::TimedTrack::create(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid) {
    if (!client->reserveTimedTrack())
        return 0;

    return new TimedTrack(
        thread, client, streamType, sampleRate, format, channelMask, frameCount,
        sharedBuffer, sessionId, uid);
}

AudioFlinger::PlaybackThread::TimedTrack::TimedTrack(
            PlaybackThread *thread,
            const sp<Client>& client,
            audio_stream_type_t streamType,
            uint32_t sampleRate,
            audio_format_t format,
            audio_channel_mask_t channelMask,
            size_t frameCount,
            const sp<IMemory>& sharedBuffer,
            int sessionId,
            int uid)
    : Track(thread, client, streamType, sampleRate, format, channelMask,
            frameCount, sharedBuffer, sessionId, uid, IAudioFlinger::TRACK_TIMED),
      mQueueHeadInFlight(false),
      mTrimQueueHeadOnRelease(false),
      mFramesPendingInQueue(0),
      mTimedSilenceBuffer(NULL),
      mTimedSilenceBufferSize(0),
      mTimedAudioOutputOnTime(false),
      mMediaTimeTransformValid(false)
{
    LocalClock lc;
    mLocalTimeFreq = lc.getLocalFreq();

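    // Set up two zero-offset linear maps into sample frames:
    //   local clock ticks -> frames:  frames = ticks * sampleRate / mLocalTimeFreq
    //   media time (us)   -> frames:  frames = us * sampleRate / 1000000
    // reduce() lowers each numerator/denominator pair to smallest terms, limiting the risk of
    // intermediate overflow inside LinearTransform.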
    mLocalTimeToSampleTransform.a_zero = 0;
    mLocalTimeToSampleTransform.b_zero = 0;
    mLocalTimeToSampleTransform.a_to_b_numer = sampleRate;
    mLocalTimeToSampleTransform.a_to_b_denom = mLocalTimeFreq;
    LinearTransform::reduce(&mLocalTimeToSampleTransform.a_to_b_numer,
                            &mLocalTimeToSampleTransform.a_to_b_denom);

    mMediaTimeToSampleTransform.a_zero = 0;
    mMediaTimeToSampleTransform.b_zero = 0;
    mMediaTimeToSampleTransform.a_to_b_numer = sampleRate;
    mMediaTimeToSampleTransform.a_to_b_denom = 1000000;
    LinearTransform::reduce(&mMediaTimeToSampleTransform.a_to_b_numer,
                            &mMediaTimeToSampleTransform.a_to_b_denom);
1036}
1037
1038AudioFlinger::PlaybackThread::TimedTrack::~TimedTrack() {
1039    mClient->releaseTimedTrack();
1040    delete [] mTimedSilenceBuffer;
1041}
1042
1043status_t AudioFlinger::PlaybackThread::TimedTrack::allocateTimedBuffer(
1044    size_t size, sp<IMemory>* buffer) {
1045
1046    Mutex::Autolock _l(mTimedBufferQueueLock);
1047
1048    trimTimedBufferQueue_l();
1049
1050    // lazily initialize the shared memory heap for timed buffers
1051    if (mTimedMemoryDealer == NULL) {
1052        const int kTimedBufferHeapSize = 512 << 10;
1053
1054        mTimedMemoryDealer = new MemoryDealer(kTimedBufferHeapSize,
1055                                              "AudioFlingerTimed");
1056        if (mTimedMemoryDealer == NULL) {
1057            return NO_MEMORY;
1058        }
1059    }
1060
1061    sp<IMemory> newBuffer = mTimedMemoryDealer->allocate(size);
1062    if (newBuffer == NULL) {
1063        return NO_MEMORY;
1064    }
1068
1069    *buffer = newBuffer;
1070    return NO_ERROR;
1071}
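// Illustrative (hypothetical) sketch of how these methods are used together -- not part
// of this file.  A caller would typically allocate a timed buffer, fill it with PCM data,
// and then queue it with a media-time PTS:
//
//     sp<IMemory> mem;
//     if (timedTrack->allocateTimedBuffer(numBytes, &mem) == NO_ERROR) {
//         memcpy(mem->pointer(), pcmData, numBytes);
//         timedTrack->queueTimedBuffer(mem, mediaTimePtsUs);
//     }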
1072
1073// caller must hold mTimedBufferQueueLock
1074void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueue_l() {
1075    int64_t mediaTimeNow;
1076    {
1077        Mutex::Autolock mttLock(mMediaTimeTransformLock);
1078        if (!mMediaTimeTransformValid)
1079            return;
1080
1081        int64_t targetTimeNow;
1082        status_t res = (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME)
1083            ? mCCHelper.getCommonTime(&targetTimeNow)
1084            : mCCHelper.getLocalTime(&targetTimeNow);
1085
1086        if (OK != res)
1087            return;
1088
1089        if (!mMediaTimeTransform.doReverseTransform(targetTimeNow,
1090                                                    &mediaTimeNow)) {
1091            return;
1092        }
1093    }
1094
1095    size_t trimEnd;
1096    for (trimEnd = 0; trimEnd < mTimedBufferQueue.size(); trimEnd++) {
1097        int64_t bufEnd;
1098
1099        if ((trimEnd + 1) < mTimedBufferQueue.size()) {
1100            // We have a next buffer.  Just use its PTS as the PTS of the frame
1101            // following the last frame in this buffer.  If the stream is sparse
1102            // (i.e., there are deliberate gaps left in the stream which should be
1103            // filled with silence by the TimedAudioTrack), then this can result
1104            // in one extra buffer being left un-trimmed when it could have
1105            // been.  This case is not typical, however, and we would rather
1106            // optimize away the timestamp calculation below for the more common
1107            // case where PTSes are contiguous.
1108            bufEnd = mTimedBufferQueue[trimEnd + 1].pts();
1109        } else {
1110            // We have no next buffer.  Compute the PTS of the frame following
1111            // the last frame in this buffer by computing the duration of
1112            // this buffer in media time units and adding it to the PTS of the
1113            // buffer.
1114            int64_t frameCount = mTimedBufferQueue[trimEnd].buffer()->size()
1115                               / mFrameSize;
1116
1117            if (!mMediaTimeToSampleTransform.doReverseTransform(frameCount,
1118                                                                &bufEnd)) {
1119                ALOGE("Failed to convert frame count of %lld to media time"
1120                      " duration (scale factor %d/%u) in %s",
1121                      frameCount,
1122                      mMediaTimeToSampleTransform.a_to_b_numer,
1123                      mMediaTimeToSampleTransform.a_to_b_denom,
1124                      __PRETTY_FUNCTION__);
1125                break;
1126            }
1127            bufEnd += mTimedBufferQueue[trimEnd].pts();
1128        }
1129
1130        if (bufEnd > mediaTimeNow)
1131            break;
1132
1133        // Is the buffer we want to use in the middle of a mix operation right
1134        // now?  If so, don't actually trim it.  Just wait for the releaseBuffer
1135        // from the mixer which should be coming back shortly.
1136        if (!trimEnd && mQueueHeadInFlight) {
1137            mTrimQueueHeadOnRelease = true;
1138        }
1139    }
1140
1141    size_t trimStart = mTrimQueueHeadOnRelease ? 1 : 0;
1142    if (trimStart < trimEnd) {
1143        // Update the bookkeeping for framesReady()
1144        for (size_t i = trimStart; i < trimEnd; ++i) {
1145            updateFramesPendingAfterTrim_l(mTimedBufferQueue[i], "trim");
1146        }
1147
1148        // Now actually remove the buffers from the queue.
1149        mTimedBufferQueue.removeItemsAt(trimStart, trimEnd);
1150    }
1151}
1152
1153void AudioFlinger::PlaybackThread::TimedTrack::trimTimedBufferQueueHead_l(
1154        const char* logTag) {
1155    ALOG_ASSERT(mTimedBufferQueue.size() > 0,
1156                "%s called (reason \"%s\"), but timed buffer queue has no"
1157                " elements to trim.", __FUNCTION__, logTag);
1158
1159    updateFramesPendingAfterTrim_l(mTimedBufferQueue[0], logTag);
1160    mTimedBufferQueue.removeAt(0);
1161}
1162
1163void AudioFlinger::PlaybackThread::TimedTrack::updateFramesPendingAfterTrim_l(
1164        const TimedBuffer& buf,
1165        const char* logTag) {
1166    uint32_t bufBytes        = buf.buffer()->size();
1167    uint32_t consumedAlready = buf.position();
1168
1169    ALOG_ASSERT(consumedAlready <= bufBytes,
1170                "Bad bookkeeping while updating frames pending.  Timed buffer is"
1171                " only %u bytes long, but claims to have consumed %u"
1172                " bytes.  (update reason: \"%s\")",
1173                bufBytes, consumedAlready, logTag);
1174
1175    uint32_t bufFrames = (bufBytes - consumedAlready) / mFrameSize;
1176    ALOG_ASSERT(mFramesPendingInQueue >= bufFrames,
1177                "Bad bookkeeping while updating frames pending.  Should have at"
1178                " least %u queued frames, but we think we have only %u.  (update"
1179                " reason: \"%s\")",
1180                bufFrames, mFramesPendingInQueue, logTag);
1181
1182    mFramesPendingInQueue -= bufFrames;
1183}
1184
1185status_t AudioFlinger::PlaybackThread::TimedTrack::queueTimedBuffer(
1186    const sp<IMemory>& buffer, int64_t pts) {
1187
1188    {
1189        Mutex::Autolock mttLock(mMediaTimeTransformLock);
1190        if (!mMediaTimeTransformValid)
1191            return INVALID_OPERATION;
1192    }
1193
1194    Mutex::Autolock _l(mTimedBufferQueueLock);
1195
1196    uint32_t bufFrames = buffer->size() / mFrameSize;
1197    mFramesPendingInQueue += bufFrames;
1198    mTimedBufferQueue.add(TimedBuffer(buffer, pts));
1199
1200    return NO_ERROR;
1201}
1202
1203status_t AudioFlinger::PlaybackThread::TimedTrack::setMediaTimeTransform(
1204    const LinearTransform& xform, TimedAudioTrack::TargetTimeline target) {
1205
1206    ALOGVV("setMediaTimeTransform az=%lld bz=%lld n=%d d=%u tgt=%d",
1207           xform.a_zero, xform.b_zero, xform.a_to_b_numer, xform.a_to_b_denom,
1208           target);
1209
1210    if (!(target == TimedAudioTrack::LOCAL_TIME ||
1211          target == TimedAudioTrack::COMMON_TIME)) {
1212        return BAD_VALUE;
1213    }
1214
1215    Mutex::Autolock lock(mMediaTimeTransformLock);
1216    mMediaTimeTransform = xform;
1217    mMediaTimeTransformTarget = target;
1218    mMediaTimeTransformValid = true;
1219
1220    return NO_ERROR;
1221}
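// Illustrative example (hypothetical values): to map media-time microseconds directly onto
// the local-time timeline starting "now", a caller could pass a LinearTransform with
// a_zero = 0, b_zero = <current local time>, a_to_b_numer = <local time frequency>,
// a_to_b_denom = 1000000, and target = TimedAudioTrack::LOCAL_TIME.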
1222
1223#define min(a, b) ((a) < (b) ? (a) : (b))
1224
1225// implementation of getNextBuffer for tracks whose buffers have timestamps
1226status_t AudioFlinger::PlaybackThread::TimedTrack::getNextBuffer(
1227    AudioBufferProvider::Buffer* buffer, int64_t pts)
1228{
1229    if (pts == AudioBufferProvider::kInvalidPTS) {
1230        buffer->raw = NULL;
1231        buffer->frameCount = 0;
1232        mTimedAudioOutputOnTime = false;
1233        return INVALID_OPERATION;
1234    }
1235
1236    Mutex::Autolock _l(mTimedBufferQueueLock);
1237
1238    ALOG_ASSERT(!mQueueHeadInFlight,
1239                "getNextBuffer called without releaseBuffer!");
1240
1241    while (true) {
1242
1243        // if we have no timed buffers, then fail
1244        if (mTimedBufferQueue.isEmpty()) {
1245            buffer->raw = NULL;
1246            buffer->frameCount = 0;
1247            return NOT_ENOUGH_DATA;
1248        }
1249
1250        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1251
1252        // calculate the PTS of the head of the timed buffer queue expressed in
1253        // local time
1254        int64_t headLocalPTS;
1255        {
1256            Mutex::Autolock mttLock(mMediaTimeTransformLock);
1257
1258            ALOG_ASSERT(mMediaTimeTransformValid, "media time transform invalid");
1259
1260            if (mMediaTimeTransform.a_to_b_denom == 0) {
1261                // the transform represents a pause, so yield silence
1262                timedYieldSilence_l(buffer->frameCount, buffer);
1263                return NO_ERROR;
1264            }
1265
1266            int64_t transformedPTS;
1267            if (!mMediaTimeTransform.doForwardTransform(head.pts(),
1268                                                        &transformedPTS)) {
1269                // the transform failed.  this shouldn't happen, but if it does
1270                // then just drop this buffer
1271                ALOGW("timedGetNextBuffer transform failed");
1272                buffer->raw = NULL;
1273                buffer->frameCount = 0;
1274                trimTimedBufferQueueHead_l("getNextBuffer; no transform");
1275                return NO_ERROR;
1276            }
1277
1278            if (mMediaTimeTransformTarget == TimedAudioTrack::COMMON_TIME) {
1279                if (OK != mCCHelper.commonTimeToLocalTime(transformedPTS,
1280                                                          &headLocalPTS)) {
1281                    buffer->raw = NULL;
1282                    buffer->frameCount = 0;
1283                    return INVALID_OPERATION;
1284                }
1285            } else {
1286                headLocalPTS = transformedPTS;
1287            }
1288        }
1289
1290        uint32_t sr = sampleRate();
1291
1292        // adjust the head buffer's PTS to reflect the portion of the head buffer
1293        // that has already been consumed
1294        int64_t effectivePTS = headLocalPTS +
1295                ((head.position() / mFrameSize) * mLocalTimeFreq / sr);
1296
1297        // Calculate the delta in samples between the head of the input buffer
1298        // queue and the start of the next output buffer that will be written.
1299        // If the transformation fails because of over or underflow, it means
1300        // that the sample's position in the output stream is so far out of
1301        // whack that it should just be dropped.
1302        int64_t sampleDelta;
1303        if (llabs(effectivePTS - pts) >= (static_cast<int64_t>(1) << 31)) {
1304            ALOGV("*** head buffer is too far from PTS: dropped buffer");
1305            trimTimedBufferQueueHead_l("getNextBuffer, buf pts too far from"
1306                                       " mix");
1307            continue;
1308        }
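        // Shifting the local-time delta left by 32 bits before the transform yields
        // sampleDelta in the signed Q32.32 fixed-point sample units used below.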
1309        if (!mLocalTimeToSampleTransform.doForwardTransform(
1310                (effectivePTS - pts) << 32, &sampleDelta)) {
1311            ALOGV("*** too late during sample rate transform: dropped buffer");
1312            trimTimedBufferQueueHead_l("getNextBuffer, bad local to sample");
1313            continue;
1314        }
1315
1316        ALOGVV("*** getNextBuffer head.pts=%lld head.pos=%d pts=%lld"
1317               " sampleDelta=[%d.%08x]",
1318               head.pts(), head.position(), pts,
1319               static_cast<int32_t>((sampleDelta >= 0 ? 0 : 1)
1320                   + (sampleDelta >> 32)),
1321               static_cast<uint32_t>(sampleDelta & 0xFFFFFFFF));
1322
1323        // if the delta between the ideal placement for the next input sample and
1324        // the current output position is within this threshold, then we will
1325        // concatenate the next input samples to the previous output
1326        const int64_t kSampleContinuityThreshold =
1327                (static_cast<int64_t>(sr) << 32) / 250;
1328
1329        // if this is the first buffer of audio that we're emitting from this track
1330        // then it should be almost exactly on time.
1331        const int64_t kSampleStartupThreshold = 1LL << 32;
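        // Both thresholds are expressed in the same signed Q32.32 fixed-point sample units
        // as sampleDelta: kSampleContinuityThreshold is sr/250 samples (4 ms of audio), and
        // kSampleStartupThreshold is exactly one sample.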
1332
1333        if ((mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleContinuityThreshold) ||
1334           (!mTimedAudioOutputOnTime && llabs(sampleDelta) <= kSampleStartupThreshold)) {
1335            // the next input is close enough to being on time, so concatenate it
1336            // with the last output
1337            timedYieldSamples_l(buffer);
1338
1339            ALOGVV("*** on time: head.pos=%d frameCount=%u",
1340                    head.position(), buffer->frameCount);
1341            return NO_ERROR;
1342        }
1343
1344        // Looks like our output is not on time.  Reset our on-time status.
1345        // The next time we mix samples from our input queue, they should be
1346        // within the startup threshold.
1347        mTimedAudioOutputOnTime = false;
1348        if (sampleDelta > 0) {
1349            // the gap between the current output position and the proper start of
1350            // the next input sample is too big, so fill it with silence
1351            uint32_t framesUntilNextInput = (sampleDelta + 0x80000000) >> 32;
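            // (adding 0x80000000 before the shift rounds the Q32.32 sample delta to the
            // nearest whole frame rather than truncating it)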
1352
1353            timedYieldSilence_l(framesUntilNextInput, buffer);
1354            ALOGV("*** silence: frameCount=%u", buffer->frameCount);
1355            return NO_ERROR;
1356        } else {
1357            // the next input sample is late
1358            uint32_t lateFrames = static_cast<uint32_t>(-((sampleDelta + 0x80000000) >> 32));
1359            size_t onTimeSamplePosition =
1360                    head.position() + lateFrames * mFrameSize;
1361
1362            if (onTimeSamplePosition > head.buffer()->size()) {
1363                // all the remaining samples in the head are too late, so
1364                // drop it and move on
1365                ALOGV("*** too late: dropped buffer");
1366                trimTimedBufferQueueHead_l("getNextBuffer, dropped late buffer");
1367                continue;
1368            } else {
1369                // skip over the late samples
1370                head.setPosition(onTimeSamplePosition);
1371
1372                // yield the available samples
1373                timedYieldSamples_l(buffer);
1374
1375                ALOGV("*** late: head.pos=%d frameCount=%u", head.position(), buffer->frameCount);
1376                return NO_ERROR;
1377            }
1378        }
1379    }
1380}
1381
1382// Yield samples from the timed buffer queue head up to the given output
1383// buffer's capacity.
1384//
1385// Caller must hold mTimedBufferQueueLock
1386void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSamples_l(
1387    AudioBufferProvider::Buffer* buffer) {
1388
1389    const TimedBuffer& head = mTimedBufferQueue[0];
1390
1391    buffer->raw = (static_cast<uint8_t*>(head.buffer()->pointer()) +
1392                   head.position());
1393
1394    uint32_t framesLeftInHead = ((head.buffer()->size() - head.position()) /
1395                                 mFrameSize);
1396    size_t framesRequested = buffer->frameCount;
1397    buffer->frameCount = min(framesLeftInHead, framesRequested);
1398
1399    mQueueHeadInFlight = true;
1400    mTimedAudioOutputOnTime = true;
1401}
1402
1403// Yield samples of silence up to the given output buffer's capacity
1404//
1405// Caller must hold mTimedBufferQueueLock
1406void AudioFlinger::PlaybackThread::TimedTrack::timedYieldSilence_l(
1407    uint32_t numFrames, AudioBufferProvider::Buffer* buffer) {
1408
1409    // lazily allocate a buffer filled with silence
1410    if (mTimedSilenceBufferSize < numFrames * mFrameSize) {
1411        delete [] mTimedSilenceBuffer;
1412        mTimedSilenceBufferSize = numFrames * mFrameSize;
1413        mTimedSilenceBuffer = new uint8_t[mTimedSilenceBufferSize];
1414        memset(mTimedSilenceBuffer, 0, mTimedSilenceBufferSize);
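        // note: zero-fill is silence only for PCM formats whose silent sample value is 0
        // (e.g. 16-bit PCM)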
1415    }
1416
1417    buffer->raw = mTimedSilenceBuffer;
1418    size_t framesRequested = buffer->frameCount;
1419    buffer->frameCount = min(numFrames, framesRequested);
1420
1421    mTimedAudioOutputOnTime = false;
1422}
1423
1424// AudioBufferProvider interface
1425void AudioFlinger::PlaybackThread::TimedTrack::releaseBuffer(
1426    AudioBufferProvider::Buffer* buffer) {
1427
1428    Mutex::Autolock _l(mTimedBufferQueueLock);
1429
1430    // If the buffer which was just released is part of the buffer at the head
1431    // of the queue, be sure to update the amount of the buffer which has been
1432    // consumed.  If the buffer being returned is not part of the head of the
1433    // queue, it is either because the buffer is part of the silence buffer, or
1434    // because the head of the timed queue was trimmed after the mixer called
1435    // getNextBuffer but before the mixer called releaseBuffer.
1436    if (buffer->raw == mTimedSilenceBuffer) {
1437        ALOG_ASSERT(!mQueueHeadInFlight,
1438                    "Queue head in flight during release of silence buffer!");
1439        goto done;
1440    }
1441
1442    ALOG_ASSERT(mQueueHeadInFlight,
1443                "TimedTrack::releaseBuffer of non-silence buffer, but no queue"
1444                " head in flight.");
1445
1446    if (mTimedBufferQueue.size()) {
1447        TimedBuffer& head = mTimedBufferQueue.editItemAt(0);
1448
1449        void* start = head.buffer()->pointer();
1450        void* end   = reinterpret_cast<void*>(
1451                        reinterpret_cast<uint8_t*>(head.buffer()->pointer())
1452                        + head.buffer()->size());
1453
1454        ALOG_ASSERT((buffer->raw >= start) && (buffer->raw < end),
1455                    "released buffer not within the head of the timed buffer"
1456                    " queue; qHead = [%p, %p], released buffer = %p",
1457                    start, end, buffer->raw);
1458
1459        head.setPosition(head.position() +
1460                (buffer->frameCount * mFrameSize));
1461        mQueueHeadInFlight = false;
1462
1463        ALOG_ASSERT(mFramesPendingInQueue >= buffer->frameCount,
1464                    "Bad bookkeeping during releaseBuffer!  Should have at"
1465                    " least %u queued frames, but we think we have only %u",
1466                    buffer->frameCount, mFramesPendingInQueue);
1467
1468        mFramesPendingInQueue -= buffer->frameCount;
1469
1470        if ((static_cast<size_t>(head.position()) >= head.buffer()->size())
1471            || mTrimQueueHeadOnRelease) {
1472            trimTimedBufferQueueHead_l("releaseBuffer");
1473            mTrimQueueHeadOnRelease = false;
1474        }
1475    } else {
1476        LOG_FATAL("TimedTrack::releaseBuffer of non-silence buffer with no"
1477                  " buffers in the timed buffer queue");
1478    }
1479
1480done:
1481    buffer->raw = 0;
1482    buffer->frameCount = 0;
1483}
1484
1485size_t AudioFlinger::PlaybackThread::TimedTrack::framesReady() const {
1486    Mutex::Autolock _l(mTimedBufferQueueLock);
1487    return mFramesPendingInQueue;
1488}
1489
1490AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer()
1491        : mPTS(0), mPosition(0) {}
1492
1493AudioFlinger::PlaybackThread::TimedTrack::TimedBuffer::TimedBuffer(
1494    const sp<IMemory>& buffer, int64_t pts)
1495        : mBuffer(buffer), mPTS(pts), mPosition(0) {}
1496
1497
1498// ----------------------------------------------------------------------------
1499
1500AudioFlinger::PlaybackThread::OutputTrack::OutputTrack(
1501            PlaybackThread *playbackThread,
1502            DuplicatingThread *sourceThread,
1503            uint32_t sampleRate,
1504            audio_format_t format,
1505            audio_channel_mask_t channelMask,
1506            size_t frameCount,
1507            int uid)
1508    :   Track(playbackThread, NULL, AUDIO_STREAM_CNT, sampleRate, format, channelMask, frameCount,
1509                NULL, 0, uid, IAudioFlinger::TRACK_DEFAULT),
1510    mActive(false), mSourceThread(sourceThread), mClientProxy(NULL)
1511{
1512
1513    if (mCblk != NULL) {
1514        mOutBuffer.frameCount = 0;
1515        playbackThread->mTracks.add(this);
1516        ALOGV("OutputTrack constructor mCblk %p, mBuffer %p, "
1517                "mCblk->frameCount_ %u, mChannelMask 0x%08x",
1518                mCblk, mBuffer,
1519                mCblk->frameCount_, mChannelMask);
1520        // since client and server are in the same process,
1521        // the buffer has the same virtual address on both sides
1522        mClientProxy = new AudioTrackClientProxy(mCblk, mBuffer, mFrameCount, mFrameSize,
1523                true /*clientInServer*/);
1524        mClientProxy->setVolumeLR((uint32_t(uint16_t(0x1000)) << 16) | uint16_t(0x1000));
1525        mClientProxy->setSendLevel(0.0);
1526        mClientProxy->setSampleRate(sampleRate);
1528    } else {
1529        ALOGW("Error creating output track on thread %p", playbackThread);
1530    }
1531}
1532
1533AudioFlinger::PlaybackThread::OutputTrack::~OutputTrack()
1534{
1535    clearBufferQueue();
1536    delete mClientProxy;
1537    // the superclass destructor will now delete the server proxy and the shared memory that both proxies refer to
1538}
1539
1540status_t AudioFlinger::PlaybackThread::OutputTrack::start(AudioSystem::sync_event_t event,
1541                                                          int triggerSession)
1542{
1543    status_t status = Track::start(event, triggerSession);
1544    if (status != NO_ERROR) {
1545        return status;
1546    }
1547
1548    mActive = true;
1549    mRetryCount = 127;
1550    return status;
1551}
1552
1553void AudioFlinger::PlaybackThread::OutputTrack::stop()
1554{
1555    Track::stop();
1556    clearBufferQueue();
1557    mOutBuffer.frameCount = 0;
1558    mActive = false;
1559}
1560
1561bool AudioFlinger::PlaybackThread::OutputTrack::write(int16_t* data, uint32_t frames)
1562{
1563    Buffer *pInBuffer;
1564    Buffer inBuffer;
1565    uint32_t channelCount = mChannelCount;
1566    bool outputBufferFull = false;
1567    inBuffer.frameCount = frames;
1568    inBuffer.i16 = data;
1569
1570    uint32_t waitTimeLeftMs = mSourceThread->waitTimeMs();
1571
1572    if (!mActive && frames != 0) {
1573        start();
1574        sp<ThreadBase> thread = mThread.promote();
1575        if (thread != 0) {
1577            if (mFrameCount > frames) {
1578                if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1579                    uint32_t startFrames = (mFrameCount - frames);
1580                    pInBuffer = new Buffer;
1581                    pInBuffer->mBuffer = new int16_t[startFrames * channelCount];
1582                    pInBuffer->frameCount = startFrames;
1583                    pInBuffer->i16 = pInBuffer->mBuffer;
1584                    memset(pInBuffer->raw, 0, startFrames * channelCount * sizeof(int16_t));
1585                    mBufferQueue.add(pInBuffer);
1586                } else {
1587                    ALOGW("OutputTrack::write() %p no more buffers in queue", this);
1588                }
1589            }
1590        }
1591    }
1592
1593    while (waitTimeLeftMs) {
1594        // First write pending buffers, then new data
1595        if (mBufferQueue.size()) {
1596            pInBuffer = mBufferQueue.itemAt(0);
1597        } else {
1598            pInBuffer = &inBuffer;
1599        }
1600
1601        if (pInBuffer->frameCount == 0) {
1602            break;
1603        }
1604
1605        if (mOutBuffer.frameCount == 0) {
1606            mOutBuffer.frameCount = pInBuffer->frameCount;
1607            nsecs_t startTime = systemTime();
1608            status_t status = obtainBuffer(&mOutBuffer, waitTimeLeftMs);
1609            if (status != NO_ERROR) {
1610                ALOGV("OutputTrack::write() %p thread %p no more output buffers; status %d", this,
1611                        mThread.unsafe_get(), status);
1612                outputBufferFull = true;
1613                break;
1614            }
1615            uint32_t waitTimeMs = (uint32_t)ns2ms(systemTime() - startTime);
1616            if (waitTimeLeftMs >= waitTimeMs) {
1617                waitTimeLeftMs -= waitTimeMs;
1618            } else {
1619                waitTimeLeftMs = 0;
1620            }
1621        }
1622
1623        uint32_t outFrames = pInBuffer->frameCount > mOutBuffer.frameCount ? mOutBuffer.frameCount :
1624                pInBuffer->frameCount;
1625        memcpy(mOutBuffer.raw, pInBuffer->raw, outFrames * channelCount * sizeof(int16_t));
1626        Proxy::Buffer buf;
1627        buf.mFrameCount = outFrames;
1628        buf.mRaw = NULL;
1629        mClientProxy->releaseBuffer(&buf);
1630        pInBuffer->frameCount -= outFrames;
1631        pInBuffer->i16 += outFrames * channelCount;
1632        mOutBuffer.frameCount -= outFrames;
1633        mOutBuffer.i16 += outFrames * channelCount;
1634
1635        if (pInBuffer->frameCount == 0) {
1636            if (mBufferQueue.size()) {
1637                mBufferQueue.removeAt(0);
1638                delete [] pInBuffer->mBuffer;
1639                delete pInBuffer;
1640                ALOGV("OutputTrack::write() %p thread %p released overflow buffer %d", this,
1641                        mThread.unsafe_get(), mBufferQueue.size());
1642            } else {
1643                break;
1644            }
1645        }
1646    }
1647
1648    // If we could not write all frames, allocate a buffer and queue it for next time.
1649    if (inBuffer.frameCount) {
1650        sp<ThreadBase> thread = mThread.promote();
1651        if (thread != 0 && !thread->standby()) {
1652            if (mBufferQueue.size() < kMaxOverFlowBuffers) {
1653                pInBuffer = new Buffer;
1654                pInBuffer->mBuffer = new int16_t[inBuffer.frameCount * channelCount];
1655                pInBuffer->frameCount = inBuffer.frameCount;
1656                pInBuffer->i16 = pInBuffer->mBuffer;
1657                memcpy(pInBuffer->raw, inBuffer.raw, inBuffer.frameCount * channelCount *
1658                        sizeof(int16_t));
1659                mBufferQueue.add(pInBuffer);
1660                ALOGV("OutputTrack::write() %p thread %p adding overflow buffer %d", this,
1661                        mThread.unsafe_get(), mBufferQueue.size());
1662            } else {
1663                ALOGW("OutputTrack::write() %p thread %p no more overflow buffers",
1664                        this, mThread.unsafe_get());
1665            }
1666        }
1667    }
1668
1669    // Calling write() with a 0 length buffer means that no more data will be written:
1670    // If no more buffers are pending, fill output track buffer to make sure it is started
1671    // by output mixer.
1672    if (frames == 0 && mBufferQueue.size() == 0) {
1673        // FIXME broken, replace by getting framesReady() from proxy
1674        size_t user = 0;    // was mCblk->user
1675        if (user < mFrameCount) {
1676            frames = mFrameCount - user;
1677            pInBuffer = new Buffer;
1678            pInBuffer->mBuffer = new int16_t[frames * channelCount];
1679            pInBuffer->frameCount = frames;
1680            pInBuffer->i16 = pInBuffer->mBuffer;
1681            memset(pInBuffer->raw, 0, frames * channelCount * sizeof(int16_t));
1682            mBufferQueue.add(pInBuffer);
1683        } else if (mActive) {
1684            stop();
1685        }
1686    }
1687
1688    return outputBufferFull;
1689}
1690
1691status_t AudioFlinger::PlaybackThread::OutputTrack::obtainBuffer(
1692        AudioBufferProvider::Buffer* buffer, uint32_t waitTimeMs)
1693{
1694    ClientProxy::Buffer buf;
1695    buf.mFrameCount = buffer->frameCount;
1696    struct timespec timeout;
1697    timeout.tv_sec = waitTimeMs / 1000;
1698    timeout.tv_nsec = (int) (waitTimeMs % 1000) * 1000000;
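    // convert the millisecond wait budget into the whole-seconds/nanoseconds timespec
    // expected by the client proxy's obtainBuffer()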
1699    status_t status = mClientProxy->obtainBuffer(&buf, &timeout);
1700    buffer->frameCount = buf.mFrameCount;
1701    buffer->raw = buf.mRaw;
1702    return status;
1703}
1704
1705void AudioFlinger::PlaybackThread::OutputTrack::clearBufferQueue()
1706{
1707    size_t size = mBufferQueue.size();
1708
1709    for (size_t i = 0; i < size; i++) {
1710        Buffer *pBuffer = mBufferQueue.itemAt(i);
1711        delete [] pBuffer->mBuffer;
1712        delete pBuffer;
1713    }
1714    mBufferQueue.clear();
1715}
1716
1717
1718// ----------------------------------------------------------------------------
1719//      Record
1720// ----------------------------------------------------------------------------
1721
1722AudioFlinger::RecordHandle::RecordHandle(
1723        const sp<AudioFlinger::RecordThread::RecordTrack>& recordTrack)
1724    : BnAudioRecord(),
1725    mRecordTrack(recordTrack)
1726{
1727}
1728
1729AudioFlinger::RecordHandle::~RecordHandle() {
1730    stop_nonvirtual();
1731    mRecordTrack->destroy();
1732}
1733
1734sp<IMemory> AudioFlinger::RecordHandle::getCblk() const {
1735    return mRecordTrack->getCblk();
1736}
1737
1738status_t AudioFlinger::RecordHandle::start(int /*AudioSystem::sync_event_t*/ event,
1739        int triggerSession) {
1740    ALOGV("RecordHandle::start()");
1741    return mRecordTrack->start((AudioSystem::sync_event_t)event, triggerSession);
1742}
1743
1744void AudioFlinger::RecordHandle::stop() {
1745    stop_nonvirtual();
1746}
1747
1748void AudioFlinger::RecordHandle::stop_nonvirtual() {
1749    ALOGV("RecordHandle::stop()");
1750    mRecordTrack->stop();
1751}
1752
1753status_t AudioFlinger::RecordHandle::onTransact(
1754    uint32_t code, const Parcel& data, Parcel* reply, uint32_t flags)
1755{
1756    return BnAudioRecord::onTransact(code, data, reply, flags);
1757}
1758
1759// ----------------------------------------------------------------------------
1760
1761// RecordTrack constructor must be called with AudioFlinger::mLock held
1762AudioFlinger::RecordThread::RecordTrack::RecordTrack(
1763            RecordThread *thread,
1764            const sp<Client>& client,
1765            uint32_t sampleRate,
1766            audio_format_t format,
1767            audio_channel_mask_t channelMask,
1768            size_t frameCount,
1769            int sessionId,
1770            int uid)
1771    :   TrackBase(thread, client, sampleRate, format,
1772                  channelMask, frameCount, 0 /*sharedBuffer*/, sessionId, uid, false /*isOut*/),
1773        mOverflow(false)
1774{
1775    ALOGV("RecordTrack constructor");
1776    if (mCblk != NULL) {
1777        mServerProxy = new AudioRecordServerProxy(mCblk, mBuffer, frameCount, mFrameSize);
1778    }
1779}
1780
1781AudioFlinger::RecordThread::RecordTrack::~RecordTrack()
1782{
1783    ALOGV("%s", __func__);
1784}
1785
1786// AudioBufferProvider interface
1787status_t AudioFlinger::RecordThread::RecordTrack::getNextBuffer(AudioBufferProvider::Buffer* buffer,
1788        int64_t pts)
1789{
1790    ServerProxy::Buffer buf;
1791    buf.mFrameCount = buffer->frameCount;
1792    status_t status = mServerProxy->obtainBuffer(&buf);
1793    buffer->frameCount = buf.mFrameCount;
1794    buffer->raw = buf.mRaw;
1795    if (buf.mFrameCount == 0) {
1796        // FIXME also wake futex so that overrun is noticed more quickly
1797        (void) android_atomic_or(CBLK_OVERRUN, &mCblk->mFlags);
1798    }
1799    return status;
1800}
1801
1802status_t AudioFlinger::RecordThread::RecordTrack::start(AudioSystem::sync_event_t event,
1803                                                        int triggerSession)
1804{
1805    sp<ThreadBase> thread = mThread.promote();
1806    if (thread != 0) {
1807        RecordThread *recordThread = (RecordThread *)thread.get();
1808        return recordThread->start(this, event, triggerSession);
1809    } else {
1810        return BAD_VALUE;
1811    }
1812}
1813
1814void AudioFlinger::RecordThread::RecordTrack::stop()
1815{
1816    sp<ThreadBase> thread = mThread.promote();
1817    if (thread != 0) {
1818        RecordThread *recordThread = (RecordThread *)thread.get();
1819        if (recordThread->stop(this)) {
1820            AudioSystem::stopInput(recordThread->id());
1821        }
1822    }
1823}
1824
1825void AudioFlinger::RecordThread::RecordTrack::destroy()
1826{
1827    // see comments at AudioFlinger::PlaybackThread::Track::destroy()
1828    sp<RecordTrack> keep(this);
1829    {
1830        sp<ThreadBase> thread = mThread.promote();
1831        if (thread != 0) {
1832            if (mState == ACTIVE || mState == RESUMING) {
1833                AudioSystem::stopInput(thread->id());
1834            }
1835            AudioSystem::releaseInput(thread->id());
1836            Mutex::Autolock _l(thread->mLock);
1837            RecordThread *recordThread = (RecordThread *) thread.get();
1838            recordThread->destroyTrack_l(this);
1839        }
1840    }
1841}
1842
1843void AudioFlinger::RecordThread::RecordTrack::invalidate()
1844{
1845    // FIXME should use proxy, and needs work
1846    audio_track_cblk_t* cblk = mCblk;
1847    android_atomic_or(CBLK_INVALID, &cblk->mFlags);
1848    android_atomic_release_store(0x40000000, &cblk->mFutex);
1849    // client is not in server, so FUTEX_WAKE is needed instead of FUTEX_WAKE_PRIVATE
1850    (void) __futex_syscall3(&cblk->mFutex, FUTEX_WAKE, INT_MAX);
1851}
1852
1853
1854/*static*/ void AudioFlinger::RecordThread::RecordTrack::appendDumpHeader(String8& result)
1855{
1856    result.append("Client Fmt Chn mask Session S   Server fCount\n");
1857}
1858
1859void AudioFlinger::RecordThread::RecordTrack::dump(char* buffer, size_t size)
1860{
1861    snprintf(buffer, size, "%6u %3u %08X %7u %1d %08X %6u\n",
1862            (mClient == 0) ? getpid_cached : mClient->pid(),
1863            mFormat,
1864            mChannelMask,
1865            mSessionId,
1866            mState,
1867            mCblk->mServer,
1868            mFrameCount);
1869}
1870
1871}; // namespace android
1872