// AudioStreamOut.cpp revision b7d477723e64b7526106641de168788e59152617
/*
**
** Copyright 2012, The Android Open Source Project
**
** Licensed under the Apache License, Version 2.0 (the "License");
** you may not use this file except in compliance with the License.
** You may obtain a copy of the License at
**
**     http://www.apache.org/licenses/LICENSE-2.0
**
** Unless required by applicable law or agreed to in writing, software
** distributed under the License is distributed on an "AS IS" BASIS,
** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
** See the License for the specific language governing permissions and
** limitations under the License.
*/
17
18#define LOG_TAG "AudioHAL:AudioStreamOut"
19
20#include <utils/Log.h>
21
22#include "AudioHardwareOutput.h"
23#include "AudioStreamOut.h"
24
25// Set to 1 to print timestamp data in CSV format.
26#ifndef HAL_PRINT_TIMESTAMP_CSV
27#define HAL_PRINT_TIMESTAMP_CSV 0
28#endif
29
30//#define VERY_VERBOSE_LOGGING
31#ifdef VERY_VERBOSE_LOGGING
32#define ALOGVV ALOGV
33#else
34#define ALOGVV(a...) do { } while(0)
35#endif
36
37namespace android {
38
39AudioStreamOut::AudioStreamOut(AudioHardwareOutput& owner, bool mcOut)
40    : mFramesPresented(0)
41    , mFramesWrittenRemainder(0)
42    , mOwnerHAL(owner)
43    , mFramesWritten(0)
44    , mTgtDevices(0)
45    , mAudioFlingerTgtDevices(0)
46    , mIsMCOutput(mcOut)
47    , mIsEncoded(false)
48    , mSPDIFEncoder(this)
49{
50    assert(mLocalClock.initCheck());
51
52    mPhysOutputs.setCapacity(3);
53
54    // Set some reasonable defaults for these.  All of this should be eventually
55    // be overwritten by a specific audio flinger configuration, but it does not
56    // hurt to have something here by default.
57    mInputSampleRate = 48000;
58    mInputChanMask = AUDIO_CHANNEL_OUT_STEREO;
59    mInputFormat = AUDIO_FORMAT_PCM_16_BIT;
60    mInputNominalChunksInFlight = 4;
61    updateInputNums();
62
63    mThrottleValid = false;
64
65    memset(&mUSecToLocalTime, 0, sizeof(mUSecToLocalTime));
66    mUSecToLocalTime.a_to_b_numer = mLocalClock.getLocalFreq();
67    mUSecToLocalTime.a_to_b_denom = 1000000;
68    LinearTransform::reduce(&mUSecToLocalTime.a_to_b_numer,
69                            &mUSecToLocalTime.a_to_b_denom);
70}
71
72AudioStreamOut::~AudioStreamOut()
73{
74    releaseAllOutputs();
75}
76
77status_t AudioStreamOut::set(
78        audio_format_t *pFormat,
79        uint32_t *pChannels,
80        uint32_t *pRate)
81{
82    Mutex::Autolock _l(mLock);
83    audio_format_t lFormat   = pFormat ? *pFormat : AUDIO_FORMAT_DEFAULT;
84    uint32_t       lChannels = pChannels ? *pChannels : 0;
85    uint32_t       lRate     = pRate ? *pRate : 0;
86
87    // fix up defaults
88    if (lFormat == AUDIO_FORMAT_DEFAULT) lFormat = format();
89    if (lChannels == 0)                  lChannels = chanMask();
90    if (lRate == 0)                      lRate = sampleRate();
91
92    if (pFormat)   *pFormat   = lFormat;
93    if (pChannels) *pChannels = lChannels;
94    if (pRate)     *pRate     = lRate;
95
96    mIsEncoded = !audio_is_linear_pcm(lFormat);
97
98    if (!mIsMCOutput && !mIsEncoded) {
99        // If this is the primary stream out, then demand our defaults.
100        if ((lFormat   != format()) ||
101            (lChannels != chanMask()) ||
102            (lRate     != sampleRate()))
103            return BAD_VALUE;
104    } else {
105        // Else check to see if our HDMI sink supports this format before proceeding.
106        if (!mOwnerHAL.getHDMIAudioCaps().supportsFormat(lFormat,
107                                                     lRate,
108                                                     audio_channel_count_from_out_mask(lChannels)))
109            return BAD_VALUE;
110    }
111
112    mInputFormat = lFormat;
113    mInputChanMask = lChannels;
114    mInputSampleRate = lRate;
115    ALOGI("AudioStreamOut::set: lRate = %u, mIsEncoded = %d\n", lRate, mIsEncoded);
116    updateInputNums();
117
118    return NO_ERROR;
119}
120
121void AudioStreamOut::setTgtDevices(uint32_t tgtDevices)
122{
123    Mutex::Autolock _l(mRoutingLock);
124    if (mTgtDevices != tgtDevices) {
125        mTgtDevices = tgtDevices;
126    }
127}
128
129status_t AudioStreamOut::standby()
130{
131    releaseAllOutputs();
132    return NO_ERROR;
133}
134
135void AudioStreamOut::releaseAllOutputs() {
136    Mutex::Autolock _l(mRoutingLock);
137
138    ALOGI("releaseAllOutputs: releasing %d mPhysOutputs", mPhysOutputs.size());
139    AudioOutputList::iterator I;
140    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I)
141        mOwnerHAL.releaseOutput(*this, *I);
142
143    mPhysOutputs.clear();
144}
145
146void AudioStreamOut::updateInputNums()
147{
148    assert(mLocalClock.initCheck());
149
150    // mInputBufSize determines how many audio frames AudioFlinger is going to
151    // mix at a time.  We also use the mInputBufSize to determine the ALSA
152    // period_size, the number of of samples which need to play out (at most)
153    // before low level ALSA driver code is required to wake up upper levels of
154    // SW to fill a new buffer.  As it turns out, ALSA is going to apply some
155    // rules and modify the period_size which we pass to it.  One of the things
156    // ALSA seems to do is attempt to round the period_size up to a value which
157    // will make the period an integral number of 0.5 mSec.  This round-up
158    // behavior can cause the low levels of ALSA to consume more data per period
159    // than the AudioFlinger mixer has been told to produce.  If there are only
160    // two buffers in flight at any given point in time, this can lead to a
161    // situation where the pipeline ends up slipping an extra buffer and
162    // underflowing.  There are two approaches to mitigate this, both of which
163    // are implemented in this HAL...
164    //
165    // 1) Try as hard as possible to make certain that the buffer size we choose
166    //    results in a period_size which is not going to get rounded up by ALSA.
167    //    This means that we want a buffer size which at the chosen sample rate
168    //    and frame size will be an integral multiple of 1/2 mSec.
169    // 2) Increate the number of chunks we keep in flight.  If the system slips
170    //    a single period, its only really a problem if there is no data left in
171    //    the pipeline waiting to be played out.  The mixer should going to mix
172    //    as fast as possible until the buffer has been topped off.  By
173    //    decreasing the buffer size and increasing the number of buffers in
174    //    flight, we increase the number of interrups and mix events per second,
175    //    but buy ourselves some insurance against the negative side effects of
176    //    slipping one buffer in the schedule.  We end up using 4 buffers at
177    //    10mSec, making the total audio latency somewhere between 40 and 50
178    //    mSec, depending on when a sample begins playback relative to
179    //    AudioFlinger's mixing schedule.
180    //
181    mInputChanCount = audio_channel_count_from_out_mask(mInputChanMask);
182
183    // Picking a chunk duration 10mSec should satisfy #1 for both major families
184    // of audio sample rates (the 44.1K and 48K families).  In the case of 44.1
185    // (or higher) we will end up with a multiple of 441 frames of audio per
186    // chunk, while for 48K, we will have a multiple of 480 frames of audio per
187    // chunk.  This will not work well for lower sample rates in the 44.1 family
188    // (22.05K and 11.025K); it is unlikely that we will ever be configured to
189    // deliver those rates, and if we ever do, we will need to rely on having
190    // extra chunks in flight to deal with the jitter problem described above.
191    mInputChunkFrames = outputSampleRate() / 100;
192
193    // FIXME: Currently, audio flinger demands an input buffer size which is a
194    // multiple of 16 audio frames.  Right now, there is no good way to
195    // reconcile this with ALSA round-up behavior described above when the
196    // desired sample rate is a member of the 44.1 family.  For now, we just
197    // round up to the nearest multiple of 16 frames and roll the dice, but
198    // someday it would be good to fix one or the other halves of the problem
199    // (either ALSA or AudioFlinger)
200    mInputChunkFrames = (mInputChunkFrames + 0xF) & ~0xF;
201
202    ALOGD("AudioStreamOut::updateInputNums: chunk size %u from output rate %u\n",
203        mInputChunkFrames, outputSampleRate());
204
205    // Buffer size is just the frame size multiplied by the number of
206    // frames per chunk.
207    mInputBufSize = mInputChunkFrames * getBytesPerOutputFrame();
208
209    // The nominal latency is just the duration of a chunk * the number of
210    // chunks we nominally keep in flight at any given point in time.
211    mInputNominalLatencyUSec = static_cast<uint32_t>(((
212                    static_cast<uint64_t>(mInputChunkFrames)
213                    * 1000000 * mInputNominalChunksInFlight)
214                    / mInputSampleRate));
215
216    memset(&mLocalTimeToFrames, 0, sizeof(mLocalTimeToFrames));
217    mLocalTimeToFrames.a_to_b_numer = mInputSampleRate;
218    mLocalTimeToFrames.a_to_b_denom = mLocalClock.getLocalFreq();
219    LinearTransform::reduce(
220            &mLocalTimeToFrames.a_to_b_numer,
221            &mLocalTimeToFrames.a_to_b_denom);
222}
223
224void AudioStreamOut::finishedWriteOp(size_t framesWritten,
225                                     bool needThrottle)
226{
227    assert(mLocalClock.initCheck());
228
229    int64_t now = mLocalClock.getLocalTime();
230
231    if (!mThrottleValid || !needThrottle) {
232        mThrottleValid = true;
233        mWriteStartLT  = now;
234        mFramesWritten = 0;
235    }
236
237    size_t framesWrittenAppRate;
238    uint32_t multiplier = getRateMultiplier();
239    if (multiplier != 1) {
240        // Accumulate round-off error from previous call.
241        framesWritten += mFramesWrittenRemainder;
242        // Scale from device sample rate to application rate.
243        framesWrittenAppRate = framesWritten / multiplier;
244        ALOGV("finishedWriteOp() framesWrittenAppRate = %d = %d / %d\n",
245            framesWrittenAppRate, framesWritten, multiplier);
246        // Save remainder for next time to prevent error accumulation.
247        mFramesWrittenRemainder = framesWritten - (framesWrittenAppRate * multiplier);
248    } else {
249        framesWrittenAppRate = framesWritten;
250    }
251
252    mFramesWritten += framesWrittenAppRate;
253    mFramesPresented += framesWrittenAppRate;
254
255    if (needThrottle) {
256        int64_t deltaLT;
257        mLocalTimeToFrames.doReverseTransform(mFramesWritten, &deltaLT);
258        deltaLT += mWriteStartLT;
259        deltaLT -= now;
260
261        int64_t deltaUSec;
262        mUSecToLocalTime.doReverseTransform(deltaLT, &deltaUSec);
263
264        if (deltaUSec > 0) {
265            useconds_t sleep_time;
266
267            // We should never be a full second ahead of schedule; sanity check
268            // our throttle time and cap the max sleep time at 1 second.
269            if (deltaUSec > 1000000)
270                sleep_time = 1000000;
271            else
272                sleep_time = static_cast<useconds_t>(deltaUSec);
273
274            usleep(sleep_time);
275        }
276    }
277}
278
279static const String8 keyRouting(AudioParameter::keyRouting);
280static const String8 keySupSampleRates("sup_sampling_rates");
281static const String8 keySupFormats("sup_formats");
282static const String8 keySupChannels("sup_channels");
283status_t AudioStreamOut::setParameters(__unused struct audio_stream *stream, const char *kvpairs)
284{
285    AudioParameter param = AudioParameter(String8(kvpairs));
286    String8 key = String8(AudioParameter::keyRouting);
287    int tmpInt;
288
289    if (param.getInt(key, tmpInt) == NO_ERROR) {
290        // The audio HAL handles routing to physical devices entirely
291        // internally and mostly ignores what audio flinger tells it to do.  JiC
292        // there is something (now or in the future) in audio flinger which
293        // cares about the routing value in a call to getParameters, we hang on
294        // to the last routing value set by audio flinger so we can at least be
295        // consistent when we lie to the upper levels about doing what they told
296        // us to do.
297        mAudioFlingerTgtDevices = static_cast<uint32_t>(tmpInt);
298    }
299
300    return NO_ERROR;
301}
302
303char* AudioStreamOut::getParameters(const char* k)
304{
305    AudioParameter param = AudioParameter(String8(k));
306    String8 value;
307
308    if (param.get(keyRouting, value) == NO_ERROR) {
309        param.addInt(keyRouting, (int)mAudioFlingerTgtDevices);
310    }
311
312    HDMIAudioCaps& hdmiCaps = mOwnerHAL.getHDMIAudioCaps();
313
314    if (param.get(keySupSampleRates, value) == NO_ERROR) {
315        if (mIsMCOutput) {
316            hdmiCaps.getRatesForAF(value);
317            param.add(keySupSampleRates, value);
318        } else {
319            param.add(keySupSampleRates, String8("48000"));
320        }
321    }
322
323    if (param.get(keySupFormats, value) == NO_ERROR) {
324        if (mIsMCOutput) {
325            hdmiCaps.getFmtsForAF(value);
326            param.add(keySupFormats, value);
327        } else {
328            param.add(keySupFormats, String8("AUDIO_FORMAT_PCM_16_BIT"));
329        }
330    }
331
332    if (param.get(keySupChannels, value) == NO_ERROR) {
333        if (mIsMCOutput) {
334            hdmiCaps.getChannelMasksForAF(value, true);
335            param.add(keySupChannels, value);
336        } else {
337            param.add(keySupChannels, String8("AUDIO_CHANNEL_OUT_STEREO"));
338        }
339    }
340
341    return strdup(param.toString().string());
342}
343
344uint32_t AudioStreamOut::getRateMultiplier() const
345{
346    return (mIsEncoded) ? mSPDIFEncoder.getRateMultiplier() : 1;
347}
348
349uint32_t AudioStreamOut::outputSampleRate() const
350{
351    return mInputSampleRate * getRateMultiplier();
352}
353
354int AudioStreamOut::getBytesPerOutputFrame()
355{
356    return (mIsEncoded) ? mSPDIFEncoder.getBytesPerOutputFrame()
357        : (mInputChanCount * sizeof(int16_t));
358}
359
360uint32_t AudioStreamOut::latency() const {
361    uint32_t uSecLatency = mInputNominalLatencyUSec;
362    uint32_t vcompDelay = mOwnerHAL.getVideoDelayCompUsec();
363
364    if (uSecLatency < vcompDelay)
365        return 0;
366
367    return ((uSecLatency - vcompDelay) / 1000);
368}
369
370// Used to implement get_presentation_position() for Audio HAL.
371// According to the prototype in audio.h, the frame count should not get
372// reset on standby().
373status_t AudioStreamOut::getPresentationPosition(uint64_t *frames,
374        struct timespec *timestamp)
375{
376    Mutex::Autolock _l(mRoutingLock);
377    status_t result = -ENODEV;
378    // The presentation timestamp should be the same for all devices.
379    // Also Molly only has one output device at the moment.
380    // So just use the first one in the list.
381    if (!mPhysOutputs.isEmpty()) {
382        const unsigned int kInsaneAvail = 10 * 48000;
383        unsigned int avail = 0;
384        sp<AudioOutput> audioOutput = mPhysOutputs.itemAt(0);
385        if (audioOutput->getHardwareTimestamp(&avail, timestamp) == 0) {
386            if (avail < kInsaneAvail) {
387                // FIXME av sync fudge factor
388                // Use a fudge factor to account for hidden buffering in the
389                // HDMI output path. This is a hack until we can determine the
390                // actual buffer sizes.
391                // Increasing kFudgeMSec will move the audio earlier in
392                // relation to the video.
393                const int kFudgeMSec = 40;
394                int fudgeFrames = kFudgeMSec * sampleRate() / 1000;
395
396                // Scale the frames in the driver because it might be running at
397                // a higher rate for EAC3.
398                int64_t framesInDriverBuffer =
399                    (int64_t)audioOutput->getKernelBufferSize() - (int64_t)avail;
400                framesInDriverBuffer = framesInDriverBuffer / getRateMultiplier();
401
402                int64_t pendingFrames = framesInDriverBuffer + fudgeFrames;
403                int64_t signedFrames = mFramesPresented - pendingFrames;
404                if (pendingFrames < 0) {
405                    ALOGE("getPresentationPosition: negative pendingFrames = %lld",
406                        pendingFrames);
407                } else if (signedFrames < 0) {
408                    ALOGI("getPresentationPosition: playing silent preroll"
409                        ", mFramesPresented = %llu, pendingFrames = %lld",
410                        mFramesPresented, pendingFrames);
411                } else {
412#if HAL_PRINT_TIMESTAMP_CSV
413                    // Print comma separated values for spreadsheet analysis.
414                    uint64_t nanos = (((uint64_t)timestamp->tv_sec) * 1000000000L)
415                            + timestamp->tv_nsec;
416                    ALOGI("getPresentationPosition, %lld, %4u, %lld, %llu",
417                            mFramesPresented, avail, signedFrames, nanos);
418#endif
419                    *frames = (uint64_t) signedFrames;
420                    result = NO_ERROR;
421                }
422            } else {
423                ALOGE("getPresentationPosition: avail too large = %u", avail);
424            }
425        } else {
426            ALOGE("getPresentationPosition: getHardwareTimestamp returned non-zero");
427        }
428    } else {
429        ALOGVV("getPresentationPosition: no physical outputs! This HAL is inactive!");
430    }
431    return result;
432}
433
434status_t AudioStreamOut::getRenderPosition(__unused uint32_t *dspFrames)
435{
436    return INVALID_OPERATION;
437}
438
439void AudioStreamOut::updateTargetOutputs()
440{
441    Mutex::Autolock _l(mRoutingLock);
442
443    AudioOutputList::iterator I;
444    uint32_t cur_outputs = 0;
445
446    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I)
447        cur_outputs |= (*I)->devMask();
448
449    if (cur_outputs == mTgtDevices)
450        return;
451
452    uint32_t outputsToObtain  = mTgtDevices & ~cur_outputs;
453    uint32_t outputsToRelease = cur_outputs & ~mTgtDevices;
454
455    // Start by releasing any outputs we should no longer have back to the HAL.
456    if (outputsToRelease) {
457
458        I = mPhysOutputs.begin();
459        while (I != mPhysOutputs.end()) {
460            if (!(outputsToRelease & (*I)->devMask())) {
461                ++I;
462                continue;
463            }
464
465            outputsToRelease &= ~((*I)->devMask());
466            mOwnerHAL.releaseOutput(*this, *I);
467            I = mPhysOutputs.erase(I);
468        }
469    }
470
471    if (outputsToRelease) {
472        ALOGW("Bookkeeping error!  Still have outputs to release (%08x), but"
473              " none of them appear to be in the mPhysOutputs list!",
474              outputsToRelease);
475    }
476
477    // Now attempt to obtain any outputs we should be using, but are not
478    // currently.
479    if (outputsToObtain) {
480        uint32_t mask;
481
482        // Buffer configuration may need updating now that we have decoded
483        // the start of a stream. For example, EAC3, needs 4X sampleRate.
484        updateInputNums();
485
486        for (mask = 0x1; outputsToObtain; mask <<= 1) {
487            if (!(mask & outputsToObtain))
488                continue;
489
490            sp<AudioOutput> newOutput;
491            status_t res;
492
493            res = mOwnerHAL.obtainOutput(*this, mask, &newOutput);
494            outputsToObtain &= ~mask;
495
496            if (OK != res) {
497                // If we get an error back from obtain output, it means that
498                // something went really wrong at a lower level (probably failed
499                // to open the driver).  We should not try to obtain this output
500                // again, at least until the next routing change.
501                ALOGW("Failed to obtain output %08x for %s audio stream out."
502                      " (res %d)", mask, getName(), res);
503                mTgtDevices &= ~mask;
504                continue;
505            }
506
507            if (newOutput != NULL) {
508                // If we actually got an output, go ahead and add it to our list
509                // of physical outputs.  The rest of the system will handle
510                // starting it up.  If we didn't get an output, but also go no
511                // error code, it just means that the output is currently busy
512                // and should become available soon.
513                ALOGI("updateTargetOutputs: adding output back to mPhysOutputs");
514                mPhysOutputs.push_back(newOutput);
515            }
516        }
517    }
518}
519
520void AudioStreamOut::adjustOutputs(int64_t maxTime)
521{
522    AudioOutputList::iterator I;
523
524    // Check to see if any outputs are active and see what their buffer levels
525    // are.
526    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
527        if ((*I)->getState() == AudioOutput::DMA_START) {
528            int64_t lastWriteTS = (*I)->getLastNextWriteTS();
529            int64_t padAmt;
530
531            mLocalTimeToFrames.a_zero = lastWriteTS;
532            mLocalTimeToFrames.b_zero = 0;
533            if (mLocalTimeToFrames.doForwardTransform(maxTime,
534                                                      &padAmt)) {
535                (*I)->adjustDelay(((int32_t)padAmt));
536            }
537        }
538    }
539}
540
541ssize_t AudioStreamOut::write(const void* buffer, size_t bytes)
542{
543    uint8_t *data = (uint8_t *)buffer;
544    ALOGVV("AudioStreamOut::write(%u)   0x%02X, 0x%02X, 0x%02X, 0x%02X,"
545          " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
546          " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
547          " 0x%02X, 0x%02X, 0x%02X, 0x%02X ====",
548        bytes, data[0], data[1], data[2], data[3],
549        data[4], data[5], data[6], data[7],
550        data[8], data[9], data[10], data[11],
551        data[12], data[13], data[14], data[15]
552        );
553    if (mIsEncoded) {
554        return mSPDIFEncoder.write(buffer, bytes);
555    } else {
556        return writeInternal(buffer, bytes);
557    }
558}
559
560ssize_t AudioStreamOut::writeInternal(const void* buffer, size_t bytes)
561{
562    uint8_t *data = (uint8_t *)buffer;
563    ALOGVV("AudioStreamOut::write_l(%u) 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
564          " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
565          " 0x%02X, 0x%02X, 0x%02X, 0x%02X,"
566          " 0x%02X, 0x%02X, 0x%02X, 0x%02X",
567        bytes, data[0], data[1], data[2], data[3],
568        data[4], data[5], data[6], data[7],
569        data[8], data[9], data[10], data[11],
570        data[12], data[13], data[14], data[15]
571        );
572    // Note: no lock is obtained here.  Calls to write and getNextWriteTimestamp
573    // happen only on the AudioFlinger mixer thread which owns this particular
574    // output stream, so there is no need to worry that there will be two
575    // threads in this instance method concurrently.
576    //
577    // In addition, only calls to write change the contents of the mPhysOutputs
578    // collection (during the call to updateTargetOutputs).  updateTargetOutputs
579    // will hold the routing lock during the operation, as should any reader of
580    // mPhysOutputs, unless the reader is a call to write or
581    // getNextWriteTimestamp (we know that it is safe for write and gnwt to read
582    // the collection because the only collection mutator is the same thread
583    // which calls write and gnwt).
584    updateTargetOutputs();
585
586    // If any of our outputs is in the PRIMED state when ::write is called, it
587    // means one of two things.  First, it could be that the DMA output really
588    // has not started yet.  This is odd, but certainly not impossible.  The
589    // other possibility is that AudioFlinger is in its silence-pushing mode and
590    // is not calling getNextWriteTimestamp.  After an output is primed, its in
591    // GNWTS where the amt of padding to compensate for different DMA start
592    // times is taken into account.  Go ahead and force a call to GNWTS, just to
593    // be certain that we have checked recently and are not stuck in silence
594    // fill mode.  Failure to do this will cause the AudioOutput state machine
595    // to eventually give up on DMA starting and reset the output over and over
596    // again (spamming the log and producing general confusion).
597    //
598    // While we are in the process of checking our various output states, check
599    // to see if any outputs have made it to the ACTIVE state.  Pass this
600    // information along to the call to processOneChunk.  If any of our outputs
601    // are waiting to be primed while other outputs have made it to steady
602    // state, we need to change our priming behavior slightly.  Instead of
603    // filling an output's buffer completely, we want to fill it to slightly
604    // less than full and let the adjustDelay mechanism take care of the rest.
605    //
606    // Failure to do this during steady state operation will almost certainly
607    // lead to the new output being over-filled relative to the other outputs
608    // causing it to be slightly out of sync.
609    AudioOutputList::iterator I;
610    bool checkDMAStart = false;
611    bool hasActiveOutputs = false;
612    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
613        if (AudioOutput::PRIMED == (*I)->getState())
614            checkDMAStart = true;
615
616        if ((*I)->getState() == AudioOutput::ACTIVE)
617            hasActiveOutputs = true;
618    }
619
620    if (checkDMAStart) {
621        int64_t junk;
622        getNextWriteTimestamp_internal(&junk);
623    }
624
625    // We always call processOneChunk on the outputs, as it is the
626    // tick for their state machines.
627    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
628        (*I)->processOneChunk((uint8_t *)buffer, bytes, hasActiveOutputs);
629    }
630
631    // If we don't actually have any physical outputs to write to, just sleep
632    // for the proper amt of time in order to simulate the throttle that writing
633    // to the hardware would impose.
634    finishedWriteOp(bytes / getBytesPerOutputFrame(), (0 == mPhysOutputs.size()));
635
636    return static_cast<ssize_t>(bytes);
637}
638
639status_t AudioStreamOut::getNextWriteTimestamp(int64_t *timestamp)
640{
641    return getNextWriteTimestamp_internal(timestamp);
642}
643
644status_t AudioStreamOut::getNextWriteTimestamp_internal(
645        int64_t *timestamp)
646{
647    int64_t max_time = LLONG_MIN;
648    bool    max_time_valid = false;
649    bool    need_adjust = false;
650
651    // Across all of our physical outputs, figure out the max time when
652    // a write operation will hit the speakers.  Assume that if an
653    // output cannot answer the question, its because it has never
654    // started or because it has recently underflowed and needs to be
655    // restarted.  If this is the case, we will need to prime the
656    // pipeline with a chunk's worth of data before proceeding.
657    // If any of the outputs indicate a discontinuity (meaning that the
658    // DMA start time was valid and is now invalid, or was and is valid
659    // but was different from before; almost certainly caused by a low
660    // level underfow), then just stop now.  We will need to reset and
661    // re-prime all of the outputs in order to make certain that the
662    // lead-times on all of the outputs match.
663
664    AudioOutputList::iterator I;
665    bool discon = false;
666
667    // Find the largest next write timestamp. The goal is to make EVERY
668    // output have the same value, but we also need this to pass back
669    // up the layers.
670    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
671        int64_t tmp;
672        if (OK == (*I)->getNextWriteTimestamp(&tmp, &discon)) {
673            if (!max_time_valid || (max_time < tmp)) {
674                max_time = tmp;
675                max_time_valid = true;
676            }
677        }
678    }
679
680    // Check the state of each output and determine if we need to align them.
681    // Make sure to do this after we have called each outputs'
682    // getNextWriteTimestamp as the transition from PRIMED to DMA_START happens
683    // there.
684    for (I = mPhysOutputs.begin(); I != mPhysOutputs.end(); ++I) {
685        if ((*I)->getState() == AudioOutput::DMA_START) {
686            need_adjust = true;
687            break;
688        }
689    }
690
691    // At this point, if we still have not found at least one output
692    // who knows when their data is going to hit the speakers, then we
693    // just can't answer the getNextWriteTimestamp question and we
694    // should give up.
695    if (!max_time_valid) {
696        return INVALID_OPERATION;
697    }
698
699    // Stuff silence into the non-aligned outputs so that the effective
700    // timestamp is the same for all the outputs.
701    if (need_adjust)
702        adjustOutputs(max_time);
703
704    // We are done. The time at which the next written audio should
705    // hit the speakers is just max_time plus the maximum amt of delay
706    // compensation in the system.
707    *timestamp = max_time;
708    return OK;
709}
710
711#define DUMP(a...) \
712    snprintf(buffer, SIZE, a); \
713    buffer[SIZE - 1] = 0; \
714    result.append(buffer);
715#define B2STR(b) b ? "true" : "false"
716
717status_t AudioStreamOut::dump(int fd)
718{
719    const size_t SIZE = 256;
720    char buffer[SIZE];
721    String8 result;
722    DUMP("\n%s AudioStreamOut::dump\n", getName());
723    DUMP("\tsample rate            : %d\n", sampleRate());
724    DUMP("\tbuffer size            : %d\n", bufferSize());
725    DUMP("\tchannel mask           : 0x%04x\n", chanMask());
726    DUMP("\tformat                 : %d\n", format());
727    DUMP("\tdevice mask            : 0x%04x\n", mTgtDevices);
728
729    mRoutingLock.lock();
730    AudioOutputList outSnapshot(mPhysOutputs);
731    mRoutingLock.unlock();
732
733    AudioOutputList::iterator I;
734    for (I = outSnapshot.begin(); I != outSnapshot.end(); ++I)
735        (*I)->dump(result);
736
737    ::write(fd, result.string(), result.size());
738
739    return NO_ERROR;
740}
741
742#undef B2STR
743#undef DUMP
744
745}  // android
746