NuPlayerRenderer.cpp revision 46f80165c595d81dda68f8f3fea27f4fb04937dd
1/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayerRenderer"
19#include <utils/Log.h>
20
21#include "NuPlayerRenderer.h"
22#include <algorithm>
23#include <cutils/properties.h>
24#include <media/stagefright/foundation/ABuffer.h>
25#include <media/stagefright/foundation/ADebug.h>
26#include <media/stagefright/foundation/AMessage.h>
27#include <media/stagefright/foundation/AUtils.h>
28#include <media/stagefright/foundation/AWakeLock.h>
29#include <media/stagefright/MediaClock.h>
30#include <media/stagefright/MediaErrors.h>
31#include <media/stagefright/MetaData.h>
32#include <media/stagefright/Utils.h>
33#include <media/stagefright/VideoFrameScheduler.h>
34
35#include <inttypes.h>
36
37namespace android {
38
39/*
40 * Example of common configuration settings in shell script form
41
42   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
43   adb shell setprop audio.offload.disable 1
44
45   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
46   adb shell setprop audio.offload.video 1
47
48   #Use audio callbacks for PCM data
49   adb shell setprop media.stagefright.audio.cbk 1
50
51   #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
52   adb shell setprop media.stagefright.audio.deep 1
53
54   #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
55   adb shell setprop media.stagefright.audio.sink 1000
56
57 * These configurations take effect for the next track played (not the current track).
58 */
59
60static inline bool getUseAudioCallbackSetting() {
61    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
62}
63
64static inline int32_t getAudioSinkPcmMsSetting() {
65    return property_get_int32(
66            "media.stagefright.audio.sink", 500 /* default_value */);
67}
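// Example usage: the current values of these properties can be inspected with
//   adb shell getprop media.stagefright.audio.cbk
//   adb shell getprop media.stagefright.audio.sink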
68
69// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
70// is closed to allow the audio DSP to power down.
71static const int64_t kOffloadPauseMaxUs = 10000000ll;
72
73// Maximum allowed delay from AudioSink, 1.5 seconds.
74static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
75
76static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
77
78// static
79const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
80        AUDIO_CHANNEL_NONE, // mChannelMask
81        AUDIO_OUTPUT_FLAG_NONE, // mFlags
82        AUDIO_FORMAT_INVALID, // mFormat
83        0, // mNumChannels
84        0 // mSampleRate
85};
86
87// static
88const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
89
90NuPlayer::Renderer::Renderer(
91        const sp<MediaPlayerBase::AudioSink> &sink,
92        const sp<AMessage> &notify,
93        uint32_t flags)
94    : mAudioSink(sink),
95      mUseVirtualAudioSink(false),
96      mNotify(notify),
97      mFlags(flags),
98      mNumFramesWritten(0),
99      mDrainAudioQueuePending(false),
100      mDrainVideoQueuePending(false),
101      mAudioQueueGeneration(0),
102      mVideoQueueGeneration(0),
103      mAudioDrainGeneration(0),
104      mVideoDrainGeneration(0),
105      mAudioEOSGeneration(0),
106      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
107      mAudioFirstAnchorTimeMediaUs(-1),
108      mAnchorTimeMediaUs(-1),
109      mAnchorNumFramesWritten(-1),
110      mVideoLateByUs(0ll),
111      mHasAudio(false),
112      mHasVideo(false),
113      mNotifyCompleteAudio(false),
114      mNotifyCompleteVideo(false),
115      mSyncQueues(false),
116      mPaused(false),
117      mPauseDrainAudioAllowedUs(0),
118      mVideoSampleReceived(false),
119      mVideoRenderingStarted(false),
120      mVideoRenderingStartGeneration(0),
121      mAudioRenderingStartGeneration(0),
122      mRenderingDataDelivered(false),
123      mNextAudioClockUpdateTimeUs(-1),
124      mLastAudioMediaTimeUs(-1),
125      mAudioOffloadPauseTimeoutGeneration(0),
126      mAudioTornDown(false),
127      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
128      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
129      mTotalBuffersQueued(0),
130      mLastAudioBufferDrained(0),
131      mUseAudioCallback(false),
132      mWakeLock(new AWakeLock()) {
133    mMediaClock = new MediaClock;
134    mPlaybackRate = mPlaybackSettings.mSpeed;
135    mMediaClock->setPlaybackRate(mPlaybackRate);
136}
137
138NuPlayer::Renderer::~Renderer() {
139    if (offloadingAudio()) {
140        mAudioSink->stop();
141        mAudioSink->flush();
142        mAudioSink->close();
143    }
144}
145
146void NuPlayer::Renderer::queueBuffer(
147        bool audio,
148        const sp<ABuffer> &buffer,
149        const sp<AMessage> &notifyConsumed) {
150    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
151    msg->setInt32("queueGeneration", getQueueGeneration(audio));
152    msg->setInt32("audio", static_cast<int32_t>(audio));
153    msg->setBuffer("buffer", buffer);
154    msg->setMessage("notifyConsumed", notifyConsumed);
155    msg->post();
156}
157
158void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
159    CHECK_NE(finalResult, (status_t)OK);
160
161    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
162    msg->setInt32("queueGeneration", getQueueGeneration(audio));
163    msg->setInt32("audio", static_cast<int32_t>(audio));
164    msg->setInt32("finalResult", finalResult);
165    msg->post();
166}
167
168status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
169    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
170    writeToAMessage(msg, rate);
171    sp<AMessage> response;
172    status_t err = msg->postAndAwaitResponse(&response);
173    if (err == OK && response != NULL) {
174        CHECK(response->findInt32("err", &err));
175    }
176    return err;
177}
178
179status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
180    if (rate.mSpeed == 0.f) {
181        onPause();
182        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
183        // have to correspond to any non-zero speed (e.g. the old speed). Keep
184        // the settings nonetheless, using the old speed, in case the audiosink changes.
185        AudioPlaybackRate newRate = rate;
186        newRate.mSpeed = mPlaybackSettings.mSpeed;
187        mPlaybackSettings = newRate;
188        return OK;
189    }
190
191    if (mAudioSink != NULL && mAudioSink->ready()) {
192        status_t err = mAudioSink->setPlaybackRate(rate);
193        if (err != OK) {
194            return err;
195        }
196    }
197    mPlaybackSettings = rate;
198    mPlaybackRate = rate.mSpeed;
199    mMediaClock->setPlaybackRate(mPlaybackRate);
200    return OK;
201}
202
203status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
204    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
205    sp<AMessage> response;
206    status_t err = msg->postAndAwaitResponse(&response);
207    if (err == OK && response != NULL) {
208        CHECK(response->findInt32("err", &err));
209        if (err == OK) {
210            readFromAMessage(response, rate);
211        }
212    }
213    return err;
214}
215
216status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
217    if (mAudioSink != NULL && mAudioSink->ready()) {
218        status_t err = mAudioSink->getPlaybackRate(rate);
219        if (err == OK) {
220            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
221                ALOGW("correcting mismatch in internal/external playback rate");
222            }
223            // get the playback settings actually used by the audiosink, as they may
224            // be slightly off because the audiosink does not apply small changes.
225            mPlaybackSettings = *rate;
226            if (mPaused) {
227                rate->mSpeed = 0.f;
228            }
229        }
230        return err;
231    }
232    *rate = mPlaybackSettings;
233    return OK;
234}
235
236status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
237    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
238    writeToAMessage(msg, sync, videoFpsHint);
239    sp<AMessage> response;
240    status_t err = msg->postAndAwaitResponse(&response);
241    if (err == OK && response != NULL) {
242        CHECK(response->findInt32("err", &err));
243    }
244    return err;
245}
246
247status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
248    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
249        return BAD_VALUE;
250    }
251    // TODO: support sync sources
252    return INVALID_OPERATION;
253}
254
255status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
256    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
257    sp<AMessage> response;
258    status_t err = msg->postAndAwaitResponse(&response);
259    if (err == OK && response != NULL) {
260        CHECK(response->findInt32("err", &err));
261        if (err == OK) {
262            readFromAMessage(response, sync, videoFps);
263        }
264    }
265    return err;
266}
267
268status_t NuPlayer::Renderer::onGetSyncSettings(
269        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
270    *sync = mSyncSettings;
271    *videoFps = -1.f;
272    return OK;
273}
274
275void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
276    {
277        Mutex::Autolock autoLock(mLock);
278        if (audio) {
279            mNotifyCompleteAudio |= notifyComplete;
280            clearAudioFirstAnchorTime_l();
281            ++mAudioQueueGeneration;
282            ++mAudioDrainGeneration;
283        } else {
284            mNotifyCompleteVideo |= notifyComplete;
285            ++mVideoQueueGeneration;
286            ++mVideoDrainGeneration;
287        }
288
289        clearAnchorTime_l();
290        mVideoLateByUs = 0;
291        mSyncQueues = false;
292    }
293
294    sp<AMessage> msg = new AMessage(kWhatFlush, this);
295    msg->setInt32("audio", static_cast<int32_t>(audio));
296    msg->post();
297}
298
299void NuPlayer::Renderer::signalTimeDiscontinuity() {
300}
301
302void NuPlayer::Renderer::signalDisableOffloadAudio() {
303    (new AMessage(kWhatDisableOffloadAudio, this))->post();
304}
305
306void NuPlayer::Renderer::signalEnableOffloadAudio() {
307    (new AMessage(kWhatEnableOffloadAudio, this))->post();
308}
309
310void NuPlayer::Renderer::pause() {
311    (new AMessage(kWhatPause, this))->post();
312}
313
314void NuPlayer::Renderer::resume() {
315    (new AMessage(kWhatResume, this))->post();
316}
317
318void NuPlayer::Renderer::setVideoFrameRate(float fps) {
319    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
320    msg->setFloat("frame-rate", fps);
321    msg->post();
322}
323
324// Called on any thread, without mLock acquired.
325status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
326    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
327    if (result == OK) {
328        return result;
329    }
330
331    // MediaClock has not started yet. Try to start it if possible.
332    {
333        Mutex::Autolock autoLock(mLock);
334        if (mAudioFirstAnchorTimeMediaUs == -1) {
335            return result;
336        }
337
338        AudioTimestamp ts;
339        status_t res = mAudioSink->getTimestamp(ts);
340        if (res != OK) {
341            return result;
342        }
343
344        // AudioSink has rendered some frames.
345        int64_t nowUs = ALooper::GetNowUs();
346        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
347                + mAudioFirstAnchorTimeMediaUs;
348        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
349    }
350
351    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
352}
353
354void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
355    mAudioFirstAnchorTimeMediaUs = -1;
356    mMediaClock->setStartingTimeMedia(-1);
357}
358
359void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
360    if (mAudioFirstAnchorTimeMediaUs == -1) {
361        mAudioFirstAnchorTimeMediaUs = mediaUs;
362        mMediaClock->setStartingTimeMedia(mediaUs);
363    }
364}
365
366void NuPlayer::Renderer::clearAnchorTime_l() {
367    mMediaClock->clearAnchor();
368    mAnchorTimeMediaUs = -1;
369    mAnchorNumFramesWritten = -1;
370}
371
372void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
373    Mutex::Autolock autoLock(mLock);
374    mVideoLateByUs = lateUs;
375}
376
377int64_t NuPlayer::Renderer::getVideoLateByUs() {
378    Mutex::Autolock autoLock(mLock);
379    return mVideoLateByUs;
380}
381
382status_t NuPlayer::Renderer::openAudioSink(
383        const sp<AMessage> &format,
384        bool offloadOnly,
385        bool hasVideo,
386        uint32_t flags,
387        bool *isOffloaded) {
388    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
389    msg->setMessage("format", format);
390    msg->setInt32("offload-only", offloadOnly);
391    msg->setInt32("has-video", hasVideo);
392    msg->setInt32("flags", flags);
393
394    sp<AMessage> response;
395    msg->postAndAwaitResponse(&response);
396
397    int32_t err;
398    if (!response->findInt32("err", &err)) {
399        err = INVALID_OPERATION;
400    } else if (err == OK && isOffloaded != NULL) {
401        int32_t offload;
402        CHECK(response->findInt32("offload", &offload));
403        *isOffloaded = (offload != 0);
404    }
405    return err;
406}
407
408void NuPlayer::Renderer::closeAudioSink() {
409    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
410
411    sp<AMessage> response;
412    msg->postAndAwaitResponse(&response);
413}
414
415void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
416    switch (msg->what()) {
417        case kWhatOpenAudioSink:
418        {
419            sp<AMessage> format;
420            CHECK(msg->findMessage("format", &format));
421
422            int32_t offloadOnly;
423            CHECK(msg->findInt32("offload-only", &offloadOnly));
424
425            int32_t hasVideo;
426            CHECK(msg->findInt32("has-video", &hasVideo));
427
428            uint32_t flags;
429            CHECK(msg->findInt32("flags", (int32_t *)&flags));
430
431            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
432
433            sp<AMessage> response = new AMessage;
434            response->setInt32("err", err);
435            response->setInt32("offload", offloadingAudio());
436
437            sp<AReplyToken> replyID;
438            CHECK(msg->senderAwaitsResponse(&replyID));
439            response->postReply(replyID);
440
441            break;
442        }
443
444        case kWhatCloseAudioSink:
445        {
446            sp<AReplyToken> replyID;
447            CHECK(msg->senderAwaitsResponse(&replyID));
448
449            onCloseAudioSink();
450
451            sp<AMessage> response = new AMessage;
452            response->postReply(replyID);
453            break;
454        }
455
456        case kWhatStopAudioSink:
457        {
458            mAudioSink->stop();
459            break;
460        }
461
462        case kWhatDrainAudioQueue:
463        {
464            mDrainAudioQueuePending = false;
465
466            int32_t generation;
467            CHECK(msg->findInt32("drainGeneration", &generation));
468            if (generation != getDrainGeneration(true /* audio */)) {
469                break;
470            }
471
472            if (onDrainAudioQueue()) {
473                uint32_t numFramesPlayed;
474                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
475                         (status_t)OK);
476
477                uint32_t numFramesPendingPlayout =
478                    mNumFramesWritten - numFramesPlayed;
479
480                // This is how long the audio sink will have data to
481                // play back.
482                int64_t delayUs =
483                    mAudioSink->msecsPerFrame()
484                        * numFramesPendingPlayout * 1000ll;
485                if (mPlaybackRate > 1.0f) {
486                    delayUs /= mPlaybackRate;
487                }
488
489                // Let's give it more data after about half that time
490                // has elapsed.
491                delayUs /= 2;
492                // check the buffer size to estimate maximum delay permitted.
493                const int64_t maxDrainDelayUs = std::max(
494                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
495                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
496                        (long long)delayUs, (long long)maxDrainDelayUs);
497                Mutex::Autolock autoLock(mLock);
498                postDrainAudioQueue_l(delayUs);
499            }
500            break;
501        }
502
503        case kWhatDrainVideoQueue:
504        {
505            int32_t generation;
506            CHECK(msg->findInt32("drainGeneration", &generation));
507            if (generation != getDrainGeneration(false /* audio */)) {
508                break;
509            }
510
511            mDrainVideoQueuePending = false;
512
513            onDrainVideoQueue();
514
515            postDrainVideoQueue();
516            break;
517        }
518
519        case kWhatPostDrainVideoQueue:
520        {
521            int32_t generation;
522            CHECK(msg->findInt32("drainGeneration", &generation));
523            if (generation != getDrainGeneration(false /* audio */)) {
524                break;
525            }
526
527            mDrainVideoQueuePending = false;
528            postDrainVideoQueue();
529            break;
530        }
531
532        case kWhatQueueBuffer:
533        {
534            onQueueBuffer(msg);
535            break;
536        }
537
538        case kWhatQueueEOS:
539        {
540            onQueueEOS(msg);
541            break;
542        }
543
544        case kWhatEOS:
545        {
546            int32_t generation;
547            CHECK(msg->findInt32("audioEOSGeneration", &generation));
548            if (generation != mAudioEOSGeneration) {
549                break;
550            }
551            status_t finalResult;
552            CHECK(msg->findInt32("finalResult", &finalResult));
553            notifyEOS(true /* audio */, finalResult);
554            break;
555        }
556
557        case kWhatConfigPlayback:
558        {
559            sp<AReplyToken> replyID;
560            CHECK(msg->senderAwaitsResponse(&replyID));
561            AudioPlaybackRate rate;
562            readFromAMessage(msg, &rate);
563            status_t err = onConfigPlayback(rate);
564            sp<AMessage> response = new AMessage;
565            response->setInt32("err", err);
566            response->postReply(replyID);
567            break;
568        }
569
570        case kWhatGetPlaybackSettings:
571        {
572            sp<AReplyToken> replyID;
573            CHECK(msg->senderAwaitsResponse(&replyID));
574            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
575            status_t err = onGetPlaybackSettings(&rate);
576            sp<AMessage> response = new AMessage;
577            if (err == OK) {
578                writeToAMessage(response, rate);
579            }
580            response->setInt32("err", err);
581            response->postReply(replyID);
582            break;
583        }
584
585        case kWhatConfigSync:
586        {
587            sp<AReplyToken> replyID;
588            CHECK(msg->senderAwaitsResponse(&replyID));
589            AVSyncSettings sync;
590            float videoFpsHint;
591            readFromAMessage(msg, &sync, &videoFpsHint);
592            status_t err = onConfigSync(sync, videoFpsHint);
593            sp<AMessage> response = new AMessage;
594            response->setInt32("err", err);
595            response->postReply(replyID);
596            break;
597        }
598
599        case kWhatGetSyncSettings:
600        {
601            sp<AReplyToken> replyID;
602            CHECK(msg->senderAwaitsResponse(&replyID));
603
604            ALOGV("kWhatGetSyncSettings");
605            AVSyncSettings sync;
606            float videoFps = -1.f;
607            status_t err = onGetSyncSettings(&sync, &videoFps);
608            sp<AMessage> response = new AMessage;
609            if (err == OK) {
610                writeToAMessage(response, sync, videoFps);
611            }
612            response->setInt32("err", err);
613            response->postReply(replyID);
614            break;
615        }
616
617        case kWhatFlush:
618        {
619            onFlush(msg);
620            break;
621        }
622
623        case kWhatDisableOffloadAudio:
624        {
625            onDisableOffloadAudio();
626            break;
627        }
628
629        case kWhatEnableOffloadAudio:
630        {
631            onEnableOffloadAudio();
632            break;
633        }
634
635        case kWhatPause:
636        {
637            onPause();
638            break;
639        }
640
641        case kWhatResume:
642        {
643            onResume();
644            break;
645        }
646
647        case kWhatSetVideoFrameRate:
648        {
649            float fps;
650            CHECK(msg->findFloat("frame-rate", &fps));
651            onSetVideoFrameRate(fps);
652            break;
653        }
654
655        case kWhatAudioTearDown:
656        {
657            int32_t reason;
658            CHECK(msg->findInt32("reason", &reason));
659
660            onAudioTearDown((AudioTearDownReason)reason);
661            break;
662        }
663
664        case kWhatAudioOffloadPauseTimeout:
665        {
666            int32_t generation;
667            CHECK(msg->findInt32("drainGeneration", &generation));
668            if (generation != mAudioOffloadPauseTimeoutGeneration) {
669                break;
670            }
671            ALOGV("Audio Offload tear down due to pause timeout.");
672            onAudioTearDown(kDueToTimeout);
673            mWakeLock->release();
674            break;
675        }
676
677        default:
678            TRESPASS();
679            break;
680    }
681}
682
683void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
684    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
685        return;
686    }
687
688    if (mAudioQueue.empty()) {
689        return;
690    }
691
692    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
693    if (mPaused) {
694        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
695        if (diffUs > delayUs) {
696            delayUs = diffUs;
697        }
698    }
699
700    mDrainAudioQueuePending = true;
701    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
702    msg->setInt32("drainGeneration", mAudioDrainGeneration);
703    msg->post(delayUs);
704}
705
706void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
707    mAudioRenderingStartGeneration = mAudioDrainGeneration;
708    mVideoRenderingStartGeneration = mVideoDrainGeneration;
709    mRenderingDataDelivered = false;
710}
711
712void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
713    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
714        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
715        mRenderingDataDelivered = true;
716        if (mPaused) {
717            return;
718        }
719        mVideoRenderingStartGeneration = -1;
720        mAudioRenderingStartGeneration = -1;
721
722        sp<AMessage> notify = mNotify->dup();
723        notify->setInt32("what", kWhatMediaRenderingStart);
724        notify->post();
725    }
726}
727
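// Static trampoline for the AudioSink callback: "cookie" carries the Renderer instance,
// so this simply dispatches fill-buffer, stream-end and tear-down events to it.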
728// static
729size_t NuPlayer::Renderer::AudioSinkCallback(
730        MediaPlayerBase::AudioSink * /* audioSink */,
731        void *buffer,
732        size_t size,
733        void *cookie,
734        MediaPlayerBase::AudioSink::cb_event_t event) {
735    NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
736
737    switch (event) {
738        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
739        {
740            return me->fillAudioBuffer(buffer, size);
741            break;
742        }
743
744        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
745        {
746            ALOGV("AudioSink::CB_EVENT_STREAM_END");
747            me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
748            break;
749        }
750
751        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
752        {
753            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
754            me->notifyAudioTearDown(kDueToError);
755            break;
756        }
757    }
758
759    return 0;
760}
761
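// Called on the AudioSink's callback thread when mUseAudioCallback is set: copies queued
// audio data into the sink-provided buffer, updates the media clock anchor from the
// played-out duration, and posts EOS handling when the end of the queue is reached.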
762size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
763    Mutex::Autolock autoLock(mLock);
764
765    if (!mUseAudioCallback) {
766        return 0;
767    }
768
769    bool hasEOS = false;
770
771    size_t sizeCopied = 0;
772    bool firstEntry = true;
773    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
774    while (sizeCopied < size && !mAudioQueue.empty()) {
775        entry = &*mAudioQueue.begin();
776
777        if (entry->mBuffer == NULL) { // EOS
778            hasEOS = true;
779            mAudioQueue.erase(mAudioQueue.begin());
780            break;
781        }
782
783        if (firstEntry && entry->mOffset == 0) {
784            firstEntry = false;
785            int64_t mediaTimeUs;
786            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
787            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
788            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
789        }
790
791        size_t copy = entry->mBuffer->size() - entry->mOffset;
792        size_t sizeRemaining = size - sizeCopied;
793        if (copy > sizeRemaining) {
794            copy = sizeRemaining;
795        }
796
797        memcpy((char *)buffer + sizeCopied,
798               entry->mBuffer->data() + entry->mOffset,
799               copy);
800
801        entry->mOffset += copy;
802        if (entry->mOffset == entry->mBuffer->size()) {
803            entry->mNotifyConsumed->post();
804            mAudioQueue.erase(mAudioQueue.begin());
805            entry = NULL;
806        }
807        sizeCopied += copy;
808
809        notifyIfMediaRenderingStarted_l();
810    }
811
812    if (mAudioFirstAnchorTimeMediaUs >= 0) {
813        int64_t nowUs = ALooper::GetNowUs();
814        int64_t nowMediaUs =
815            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
816        // we don't know how much data we are queueing for offloaded tracks.
817        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
818    }
819
820    // for non-offloaded audio, we need to compute the frames written because
821    // there is no EVENT_STREAM_END notification. The frames-written count gives
822    // an estimate of the pending played-out duration.
823    if (!offloadingAudio()) {
824        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
825    }
826
827    if (hasEOS) {
828        (new AMessage(kWhatStopAudioSink, this))->post();
829        // As there is currently no EVENT_STREAM_END callback notification for
830        // non-offloaded audio tracks, we need to post the EOS ourselves.
831        if (!offloadingAudio()) {
832            int64_t postEOSDelayUs = 0;
833            if (mAudioSink->needsTrailingPadding()) {
834                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
835            }
836            ALOGV("fillAudioBuffer: notifyEOS "
837                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
838                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
839            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
840        }
841    }
842    return sizeCopied;
843}
844
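// Posts the pending notifications (consumed / EOS) for every audio entry up to and
// including the last EOS marker in the queue, then drops those entries. Used when the
// sink cannot be drained, so that NuPlayerDecoder is not left waiting for them.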
845void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
846    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
847    bool foundEOS = false;
848    while (it != mAudioQueue.end()) {
849        int32_t eos;
850        QueueEntry *entry = &*it++;
851        if (entry->mBuffer == NULL
852                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
853            itEOS = it;
854            foundEOS = true;
855        }
856    }
857
858    if (foundEOS) {
859        // post all replies before EOS and drop the samples
860        for (it = mAudioQueue.begin(); it != itEOS; it++) {
861            if (it->mBuffer == NULL) {
862                // delay doesn't matter as we don't even have an AudioTrack
863                notifyEOS(true /* audio */, it->mFinalResult);
864            } else {
865                it->mNotifyConsumed->post();
866            }
867        }
868        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
869    }
870}
871
872bool NuPlayer::Renderer::onDrainAudioQueue() {
873    // do not drain audio during teardown as queued buffers may be invalid.
874    if (mAudioTornDown) {
875        return false;
876    }
877    // TODO: This call to getPosition checks if AudioTrack has been created
878    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
879    // CHECKs on getPosition will fail.
880    // We still need to figure out why AudioTrack is not created when
881    // this function is called. One possible reason could be leftover
882    // audio. Another possible place is to check whether decoder
883    // has received INFO_FORMAT_CHANGED as the first buffer since
884    // AudioSink is opened there, and possible interactions with flush
885    // immediately after start. Investigate error message
886    // "vorbis_dsp_synthesis returned -135", along with RTSP.
887    uint32_t numFramesPlayed;
888    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
889        // When getPosition fails, renderer will not reschedule the draining
890        // unless new samples are queued.
891    // If we have pending EOS (or "eos" markers for discontinuities), we need
892    // to post these now as NuPlayerDecoder might be waiting for them.
893        drainAudioQueueUntilLastEOS();
894
895        ALOGW("onDrainAudioQueue(): audio sink is not ready");
896        return false;
897    }
898
899#if 0
900    ssize_t numFramesAvailableToWrite =
901        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
902
903    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
904        ALOGI("audio sink underrun");
905    } else {
906        ALOGV("audio queue has %d frames left to play",
907             mAudioSink->frameCount() - numFramesAvailableToWrite);
908    }
909#endif
910
911    uint32_t prevFramesWritten = mNumFramesWritten;
912    while (!mAudioQueue.empty()) {
913        QueueEntry *entry = &*mAudioQueue.begin();
914
915        mLastAudioBufferDrained = entry->mBufferOrdinal;
916
917        if (entry->mBuffer == NULL) {
918            // EOS
919            int64_t postEOSDelayUs = 0;
920            if (mAudioSink->needsTrailingPadding()) {
921                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
922            }
923            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
924            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
925
926            mAudioQueue.erase(mAudioQueue.begin());
927            entry = NULL;
928            if (mAudioSink->needsTrailingPadding()) {
929                // If we're not in gapless playback (i.e. through setNextPlayer), we
930                // need to stop the track here, because that will play out the last
931                // little bit at the end of the file. Otherwise short files won't play.
932                mAudioSink->stop();
933                mNumFramesWritten = 0;
934            }
935            return false;
936        }
937
938        // ignore 0-sized buffer which could be EOS marker with no data
939        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
940            int64_t mediaTimeUs;
941            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
942            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
943                    mediaTimeUs / 1E6);
944            onNewAudioMediaTime(mediaTimeUs);
945        }
946
947        size_t copy = entry->mBuffer->size() - entry->mOffset;
948
949        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
950                                            copy, false /* blocking */);
951        if (written < 0) {
952            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
953            if (written == WOULD_BLOCK) {
954                ALOGV("AudioSink write would block when writing %zu bytes", copy);
955            } else {
956                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
957                // This can only happen when AudioSink was opened with doNotReconnect flag set to
958                // true, in which case the NuPlayer will handle the reconnect.
959                notifyAudioTearDown(kDueToError);
960            }
961            break;
962        }
963
964        entry->mOffset += written;
965        if (entry->mOffset == entry->mBuffer->size()) {
966            entry->mNotifyConsumed->post();
967            mAudioQueue.erase(mAudioQueue.begin());
968
969            entry = NULL;
970        }
971
972        size_t copiedFrames = written / mAudioSink->frameSize();
973        mNumFramesWritten += copiedFrames;
974
975        {
976            Mutex::Autolock autoLock(mLock);
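            // Estimate the media time of the last frame written to the sink (anchor media
            // time plus the duration of the frames written since the anchor) and use it to
            // cap how far the media clock is allowed to advance.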
977            int64_t maxTimeMedia;
978            maxTimeMedia =
979                mAnchorTimeMediaUs +
980                        (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
981                                * 1000LL * mAudioSink->msecsPerFrame());
982            mMediaClock->updateMaxTimeMedia(maxTimeMedia);
983
984            notifyIfMediaRenderingStarted_l();
985        }
986
987        if (written != (ssize_t)copy) {
988            // A short count was received from AudioSink::write()
989            //
990            // AudioSink write is called in non-blocking mode.
991            // It may return with a short count when:
992            //
993            // 1) Size to be copied is not a multiple of the frame size. We consider this fatal.
994            // 2) The data to be copied exceeds the available buffer in AudioSink.
995            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
996            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
997
998            // (Case 1)
999            // Must be a multiple of the frame size.  If it is not a multiple of the frame size, it
1000            // needs to fail, as we should not carry over fractional frames between calls.
1001            CHECK_EQ(copy % mAudioSink->frameSize(), 0);
1002
1003            // (Case 2, 3, 4)
1004            // Return early to the caller.
1005            // Beware of calling immediately again as this may busy-loop if you are not careful.
1006            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1007            break;
1008        }
1009    }
1010
1011    // calculate whether we need to reschedule another write.
1012    bool reschedule = !mAudioQueue.empty()
1013            && (!mPaused
1014                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1015    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
1016    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1017    return reschedule;
1018}
1019
1020int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1021    int32_t sampleRate = offloadingAudio() ?
1022            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1023    if (sampleRate == 0) {
1024        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1025        return 0;
1026    }
1027    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
1028    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
1029}
1030
1031// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1032int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1033    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1034    if (mUseVirtualAudioSink) {
1035        int64_t nowUs = ALooper::GetNowUs();
1036        int64_t mediaUs;
1037        if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
1038            return 0ll;
1039        } else {
1040            return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1041        }
1042    }
1043    return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs);
1044}
1045
1046int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1047    int64_t realUs;
1048    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1049        // If we failed to get the current position, e.g. because the audio clock is
1050        // not ready, then just play out video immediately without delay.
1051        return nowUs;
1052    }
1053    return realUs;
1054}
1055
1056void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
1057    Mutex::Autolock autoLock(mLock);
1058    // TRICKY: vorbis decoder generates multiple frames with the same
1059    // timestamp, so only update on the first frame with a given timestamp
1060    if (mediaTimeUs == mAnchorTimeMediaUs) {
1061        return;
1062    }
1063    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
1064
1065    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
1066    if (mNextAudioClockUpdateTimeUs == -1) {
1067        AudioTimestamp ts;
1068        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
1069            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
1070        }
1071    }
1072    int64_t nowUs = ALooper::GetNowUs();
1073    if (mNextAudioClockUpdateTimeUs >= 0) {
1074        if (nowUs >= mNextAudioClockUpdateTimeUs) {
1075            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
1076            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
1077            mUseVirtualAudioSink = false;
1078            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
1079        }
1080    } else {
1081        int64_t unused;
1082        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
1083                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
1084                        > kMaxAllowedAudioSinkDelayUs)) {
1085            // Enough data has been sent to AudioSink, but AudioSink has not rendered
1086            // any data yet. Something is wrong with AudioSink, e.g., the device is not
1087            // connected to audio out.
1088            // Switch to system clock. This essentially creates a virtual AudioSink with
1089            // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
1090            // This virtual AudioSink renders audio data starting from the very first sample
1091            // and it's paced by system clock.
1092            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
1093            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
1094            mUseVirtualAudioSink = true;
1095        }
1096    }
1097    mAnchorNumFramesWritten = mNumFramesWritten;
1098    mAnchorTimeMediaUs = mediaTimeUs;
1099}
1100
1101// Called without mLock acquired.
1102void NuPlayer::Renderer::postDrainVideoQueue() {
1103    if (mDrainVideoQueuePending
1104            || getSyncQueues()
1105            || (mPaused && mVideoSampleReceived)) {
1106        return;
1107    }
1108
1109    if (mVideoQueue.empty()) {
1110        return;
1111    }
1112
1113    QueueEntry &entry = *mVideoQueue.begin();
1114
1115    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
1116    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
1117
1118    if (entry.mBuffer == NULL) {
1119        // EOS doesn't carry a timestamp.
1120        msg->post();
1121        mDrainVideoQueuePending = true;
1122        return;
1123    }
1124
1125    bool needRepostDrainVideoQueue = false;
1126    int64_t delayUs;
1127    int64_t nowUs = ALooper::GetNowUs();
1128    int64_t realTimeUs;
1129    if (mFlags & FLAG_REAL_TIME) {
1130        int64_t mediaTimeUs;
1131        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1132        realTimeUs = mediaTimeUs;
1133    } else {
1134        int64_t mediaTimeUs;
1135        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1136
1137        {
1138            Mutex::Autolock autoLock(mLock);
1139            if (mAnchorTimeMediaUs < 0) {
1140                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
1141                mAnchorTimeMediaUs = mediaTimeUs;
1142                realTimeUs = nowUs;
1143            } else if (!mVideoSampleReceived) {
1144                // Always render the first video frame.
1145                realTimeUs = nowUs;
1146            } else if (mAudioFirstAnchorTimeMediaUs < 0
1147                || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) {
1148                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1149            } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) {
1150                needRepostDrainVideoQueue = true;
1151                realTimeUs = nowUs;
1152            } else {
1153                realTimeUs = nowUs;
1154            }
1155        }
1156        if (!mHasAudio) {
1157            // smooth out videos >= 10fps
1158            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1159        }
1160
1161        // Heuristics to handle the situation where the media time changes without a
1162        // discontinuity. If we have not drained an audio buffer that was
1163        // received after this buffer, repost in 10 msec. Otherwise repost
1164        // in 500 msec.
1165        delayUs = realTimeUs - nowUs;
1166        int64_t postDelayUs = -1;
1167        if (delayUs > 500000) {
1168            postDelayUs = 500000;
1169            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
1170                postDelayUs = 10000;
1171            }
1172        } else if (needRepostDrainVideoQueue) {
1173            // CHECK(mPlaybackRate > 0);
1174            // CHECK(mAudioFirstAnchorTimeMediaUs >= 0);
1175            // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0);
1176            postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs;
1177            postDelayUs /= mPlaybackRate;
1178        }
1179
1180        if (postDelayUs >= 0) {
1181            msg->setWhat(kWhatPostDrainVideoQueue);
1182            msg->post(postDelayUs);
1183            mVideoScheduler->restart();
1184            ALOGI("possible video time jump of %dms or uninitialized media clock, retrying in %dms",
1185                    (int)(delayUs / 1000), (int)(postDelayUs / 1000));
1186            mDrainVideoQueuePending = true;
1187            return;
1188        }
1189    }
1190
1191    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1192    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1193
1194    delayUs = realTimeUs - nowUs;
1195
1196    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
1197    // post 2 display refreshes before rendering is due
1198    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1199
1200    mDrainVideoQueuePending = true;
1201}
1202
1203void NuPlayer::Renderer::onDrainVideoQueue() {
1204    if (mVideoQueue.empty()) {
1205        return;
1206    }
1207
1208    QueueEntry *entry = &*mVideoQueue.begin();
1209
1210    if (entry->mBuffer == NULL) {
1211        // EOS
1212
1213        notifyEOS(false /* audio */, entry->mFinalResult);
1214
1215        mVideoQueue.erase(mVideoQueue.begin());
1216        entry = NULL;
1217
1218        setVideoLateByUs(0);
1219        return;
1220    }
1221
1222    int64_t nowUs = ALooper::GetNowUs();
1223    int64_t realTimeUs;
1224    int64_t mediaTimeUs = -1;
1225    if (mFlags & FLAG_REAL_TIME) {
1226        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1227    } else {
1228        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1229
1230        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1231    }
1232
1233    bool tooLate = false;
1234
1235    if (!mPaused) {
1236        setVideoLateByUs(nowUs - realTimeUs);
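        // A frame is considered too late to render if it is more than 40ms behind the
        // time at which it should have been displayed.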
1237        tooLate = (mVideoLateByUs > 40000);
1238
1239        if (tooLate) {
1240            ALOGV("video late by %lld us (%.2f secs)",
1241                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1242        } else {
1243            int64_t mediaUs = 0;
1244            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1245            ALOGV("rendering video at media time %.2f secs",
1246                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
1247                    mediaUs) / 1E6);
1248
1249            if (!(mFlags & FLAG_REAL_TIME)
1250                    && mLastAudioMediaTimeUs != -1
1251                    && mediaTimeUs > mLastAudioMediaTimeUs) {
1252                // If audio ends before video, video continues to drive media clock.
1253                // Also smooth out videos >= 10fps.
1254                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1255            }
1256        }
1257    } else {
1258        setVideoLateByUs(0);
1259        if (!mVideoSampleReceived && !mHasAudio) {
1260            // This will ensure that the first frame after a flush won't be used as anchor
1261            // when renderer is in paused state, because resume can happen any time after seek.
1262            Mutex::Autolock autoLock(mLock);
1263            clearAnchorTime_l();
1264        }
1265    }
1266
1267    // Always render the first video frame while keeping stats on A/V sync.
1268    if (!mVideoSampleReceived) {
1269        realTimeUs = nowUs;
1270        tooLate = false;
1271    }
1272
1273    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
1274    entry->mNotifyConsumed->setInt32("render", !tooLate);
1275    entry->mNotifyConsumed->post();
1276    mVideoQueue.erase(mVideoQueue.begin());
1277    entry = NULL;
1278
1279    mVideoSampleReceived = true;
1280
1281    if (!mPaused) {
1282        if (!mVideoRenderingStarted) {
1283            mVideoRenderingStarted = true;
1284            notifyVideoRenderingStart();
1285        }
1286        Mutex::Autolock autoLock(mLock);
1287        notifyIfMediaRenderingStarted_l();
1288    }
1289}
1290
1291void NuPlayer::Renderer::notifyVideoRenderingStart() {
1292    sp<AMessage> notify = mNotify->dup();
1293    notify->setInt32("what", kWhatVideoRenderingStart);
1294    notify->post();
1295}
1296
1297void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1298    if (audio && delayUs > 0) {
1299        sp<AMessage> msg = new AMessage(kWhatEOS, this);
1300        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1301        msg->setInt32("finalResult", finalResult);
1302        msg->post(delayUs);
1303        return;
1304    }
1305    sp<AMessage> notify = mNotify->dup();
1306    notify->setInt32("what", kWhatEOS);
1307    notify->setInt32("audio", static_cast<int32_t>(audio));
1308    notify->setInt32("finalResult", finalResult);
1309    notify->post(delayUs);
1310}
1311
1312void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1313    sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1314    msg->setInt32("reason", reason);
1315    msg->post();
1316}
1317
1318void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1319    int32_t audio;
1320    CHECK(msg->findInt32("audio", &audio));
1321
1322    if (dropBufferIfStale(audio, msg)) {
1323        return;
1324    }
1325
1326    if (audio) {
1327        mHasAudio = true;
1328    } else {
1329        mHasVideo = true;
1330    }
1331
1332    if (mHasVideo) {
1333        if (mVideoScheduler == NULL) {
1334            mVideoScheduler = new VideoFrameScheduler();
1335            mVideoScheduler->init();
1336        }
1337    }
1338
1339    sp<ABuffer> buffer;
1340    CHECK(msg->findBuffer("buffer", &buffer));
1341
1342    sp<AMessage> notifyConsumed;
1343    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1344
1345    QueueEntry entry;
1346    entry.mBuffer = buffer;
1347    entry.mNotifyConsumed = notifyConsumed;
1348    entry.mOffset = 0;
1349    entry.mFinalResult = OK;
1350    entry.mBufferOrdinal = ++mTotalBuffersQueued;
1351
1352    if (audio) {
1353        Mutex::Autolock autoLock(mLock);
1354        mAudioQueue.push_back(entry);
1355        postDrainAudioQueue_l();
1356    } else {
1357        mVideoQueue.push_back(entry);
1358        postDrainVideoQueue();
1359    }
1360
1361    Mutex::Autolock autoLock(mLock);
1362    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1363        return;
1364    }
1365
1366    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1367    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1368
1369    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1370        // EOS signalled on either queue.
1371        syncQueuesDone_l();
1372        return;
1373    }
1374
1375    int64_t firstAudioTimeUs;
1376    int64_t firstVideoTimeUs;
1377    CHECK(firstAudioBuffer->meta()
1378            ->findInt64("timeUs", &firstAudioTimeUs));
1379    CHECK(firstVideoBuffer->meta()
1380            ->findInt64("timeUs", &firstVideoTimeUs));
1381
1382    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1383
1384    ALOGV("queueDiff = %.2f secs", diff / 1E6);
1385
1386    if (diff > 100000ll) {
1387        // Audio data starts more than 0.1 secs before video.
1388        // Drop some audio.
1389
1390        (*mAudioQueue.begin()).mNotifyConsumed->post();
1391        mAudioQueue.erase(mAudioQueue.begin());
1392        return;
1393    }
1394
1395    syncQueuesDone_l();
1396}
1397
1398void NuPlayer::Renderer::syncQueuesDone_l() {
1399    if (!mSyncQueues) {
1400        return;
1401    }
1402
1403    mSyncQueues = false;
1404
1405    if (!mAudioQueue.empty()) {
1406        postDrainAudioQueue_l();
1407    }
1408
1409    if (!mVideoQueue.empty()) {
1410        mLock.unlock();
1411        postDrainVideoQueue();
1412        mLock.lock();
1413    }
1414}
1415
1416void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1417    int32_t audio;
1418    CHECK(msg->findInt32("audio", &audio));
1419
1420    if (dropBufferIfStale(audio, msg)) {
1421        return;
1422    }
1423
1424    int32_t finalResult;
1425    CHECK(msg->findInt32("finalResult", &finalResult));
1426
1427    QueueEntry entry;
1428    entry.mOffset = 0;
1429    entry.mFinalResult = finalResult;
1430
1431    if (audio) {
1432        Mutex::Autolock autoLock(mLock);
1433        if (mAudioQueue.empty() && mSyncQueues) {
1434            syncQueuesDone_l();
1435        }
1436        mAudioQueue.push_back(entry);
1437        postDrainAudioQueue_l();
1438    } else {
1439        if (mVideoQueue.empty() && getSyncQueues()) {
1440            Mutex::Autolock autoLock(mLock);
1441            syncQueuesDone_l();
1442        }
1443        mVideoQueue.push_back(entry);
1444        postDrainVideoQueue();
1445    }
1446}
1447
1448void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1449    int32_t audio, notifyComplete;
1450    CHECK(msg->findInt32("audio", &audio));
1451
1452    {
1453        Mutex::Autolock autoLock(mLock);
1454        if (audio) {
1455            notifyComplete = mNotifyCompleteAudio;
1456            mNotifyCompleteAudio = false;
1457            mLastAudioMediaTimeUs = -1;
1458        } else {
1459            notifyComplete = mNotifyCompleteVideo;
1460            mNotifyCompleteVideo = false;
1461        }
1462
1463        // If we're currently syncing the queues, i.e. dropping audio while
1464        // aligning the first audio/video buffer times and only one of the
1465        // two queues has data, we may starve that queue by not requesting
1466        // more buffers from the decoder. If the other source then encounters
1467        // a discontinuity that leads to flushing, we'll never find the
1468        // corresponding discontinuity on the other queue.
1469        // Therefore we'll stop syncing the queues if at least one of them
1470        // is flushed.
1471        syncQueuesDone_l();
1472        clearAnchorTime_l();
1473    }
1474
1475    ALOGV("flushing %s", audio ? "audio" : "video");
1476    if (audio) {
1477        {
1478            Mutex::Autolock autoLock(mLock);
1479            flushQueue(&mAudioQueue);
1480
1481            ++mAudioDrainGeneration;
1482            ++mAudioEOSGeneration;
1483            prepareForMediaRenderingStart_l();
1484
1485            // the frame count will be reset after flush.
1486            clearAudioFirstAnchorTime_l();
1487        }
1488
1489        mDrainAudioQueuePending = false;
1490
1491        if (offloadingAudio()) {
1492            mAudioSink->pause();
1493            mAudioSink->flush();
1494            if (!mPaused) {
1495                mAudioSink->start();
1496            }
1497        } else {
1498            mAudioSink->pause();
1499            mAudioSink->flush();
1500            // Call stop() to signal to the AudioSink to completely fill the
1501            // internal buffer before resuming playback.
1502            // FIXME: this is ignored after flush().
1503            mAudioSink->stop();
1504            if (mPaused) {
1505                // Race condition: if renderer is paused and audio sink is stopped,
1506                // we need to make sure that the audio track buffer fully drains
1507                // before delivering data.
1508                // FIXME: remove this if we can detect if stop() is complete.
1509                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
1510                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
1511            } else {
1512                mAudioSink->start();
1513            }
1514            mNumFramesWritten = 0;
1515        }
1516        mNextAudioClockUpdateTimeUs = -1;
1517    } else {
1518        flushQueue(&mVideoQueue);
1519
1520        mDrainVideoQueuePending = false;
1521
1522        if (mVideoScheduler != NULL) {
1523            mVideoScheduler->restart();
1524        }
1525
1526        Mutex::Autolock autoLock(mLock);
1527        ++mVideoDrainGeneration;
1528        prepareForMediaRenderingStart_l();
1529    }
1530
1531    mVideoSampleReceived = false;
1532
1533    if (notifyComplete) {
1534        notifyFlushComplete(audio);
1535    }
1536}
1537
1538void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1539    while (!queue->empty()) {
1540        QueueEntry *entry = &*queue->begin();
1541
1542        if (entry->mBuffer != NULL) {
1543            entry->mNotifyConsumed->post();
1544        }
1545
1546        queue->erase(queue->begin());
1547        entry = NULL;
1548    }
1549}
1550
1551void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1552    sp<AMessage> notify = mNotify->dup();
1553    notify->setInt32("what", kWhatFlushComplete);
1554    notify->setInt32("audio", static_cast<int32_t>(audio));
1555    notify->post();
1556}
1557
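// Returns true if the buffer belongs to an older queue generation (i.e. it was queued
// before the most recent flush); in that case its notifyConsumed message is posted so the
// decoder can recycle the buffer, and the caller should drop it.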
1558bool NuPlayer::Renderer::dropBufferIfStale(
1559        bool audio, const sp<AMessage> &msg) {
1560    int32_t queueGeneration;
1561    CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1562
1563    if (queueGeneration == getQueueGeneration(audio)) {
1564        return false;
1565    }
1566
1567    sp<AMessage> notifyConsumed;
1568    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1569        notifyConsumed->post();
1570    }
1571
1572    return true;
1573}
1574
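// Called after a non-offloaded AudioSink has been (re)opened: reset the written-frame count
// and the anchor, then seed the count from whatever the sink reports as already written.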
1575void NuPlayer::Renderer::onAudioSinkChanged() {
1576    if (offloadingAudio()) {
1577        return;
1578    }
1579    CHECK(!mDrainAudioQueuePending);
1580    mNumFramesWritten = 0;
1581    {
1582        Mutex::Autolock autoLock(mLock);
1583        mAnchorNumFramesWritten = -1;
1584    }
1585    uint32_t written;
1586    if (mAudioSink->getFramesWritten(&written) == OK) {
1587        mNumFramesWritten = written;
1588    }
1589}
1590
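// Leaving offload mode: bump the audio drain generation so any pending kWhatDrainAudioQueue
// message for the old mode is discarded, and re-arm rendering-start tracking if it was active.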
1591void NuPlayer::Renderer::onDisableOffloadAudio() {
1592    Mutex::Autolock autoLock(mLock);
1593    mFlags &= ~FLAG_OFFLOAD_AUDIO;
1594    ++mAudioDrainGeneration;
1595    if (mAudioRenderingStartGeneration != -1) {
1596        prepareForMediaRenderingStart_l();
1597    }
1598}
1599
1600void NuPlayer::Renderer::onEnableOffloadAudio() {
1601    Mutex::Autolock autoLock(mLock);
1602    mFlags |= FLAG_OFFLOAD_AUDIO;
1603    ++mAudioDrainGeneration;
1604    if (mAudioRenderingStartGeneration != -1) {
1605        prepareForMediaRenderingStart_l();
1606    }
1607}
1608
1609void NuPlayer::Renderer::onPause() {
1610    if (mPaused) {
1611        return;
1612    }
1613
1614    {
1615        Mutex::Autolock autoLock(mLock);
1616        // We do not increment the audio drain generation, so the audio buffer keeps filling while paused.
1617        ++mVideoDrainGeneration;
1618        prepareForMediaRenderingStart_l();
1619        mPaused = true;
1620        mMediaClock->setPlaybackRate(0.0);
1621    }
1622
1623    mDrainAudioQueuePending = false;
1624    mDrainVideoQueuePending = false;
1625
1626    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1627    mAudioSink->pause();
1628    startAudioOffloadPauseTimeout();
1629
1630    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1631          mAudioQueue.size(), mVideoQueue.size());
1632}
1633
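// Restart the AudioSink if it is open, restore the playback rate on the sink and the media
// clock, and kick the audio/video drain loops for anything still queued.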
1634void NuPlayer::Renderer::onResume() {
1635    if (!mPaused) {
1636        return;
1637    }
1638
1639    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1640    cancelAudioOffloadPauseTimeout();
1641    if (mAudioSink->ready()) {
1642        status_t err = mAudioSink->start();
1643        if (err != OK) {
1644            ALOGE("cannot start AudioSink err %d", err);
1645            notifyAudioTearDown(kDueToError);
1646        }
1647    }
1648
1649    {
1650        Mutex::Autolock autoLock(mLock);
1651        mPaused = false;
1652        // rendering started message may have been delayed if we were paused.
1653        if (mRenderingDataDelivered) {
1654            notifyIfMediaRenderingStarted_l();
1655        }
1656        // Configure the AudioSink here, since we skipped doing so while pausing.
1657        if (mAudioSink != NULL && mAudioSink->ready()) {
1658            mAudioSink->setPlaybackRate(mPlaybackSettings);
1659        }
1660
1661        mMediaClock->setPlaybackRate(mPlaybackRate);
1662
1663        if (!mAudioQueue.empty()) {
1664            postDrainAudioQueue_l();
1665        }
1666    }
1667
1668    if (!mVideoQueue.empty()) {
1669        postDrainVideoQueue();
1670    }
1671}
1672
1673void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1674    if (mVideoScheduler == NULL) {
1675        mVideoScheduler = new VideoFrameScheduler();
1676    }
1677    mVideoScheduler->init(fps);
1678}
1679
1680int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1681    Mutex::Autolock autoLock(mLock);
1682    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1683}
1684
1685int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1686    Mutex::Autolock autoLock(mLock);
1687    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1688}
1689
1690bool NuPlayer::Renderer::getSyncQueues() {
1691    Mutex::Autolock autoLock(mLock);
1692    return mSyncQueues;
1693}
1694
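// Notify the player that the AudioSink needs to be torn down (and presumably re-opened),
// attaching the current position when available so playback can resume from there.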
1695void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1696    if (mAudioTornDown) {
1697        return;
1698    }
1699    mAudioTornDown = true;
1700
1701    int64_t currentPositionUs;
1702    sp<AMessage> notify = mNotify->dup();
1703    if (getCurrentPosition(&currentPositionUs) == OK) {
1704        notify->setInt64("positionUs", currentPositionUs);
1705    }
1706
1707    mAudioSink->stop();
1708    mAudioSink->flush();
1709
1710    notify->setInt32("what", kWhatAudioTearDown);
1711    notify->setInt32("reason", reason);
1712    notify->post();
1713}
1714
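// When paused while offloading, post a delayed kWhatAudioOffloadPauseTimeout tagged with the
// current generation; a wake lock is held so the device stays awake until the timeout fires
// (or is cancelled).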
1715void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1716    if (offloadingAudio()) {
1717        mWakeLock->acquire();
1718        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1719        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1720        msg->post(kOffloadPauseMaxUs);
1721    }
1722}
1723
1724void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1725    // We may have called startAudioOffloadPauseTimeout() without
1726    // the AudioSink open and with offloadingAudio enabled.
1727    //
1728    // By the time we cancel, offloadingAudio may have been disabled, so we always release
1729    // the wake lock and increment the pause timeout generation regardless.
1730    //
1731    // Note: The acquired wake lock prevents the device from suspending
1732    // immediately after an offload pause (in case a resume happens shortly thereafter).
1733    mWakeLock->release(true);
1734    ++mAudioOffloadPauseTimeoutGeneration;
1735}
1736
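// Open (or re-open) the AudioSink for the given format. In offload mode a compressed offload
// open is attempted first; if that fails, offload is disabled and, unless offloadOnly was
// requested, we fall back to a plain 16-bit PCM open below. When the requested configuration
// matches the current one, the sink is left untouched and OK is returned.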
1737status_t NuPlayer::Renderer::onOpenAudioSink(
1738        const sp<AMessage> &format,
1739        bool offloadOnly,
1740        bool hasVideo,
1741        uint32_t flags) {
1742    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1743            offloadOnly, offloadingAudio());
1744    bool audioSinkChanged = false;
1745
1746    int32_t numChannels;
1747    CHECK(format->findInt32("channel-count", &numChannels));
1748
1749    int32_t channelMask;
1750    if (!format->findInt32("channel-mask", &channelMask)) {
1751            // Signal the AudioSink to derive the channel mask from the channel count.
1752        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1753    }
1754
1755    int32_t sampleRate;
1756    CHECK(format->findInt32("sample-rate", &sampleRate));
1757
1758    if (offloadingAudio()) {
1759        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
1760        AString mime;
1761        CHECK(format->findString("mime", &mime));
1762        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1763
1764        if (err != OK) {
1765            ALOGE("Couldn't map mime \"%s\" to a valid "
1766                    "audio_format", mime.c_str());
1767            onDisableOffloadAudio();
1768        } else {
1769            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1770                    mime.c_str(), audioFormat);
1771
1772            int avgBitRate = -1;
1773            format->findInt32("bitrate", &avgBitRate);
1774
1775            int32_t aacProfile = -1;
1776            if (audioFormat == AUDIO_FORMAT_AAC
1777                    && format->findInt32("aac-profile", &aacProfile)) {
1778                // Redefine the AAC audio_format according to the AAC profile.
1779                mapAACProfileToAudioFormat(
1780                        audioFormat,
1781                        aacProfile);
1782            }
1783
1784            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1785            offloadInfo.duration_us = -1;
1786            format->findInt64(
1787                    "durationUs", &offloadInfo.duration_us);
1788            offloadInfo.sample_rate = sampleRate;
1789            offloadInfo.channel_mask = channelMask;
1790            offloadInfo.format = audioFormat;
1791            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1792            offloadInfo.bit_rate = avgBitRate;
1793            offloadInfo.has_video = hasVideo;
1794            offloadInfo.is_streaming = true;
1795
1796            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1797                ALOGV("openAudioSink: no change in offload mode");
1798                // no change from previous configuration, everything ok.
1799                return OK;
1800            }
1801            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1802
1803            ALOGV("openAudioSink: try to open AudioSink in offload mode");
1804            uint32_t offloadFlags = flags;
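            // Request compressed offload output; the deep-buffer flag is cleared since it
            // only applies to the (non-offloaded) PCM path.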
1805            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1806            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1807            audioSinkChanged = true;
1808            mAudioSink->close();
1809
1810            err = mAudioSink->open(
1811                    sampleRate,
1812                    numChannels,
1813                    (audio_channel_mask_t)channelMask,
1814                    audioFormat,
1815                    0 /* bufferCount - unused */,
1816                    &NuPlayer::Renderer::AudioSinkCallback,
1817                    this,
1818                    (audio_output_flags_t)offloadFlags,
1819                    &offloadInfo);
1820
1821            if (err == OK) {
1822                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1823            }
1824
1825            if (err == OK) {
1826                // If the playback is offloaded to h/w, we pass
1827                // the HAL some metadata information.
1828                // We don't want to do this for PCM because it
1829                // will be going through the AudioFlinger mixer
1830                // before reaching the hardware.
1831                // TODO
1832                mCurrentOffloadInfo = offloadInfo;
1833                if (!mPaused) { // for preview mode, don't start if paused
1834                    err = mAudioSink->start();
1835                }
1836                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1837            }
1838            if (err != OK) {
1839                // Clean up, fall back to non offload mode.
1840                mAudioSink->close();
1841                onDisableOffloadAudio();
1842                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1843                ALOGV("openAudioSink: offload failed");
1844                if (offloadOnly) {
1845                    notifyAudioTearDown(kForceNonOffload);
1846                }
1847            } else {
1848                mUseAudioCallback = true;  // offload mode transfers data through callback
1849                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1850            }
1851        }
1852    }
1853    if (!offloadOnly && !offloadingAudio()) {
1854        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1855        uint32_t pcmFlags = flags;
1856        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1857
1858        const PcmInfo info = {
1859                (audio_channel_mask_t)channelMask,
1860                (audio_output_flags_t)pcmFlags,
1861                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
1862                numChannels,
1863                sampleRate
1864        };
1865        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
1866            ALOGV("openAudioSink: no change in pcm mode");
1867            // no change from previous configuration, everything ok.
1868            return OK;
1869        }
1870
1871        audioSinkChanged = true;
1872        mAudioSink->close();
1873        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1874        // Note: It is possible to set up the callback, but not use it to send audio data.
1875        // This requires a fix in AudioSink to explicitly specify the transfer mode.
1876        mUseAudioCallback = getUseAudioCallbackSetting();
1877        if (mUseAudioCallback) {
1878            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1879        }
1880
1881        // Compute the desired buffer size.
1882        // For callback mode, the amount of time before wakeup is about half the buffer size.
1883        const uint32_t frameCount =
1884                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
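        // For example, a 48 kHz stream with the 500 ms default sink setting gives
        // 48000 * 500 / 1000 = 24000 frames.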
1885
1886        // With doNotReconnect set, the AudioSink signals back and lets NuPlayer re-construct the
1887        // AudioSink. We don't want this when there is video, because it would cause a video seek
1888        // back to the previous I-frame. We do want it when there is only audio, because it gives
1889        // NuPlayer a chance to switch from non-offload mode to offload mode.
1890        // So we only set doNotReconnect when there is no video.
1891        const bool doNotReconnect = !hasVideo;
1892
1893        // We should always be able to set our playback settings if the sink is closed.
1894        LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
1895                "onOpenAudioSink: can't set playback rate on closed sink");
1896        status_t err = mAudioSink->open(
1897                    sampleRate,
1898                    numChannels,
1899                    (audio_channel_mask_t)channelMask,
1900                    AUDIO_FORMAT_PCM_16_BIT,
1901                    0 /* bufferCount - unused */,
1902                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
1903                    mUseAudioCallback ? this : NULL,
1904                    (audio_output_flags_t)pcmFlags,
1905                    NULL,
1906                    doNotReconnect,
1907                    frameCount);
1908        if (err != OK) {
1909            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
1910            mAudioSink->close();
1911            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1912            return err;
1913        }
1914        mCurrentPcmInfo = info;
1915        if (!mPaused) { // for preview mode, don't start if paused
1916            mAudioSink->start();
1917        }
1918    }
1919    if (audioSinkChanged) {
1920        onAudioSinkChanged();
1921    }
1922    mAudioTornDown = false;
1923    return OK;
1924}
1925
1926void NuPlayer::Renderer::onCloseAudioSink() {
1927    mAudioSink->close();
1928    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1929    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1930}
1931
1932}  // namespace android
1933
1934