// NuPlayerRenderer.cpp, revision 03cee24b8d54e5b5a94957b9fb7049738ff68765
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "NuPlayerRenderer"
#include <utils/Log.h>

#include "NuPlayerRenderer.h"
#include <algorithm>
#include <cutils/properties.h>
#include <media/stagefright/foundation/ABuffer.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/AWakeLock.h>
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/VideoFrameScheduler.h>

#include <inttypes.h>

namespace android {

/*
 * Example of common configuration settings in shell script form

   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
   adb shell setprop audio.offload.disable 1

   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
   adb shell setprop audio.offload.video 1

   #Use audio callbacks for PCM data
   adb shell setprop media.stagefright.audio.cbk 1

   #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
   adb shell setprop media.stagefright.audio.deep 1

   #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
   adb shell setprop media.stagefright.audio.sink 1000

 * These configurations take effect for the next track played (not the current track).
 */

static inline bool getUseAudioCallbackSetting() {
    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
}

static inline int32_t getAudioSinkPcmMsSetting() {
    return property_get_int32(
            "media.stagefright.audio.sink", 500 /* default_value */);
}

// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000ll;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;

static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;

// static
const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;

NuPlayer::Renderer::Renderer(
        const sp<MediaPlayerBase::AudioSink> &sink,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0ll),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(false),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new AWakeLock()) {
    mMediaClock = new MediaClock;
    mPlaybackRate = mPlaybackSettings.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
}

NuPlayer::Renderer::~Renderer() {
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }
}

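// Queues a decoded buffer for the given stream (audio or video). The work is
// done asynchronously on the renderer's looper; notifyConsumed is posted once
// the buffer has been rendered or dropped.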
void NuPlayer::Renderer::queueBuffer(
        bool audio,
        const sp<ABuffer> &buffer,
        const sp<AMessage> &notifyConsumed) {
    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setBuffer("buffer", buffer);
    msg->setMessage("notifyConsumed", notifyConsumed);
    msg->post();
}

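// Signals end of stream on the given queue; finalResult must not be OK
// (typically ERROR_END_OF_STREAM for a normal end of stream).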
void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
    CHECK_NE(finalResult, (status_t)OK);

    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setInt32("finalResult", finalResult);
    msg->post();
}

status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
    writeToAMessage(msg, rate);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed == 0.f) {
        onPause();
        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
        // have to correspond to any non-zero speed (e.g. the old speed). Keep
        // the settings nonetheless, using the old speed, in case the audiosink changes.
        AudioPlaybackRate newRate = rate;
        newRate.mSpeed = mPlaybackSettings.mSpeed;
        mPlaybackSettings = newRate;
        return OK;
    }

    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            return err;
        }
    }
    mPlaybackSettings = rate;
    mPlaybackRate = rate.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
    return OK;
}

status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, rate);
        }
    }
    return err;
}

status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate");
            }
            // get playback settings used by audiosink, as it may be
            // slightly off due to audiosink not taking small changes.
            mPlaybackSettings = *rate;
            if (mPaused) {
                rate->mSpeed = 0.f;
            }
        }
        return err;
    }
    *rate = mPlaybackSettings;
    return OK;
}

status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
    writeToAMessage(msg, sync, videoFpsHint);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
        return BAD_VALUE;
    }
    // TODO: support sync sources
    return INVALID_OPERATION;
}

status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, sync, videoFps);
        }
    }
    return err;
}

status_t NuPlayer::Renderer::onGetSyncSettings(
        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
    *sync = mSyncSettings;
    *videoFps = -1.f;
    return OK;
}

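// Flushes the pending buffers of one stream. Bumping the queue and drain
// generations here invalidates any buffers and drain requests already in
// flight; the actual queue teardown happens in onFlush() on the looper thread.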
void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        clearAnchorTime_l();
        mVideoLateByUs = 0;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}

void NuPlayer::Renderer::signalTimeDiscontinuity() {
}

void NuPlayer::Renderer::signalDisableOffloadAudio() {
    (new AMessage(kWhatDisableOffloadAudio, this))->post();
}

void NuPlayer::Renderer::signalEnableOffloadAudio() {
    (new AMessage(kWhatEnableOffloadAudio, this))->post();
}

void NuPlayer::Renderer::pause() {
    (new AMessage(kWhatPause, this))->post();
}

void NuPlayer::Renderer::resume() {
    (new AMessage(kWhatResume, this))->post();
}

void NuPlayer::Renderer::setVideoFrameRate(float fps) {
    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
    msg->setFloat("frame-rate", fps);
    msg->post();
}

// Called on any threads without mLock acquired.
status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}

void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}

void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
    if (mAudioFirstAnchorTimeMediaUs == -1) {
        mAudioFirstAnchorTimeMediaUs = mediaUs;
        mMediaClock->setStartingTimeMedia(mediaUs);
    }
}

void NuPlayer::Renderer::clearAnchorTime_l() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}

void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}

int64_t NuPlayer::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}

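// Opens (or re-opens) the AudioSink for the given format. This call blocks
// until the looper thread has finished; on success, *isOffloaded reports
// whether an offloaded (compressed) sink was selected.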
status_t NuPlayer::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);

    sp<AMessage> response;
    msg->postAndAwaitResponse(&response);

    int32_t err;
    if (!response->findInt32("err", &err)) {
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}

void NuPlayer::Renderer::closeAudioSink() {
    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);

    sp<AMessage> response;
    msg->postAndAwaitResponse(&response);
}

void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                uint32_t numFramesPendingPlayout =
                    mNumFramesWritten - numFramesPlayed;

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
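                // For example, at 44.1 kHz msecsPerFrame() is ~0.0227 ms, so
                // 2048 pending frames correspond to roughly 46 ms of queued audio.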
                if (mPlaybackRate > 1.0f) {
                    delayUs /= mPlaybackRate;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatEOS:
        {
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}

void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}

void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}

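// Posts kWhatMediaRenderingStart to the upper layer once both streams have
// delivered data in the current drain generations (and the renderer is not paused).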
void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}

// static
size_t NuPlayer::Renderer::AudioSinkCallback(
        MediaPlayerBase::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayerBase::AudioSink::cb_event_t event) {
    NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;

    switch (event) {
        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
            break;
        }

        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
            break;
        }

        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown(kDueToError);
            break;
        }
    }

    return 0;
}

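// AudioSink callback path (mUseAudioCallback): copies queued audio into the
// sink-provided buffer, updates the media clock anchor and posts EOS once the
// last buffer has been consumed.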
size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return 0;
    }

    bool hasEOS = false;

    size_t sizeCopied = 0;
    bool firstEntry = true;
    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
    while (sizeCopied < size && !mAudioQueue.empty()) {
        entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) { // EOS
            hasEOS = true;
            mAudioQueue.erase(mAudioQueue.begin());
            break;
        }

        if (firstEntry && entry->mOffset == 0) {
            firstEntry = false;
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;
        size_t sizeRemaining = size - sizeCopied;
        if (copy > sizeRemaining) {
            copy = sizeRemaining;
        }

        memcpy((char *)buffer + sizeCopied,
               entry->mBuffer->data() + entry->mOffset,
               copy);

        entry->mOffset += copy;
        if (entry->mOffset == entry->mBuffer->size()) {
            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
        }
        sizeCopied += copy;

        notifyIfMediaRenderingStarted_l();
    }

    if (mAudioFirstAnchorTimeMediaUs >= 0) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs =
            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
        // we don't know how much data we are queueing for offloaded tracks.
        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
    }

    // for non-offloaded audio, we need to compute the frames written because
    // there is no EVENT_STREAM_END notification. The number of frames written
    // gives an estimate of the pending played-out duration.
    if (!offloadingAudio()) {
        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
    }

    if (hasEOS) {
        (new AMessage(kWhatStopAudioSink, this))->post();
        // As there is currently no EVENT_STREAM_END callback notification for
        // non-offloaded audio tracks, we need to post the EOS ourselves.
        if (!offloadingAudio()) {
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            ALOGV("fillAudioBuffer: notifyEOS "
                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
        }
    }
    return sizeCopied;
}

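// Posts the consumed/EOS notifications for every entry up to and including the
// last EOS marker, dropping their payloads; used when the AudioSink cannot be
// drained (e.g. it was never successfully opened).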
void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        if (entry->mBuffer == NULL
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == NULL) {
                // delay doesn't matter as we don't even have an AudioTrack
                notifyEOS(true /* audio */, it->mFinalResult);
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}

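// Writes as much queued audio as the sink will accept. Returns true if another
// drain should be scheduled.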
bool NuPlayer::Renderer::onDrainAudioQueue() {
    // do not drain audio during teardown as queued buffers may be invalid.
    if (mAudioTornDown) {
        return false;
    }
    // TODO: This call to getPosition checks if AudioTrack has been created
    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
    // CHECKs on getPosition will fail.
    // We still need to figure out why AudioTrack is not created when
    // this function is called. One possible reason could be leftover
    // audio. Another place to check is whether the decoder has received
    // INFO_FORMAT_CHANGED as its first buffer, since AudioSink is opened
    // there; also look at possible interactions with flush immediately
    // after start. Investigate the error message
    // "vorbis_dsp_synthesis returned -135", along with RTSP.
    uint32_t numFramesPlayed;
    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
        // When getPosition fails, renderer will not reschedule the draining
        // unless new samples are queued.
        // If we have pending EOS (or "eos" marker for discontinuities), we need
        // to post these now as NuPlayerDecoder might be waiting for it.
        drainAudioQueueUntilLastEOS();

        ALOGW("onDrainAudioQueue(): audio sink is not ready");
        return false;
    }

#if 0
    ssize_t numFramesAvailableToWrite =
        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);

    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
        ALOGI("audio sink underrun");
    } else {
        ALOGV("audio queue has %d frames left to play",
             mAudioSink->frameCount() - numFramesAvailableToWrite);
    }
#endif

    uint32_t prevFramesWritten = mNumFramesWritten;
    while (!mAudioQueue.empty()) {
        QueueEntry *entry = &*mAudioQueue.begin();

        mLastAudioBufferDrained = entry->mBufferOrdinal;

        if (entry->mBuffer == NULL) {
            // EOS
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);

            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
            if (mAudioSink->needsTrailingPadding()) {
                // If we're not in gapless playback (i.e. through setNextPlayer), we
                // need to stop the track here, because that will play out the last
                // little bit at the end of the file. Otherwise short files won't play.
                mAudioSink->stop();
                mNumFramesWritten = 0;
            }
            return false;
        }

        // ignore 0-sized buffer which could be EOS marker with no data
        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
                    mediaTimeUs / 1E6);
            onNewAudioMediaTime(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;

        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
                                            copy, false /* blocking */);
        if (written < 0) {
            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
            if (written == WOULD_BLOCK) {
                ALOGV("AudioSink write would block when writing %zu bytes", copy);
            } else {
                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
                // This can only happen when AudioSink was opened with doNotReconnect flag set to
                // true, in which case the NuPlayer will handle the reconnect.
                notifyAudioTearDown(kDueToError);
            }
            break;
        }

        entry->mOffset += written;
        size_t remainder = entry->mBuffer->size() - entry->mOffset;
        if ((ssize_t)remainder < mAudioSink->frameSize()) {
            if (remainder > 0) {
                ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
                        remainder);
                entry->mOffset += remainder;
                copy -= remainder;
            }

            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());

            entry = NULL;
        }

        size_t copiedFrames = written / mAudioSink->frameSize();
        mNumFramesWritten += copiedFrames;

        {
            Mutex::Autolock autoLock(mLock);
            int64_t maxTimeMedia;
            maxTimeMedia =
                mAnchorTimeMediaUs +
                        (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
                                * 1000LL * mAudioSink->msecsPerFrame());
            mMediaClock->updateMaxTimeMedia(maxTimeMedia);

            notifyIfMediaRenderingStarted_l();
        }

        if (written != (ssize_t)copy) {
            // A short count was received from AudioSink::write()
            //
            // AudioSink write is called in non-blocking mode.
            // It may return with a short count when:
            //
            // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
            //    discarded.
            // 2) The data to be copied exceeds the available buffer in AudioSink.
            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.

            // (Case 1)
            // Must be a multiple of the frame size. If it is not a multiple of the frame size, it
            // needs to fail, as we should not carry over fractional frames between calls.
            CHECK_EQ(copy % mAudioSink->frameSize(), 0);

            // (Case 2, 3, 4)
            // Return early to the caller.
            // Beware of calling immediately again as this may busy-loop if you are not careful.
            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
            break;
        }
    }

    // calculate whether we need to reschedule another write.
    bool reschedule = !mAudioQueue.empty()
            && (!mPaused
                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
    return reschedule;
}

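// Converts a frame count into a duration in microseconds at the nominal sample
// rate of the current (offloaded or PCM) audio configuration.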
int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
    int32_t sampleRate = offloadingAudio() ?
            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
    if (sampleRate == 0) {
        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
        return 0;
    }
    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
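    // ((2^31 - 1) frames at 48 kHz is about 44739 seconds, i.e. roughly 12.4 hours.)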
    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
}

// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
    if (mUseVirtualAudioSink) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t mediaUs;
        if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
            return 0ll;
        } else {
            return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
        }
    }
    return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs);
}

int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
    int64_t realUs;
    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
        // If we failed to get the current position, e.g. because the audio
        // clock is not ready, then just play out video immediately without delay.
        return nowUs;
    }
    return realUs;
}

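// Called for the first buffer carrying each new audio timestamp. Anchors the
// media clock to the audio position, or falls back to a system-clock-driven
// "virtual" sink if the AudioSink never starts rendering.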
void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);

    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
    if (mNextAudioClockUpdateTimeUs == -1) {
        AudioTimestamp ts;
        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    if (mNextAudioClockUpdateTimeUs >= 0) {
        if (nowUs >= mNextAudioClockUpdateTimeUs) {
            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = false;
            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
        }
    } else {
        int64_t unused;
        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
                        > kMaxAllowedAudioSinkDelayUs)) {
            // Enough data has been sent to AudioSink, but AudioSink has not rendered
            // any data yet. Something is wrong with AudioSink, e.g., the device is not
            // connected to audio out.
            // Switch to system clock. This essentially creates a virtual AudioSink with
            // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
            // This virtual AudioSink renders audio data starting from the very first sample
            // and it's paced by system clock.
            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = true;
        }
    }
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}

// Called without mLock acquired.
void NuPlayer::Renderer::postDrainVideoQueue() {
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    bool needRepostDrainVideoQueue = false;
    int64_t delayUs;
    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    if (mFlags & FLAG_REAL_TIME) {
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
        realTimeUs = mediaTimeUs;
    } else {
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        {
            Mutex::Autolock autoLock(mLock);
            if (mAnchorTimeMediaUs < 0) {
                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
                mAnchorTimeMediaUs = mediaTimeUs;
                realTimeUs = nowUs;
            } else if (!mVideoSampleReceived) {
                // Always render the first video frame.
                realTimeUs = nowUs;
            } else if (mAudioFirstAnchorTimeMediaUs < 0
                || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) {
                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
            } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) {
                needRepostDrainVideoQueue = true;
                realTimeUs = nowUs;
            } else {
                realTimeUs = nowUs;
            }
        }
        if (!mHasAudio) {
            // smooth out videos >= 10fps
            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
        }

        // Heuristics to handle situation when media time changed without a
        // discontinuity. If we have not drained an audio buffer that was
        // received after this buffer, repost in 10 msec. Otherwise repost
        // in 500 msec.
        delayUs = realTimeUs - nowUs;
        int64_t postDelayUs = -1;
        if (delayUs > 500000) {
            postDelayUs = 500000;
            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
                postDelayUs = 10000;
            }
        } else if (needRepostDrainVideoQueue) {
            // CHECK(mPlaybackRate > 0);
            // CHECK(mAudioFirstAnchorTimeMediaUs >= 0);
            // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0);
            postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs;
            postDelayUs /= mPlaybackRate;
        }

        if (postDelayUs >= 0) {
            msg->setWhat(kWhatPostDrainVideoQueue);
            msg->post(postDelayUs);
            mVideoScheduler->restart();
            ALOGI("possible video time jump of %dms or uninitialized media clock, retrying in %dms",
                    (int)(delayUs / 1000), (int)(postDelayUs / 1000));
            mDrainVideoQueuePending = true;
            return;
        }
    }

    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

    delayUs = realTimeUs - nowUs;

    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
    // post 2 display refreshes before rendering is due
    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

    mDrainVideoQueuePending = true;
}

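// Renders (or drops) the frame at the head of the video queue. A frame more
// than 40 ms behind the media clock is considered too late and is not
// rendered; the first frame after start/flush is always rendered.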
1212void NuPlayer::Renderer::onDrainVideoQueue() {
1213    if (mVideoQueue.empty()) {
1214        return;
1215    }
1216
1217    QueueEntry *entry = &*mVideoQueue.begin();
1218
1219    if (entry->mBuffer == NULL) {
1220        // EOS
1221
1222        notifyEOS(false /* audio */, entry->mFinalResult);
1223
1224        mVideoQueue.erase(mVideoQueue.begin());
1225        entry = NULL;
1226
1227        setVideoLateByUs(0);
1228        return;
1229    }
1230
1231    int64_t nowUs = ALooper::GetNowUs();
1232    int64_t realTimeUs;
1233    int64_t mediaTimeUs = -1;
1234    if (mFlags & FLAG_REAL_TIME) {
1235        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1236    } else {
1237        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1238
1239        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1240    }
1241
1242    bool tooLate = false;
1243
1244    if (!mPaused) {
1245        setVideoLateByUs(nowUs - realTimeUs);
1246        tooLate = (mVideoLateByUs > 40000);
1247
1248        if (tooLate) {
1249            ALOGV("video late by %lld us (%.2f secs)",
1250                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1251        } else {
1252            int64_t mediaUs = 0;
1253            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1254            ALOGV("rendering video at media time %.2f secs",
1255                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
1256                    mediaUs) / 1E6);
1257
1258            if (!(mFlags & FLAG_REAL_TIME)
1259                    && mLastAudioMediaTimeUs != -1
1260                    && mediaTimeUs > mLastAudioMediaTimeUs) {
1261                // If audio ends before video, video continues to drive media clock.
1262                // Also smooth out videos >= 10fps.
1263                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1264            }
1265        }
1266    } else {
1267        setVideoLateByUs(0);
1268        if (!mVideoSampleReceived && !mHasAudio) {
1269            // This will ensure that the first frame after a flush won't be used as anchor
1270            // when renderer is in paused state, because resume can happen any time after seek.
1271            Mutex::Autolock autoLock(mLock);
1272            clearAnchorTime_l();
1273        }
1274    }
1275
1276    // Always render the first video frame while keeping stats on A/V sync.
1277    if (!mVideoSampleReceived) {
1278        realTimeUs = nowUs;
1279        tooLate = false;
1280    }
1281
1282    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
1283    entry->mNotifyConsumed->setInt32("render", !tooLate);
1284    entry->mNotifyConsumed->post();
1285    mVideoQueue.erase(mVideoQueue.begin());
1286    entry = NULL;
1287
1288    mVideoSampleReceived = true;
1289
1290    if (!mPaused) {
1291        if (!mVideoRenderingStarted) {
1292            mVideoRenderingStarted = true;
1293            notifyVideoRenderingStart();
1294        }
1295        Mutex::Autolock autoLock(mLock);
1296        notifyIfMediaRenderingStarted_l();
1297    }
1298}
1299
1300void NuPlayer::Renderer::notifyVideoRenderingStart() {
1301    sp<AMessage> notify = mNotify->dup();
1302    notify->setInt32("what", kWhatVideoRenderingStart);
1303    notify->post();
1304}
1305
1306void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1307    if (audio && delayUs > 0) {
1308        sp<AMessage> msg = new AMessage(kWhatEOS, this);
1309        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1310        msg->setInt32("finalResult", finalResult);
1311        msg->post(delayUs);
1312        return;
1313    }
1314    sp<AMessage> notify = mNotify->dup();
1315    notify->setInt32("what", kWhatEOS);
1316    notify->setInt32("audio", static_cast<int32_t>(audio));
1317    notify->setInt32("finalResult", finalResult);
1318    notify->post(delayUs);
1319}
1320
1321void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1322    sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1323    msg->setInt32("reason", reason);
1324    msg->post();
1325}
1326
1327void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1328    int32_t audio;
1329    CHECK(msg->findInt32("audio", &audio));
1330
1331    if (dropBufferIfStale(audio, msg)) {
1332        return;
1333    }
1334
1335    if (audio) {
1336        mHasAudio = true;
1337    } else {
1338        mHasVideo = true;
1339    }
1340
1341    if (mHasVideo) {
1342        if (mVideoScheduler == NULL) {
1343            mVideoScheduler = new VideoFrameScheduler();
1344            mVideoScheduler->init();
1345        }
1346    }
1347
1348    sp<ABuffer> buffer;
1349    CHECK(msg->findBuffer("buffer", &buffer));
1350
1351    sp<AMessage> notifyConsumed;
1352    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1353
1354    QueueEntry entry;
1355    entry.mBuffer = buffer;
1356    entry.mNotifyConsumed = notifyConsumed;
1357    entry.mOffset = 0;
1358    entry.mFinalResult = OK;
1359    entry.mBufferOrdinal = ++mTotalBuffersQueued;
1360
1361    if (audio) {
1362        Mutex::Autolock autoLock(mLock);
1363        mAudioQueue.push_back(entry);
1364        postDrainAudioQueue_l();
1365    } else {
1366        mVideoQueue.push_back(entry);
1367        postDrainVideoQueue();
1368    }
1369
1370    Mutex::Autolock autoLock(mLock);
1371    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1372        return;
1373    }
1374
1375    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1376    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1377
1378    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1379        // EOS signalled on either queue.
1380        syncQueuesDone_l();
1381        return;
1382    }
1383
1384    int64_t firstAudioTimeUs;
1385    int64_t firstVideoTimeUs;
1386    CHECK(firstAudioBuffer->meta()
1387            ->findInt64("timeUs", &firstAudioTimeUs));
1388    CHECK(firstVideoBuffer->meta()
1389            ->findInt64("timeUs", &firstVideoTimeUs));
1390
1391    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1392
1393    ALOGV("queueDiff = %.2f secs", diff / 1E6);
1394
1395    if (diff > 100000ll) {
1396        // Audio data starts More than 0.1 secs before video.
1397        // Drop some audio.
1398
1399        (*mAudioQueue.begin()).mNotifyConsumed->post();
1400        mAudioQueue.erase(mAudioQueue.begin());
1401        return;
1402    }
1403
1404    syncQueuesDone_l();
1405}
1406
1407void NuPlayer::Renderer::syncQueuesDone_l() {
1408    if (!mSyncQueues) {
1409        return;
1410    }
1411
1412    mSyncQueues = false;
1413
1414    if (!mAudioQueue.empty()) {
1415        postDrainAudioQueue_l();
1416    }
1417
1418    if (!mVideoQueue.empty()) {
1419        mLock.unlock();
1420        postDrainVideoQueue();
1421        mLock.lock();
1422    }
1423}
1424
1425void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1426    int32_t audio;
1427    CHECK(msg->findInt32("audio", &audio));
1428
1429    if (dropBufferIfStale(audio, msg)) {
1430        return;
1431    }
1432
1433    int32_t finalResult;
1434    CHECK(msg->findInt32("finalResult", &finalResult));
1435
1436    QueueEntry entry;
1437    entry.mOffset = 0;
1438    entry.mFinalResult = finalResult;
1439
1440    if (audio) {
1441        Mutex::Autolock autoLock(mLock);
1442        if (mAudioQueue.empty() && mSyncQueues) {
1443            syncQueuesDone_l();
1444        }
1445        mAudioQueue.push_back(entry);
1446        postDrainAudioQueue_l();
1447    } else {
1448        if (mVideoQueue.empty() && getSyncQueues()) {
1449            Mutex::Autolock autoLock(mLock);
1450            syncQueuesDone_l();
1451        }
1452        mVideoQueue.push_back(entry);
1453        postDrainVideoQueue();
1454    }
1455}
1456
1457void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1458    int32_t audio, notifyComplete;
1459    CHECK(msg->findInt32("audio", &audio));
1460
1461    {
1462        Mutex::Autolock autoLock(mLock);
1463        if (audio) {
1464            notifyComplete = mNotifyCompleteAudio;
1465            mNotifyCompleteAudio = false;
1466            mLastAudioMediaTimeUs = -1;
1467        } else {
1468            notifyComplete = mNotifyCompleteVideo;
1469            mNotifyCompleteVideo = false;
1470        }
1471
1472        // If we're currently syncing the queues, i.e. dropping audio while
1473        // aligning the first audio/video buffer times and only one of the
1474        // two queues has data, we may starve that queue by not requesting
1475        // more buffers from the decoder. If the other source then encounters
1476        // a discontinuity that leads to flushing, we'll never find the
1477        // corresponding discontinuity on the other queue.
1478        // Therefore we'll stop syncing the queues if at least one of them
1479        // is flushed.
1480        syncQueuesDone_l();
1481        clearAnchorTime_l();
1482    }
1483
1484    ALOGV("flushing %s", audio ? "audio" : "video");
1485    if (audio) {
1486        {
1487            Mutex::Autolock autoLock(mLock);
1488            flushQueue(&mAudioQueue);
1489
1490            ++mAudioDrainGeneration;
1491            ++mAudioEOSGeneration;
1492            prepareForMediaRenderingStart_l();
1493
1494            // the frame count will be reset after flush.
1495            clearAudioFirstAnchorTime_l();
1496        }
1497
1498        mDrainAudioQueuePending = false;
1499
1500        if (offloadingAudio()) {
1501            mAudioSink->pause();
1502            mAudioSink->flush();
1503            if (!mPaused) {
1504                mAudioSink->start();
1505            }
1506        } else {
1507            mAudioSink->pause();
1508            mAudioSink->flush();
1509            // Call stop() to signal to the AudioSink to completely fill the
1510            // internal buffer before resuming playback.
1511            // FIXME: this is ignored after flush().
1512            mAudioSink->stop();
1513            if (mPaused) {
1514                // Race condition: if renderer is paused and audio sink is stopped,
1515                // we need to make sure that the audio track buffer fully drains
1516                // before delivering data.
1517                // FIXME: remove this if we can detect if stop() is complete.
1518                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
1519                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
1520            } else {
1521                mAudioSink->start();
1522            }
1523            mNumFramesWritten = 0;
1524        }
1525        mNextAudioClockUpdateTimeUs = -1;
1526    } else {
1527        flushQueue(&mVideoQueue);
1528
1529        mDrainVideoQueuePending = false;
1530
1531        if (mVideoScheduler != NULL) {
1532            mVideoScheduler->restart();
1533        }
1534
1535        Mutex::Autolock autoLock(mLock);
1536        ++mVideoDrainGeneration;
1537        prepareForMediaRenderingStart_l();
1538    }
1539
1540    mVideoSampleReceived = false;
1541
1542    if (notifyComplete) {
1543        notifyFlushComplete(audio);
1544    }
1545}
1546
1547void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1548    while (!queue->empty()) {
1549        QueueEntry *entry = &*queue->begin();
1550
1551        if (entry->mBuffer != NULL) {
1552            entry->mNotifyConsumed->post();
1553        }
1554
1555        queue->erase(queue->begin());
1556        entry = NULL;
1557    }
1558}
1559
1560void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1561    sp<AMessage> notify = mNotify->dup();
1562    notify->setInt32("what", kWhatFlushComplete);
1563    notify->setInt32("audio", static_cast<int32_t>(audio));
1564    notify->post();
1565}
1566
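// A queued-buffer message is stale when its queueGeneration no longer matches the renderer's
// current generation for that stream; stale buffers are handed back to the decoder via
// notifyConsumed and then dropped.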
1567bool NuPlayer::Renderer::dropBufferIfStale(
1568        bool audio, const sp<AMessage> &msg) {
1569    int32_t queueGeneration;
1570    CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1571
1572    if (queueGeneration == getQueueGeneration(audio)) {
1573        return false;
1574    }
1575
1576    sp<AMessage> notifyConsumed;
1577    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1578        notifyConsumed->post();
1579    }
1580
1581    return true;
1582}
1583
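// Re-baselines the frames-written bookkeeping after the AudioSink has changed; this is
// skipped entirely when offloading audio.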
1584void NuPlayer::Renderer::onAudioSinkChanged() {
1585    if (offloadingAudio()) {
1586        return;
1587    }
1588    CHECK(!mDrainAudioQueuePending);
1589    mNumFramesWritten = 0;
1590    {
1591        Mutex::Autolock autoLock(mLock);
1592        mAnchorNumFramesWritten = -1;
1593    }
1594    uint32_t written;
1595    if (mAudioSink->getFramesWritten(&written) == OK) {
1596        mNumFramesWritten = written;
1597    }
1598}
1599
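// Toggling offload below also bumps the audio drain generation so that any pending
// kWhatDrainAudioQueue message queued for the previous mode is discarded.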
1600void NuPlayer::Renderer::onDisableOffloadAudio() {
1601    Mutex::Autolock autoLock(mLock);
1602    mFlags &= ~FLAG_OFFLOAD_AUDIO;
1603    ++mAudioDrainGeneration;
1604    if (mAudioRenderingStartGeneration != -1) {
1605        prepareForMediaRenderingStart_l();
1606    }
1607}
1608
1609void NuPlayer::Renderer::onEnableOffloadAudio() {
1610    Mutex::Autolock autoLock(mLock);
1611    mFlags |= FLAG_OFFLOAD_AUDIO;
1612    ++mAudioDrainGeneration;
1613    if (mAudioRenderingStartGeneration != -1) {
1614        prepareForMediaRenderingStart_l();
1615    }
1616}
1617
1618void NuPlayer::Renderer::onPause() {
1619    if (mPaused) {
1620        return;
1621    }
1622
1623    {
1624        Mutex::Autolock autoLock(mLock);
1625        // We do not increment the audio drain generation so that the audio buffer keeps filling during pause.
1626        ++mVideoDrainGeneration;
1627        prepareForMediaRenderingStart_l();
1628        mPaused = true;
1629        mMediaClock->setPlaybackRate(0.0);
1630    }
1631
1632    mDrainAudioQueuePending = false;
1633    mDrainVideoQueuePending = false;
1634
1635    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1636    mAudioSink->pause();
1637    startAudioOffloadPauseTimeout();
1638
1639    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1640          mAudioQueue.size(), mVideoQueue.size());
1641}
1642
1643void NuPlayer::Renderer::onResume() {
1644    if (!mPaused) {
1645        return;
1646    }
1647
1648    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1649    cancelAudioOffloadPauseTimeout();
1650    if (mAudioSink->ready()) {
1651        status_t err = mAudioSink->start();
1652        if (err != OK) {
1653            ALOGE("cannot start AudioSink err %d", err);
1654            notifyAudioTearDown(kDueToError);
1655        }
1656    }
1657
1658    {
1659        Mutex::Autolock autoLock(mLock);
1660        mPaused = false;
1661        // rendering started message may have been delayed if we were paused.
1662        if (mRenderingDataDelivered) {
1663            notifyIfMediaRenderingStarted_l();
1664        }
1665        // Configure the AudioSink now, since we did not do it when pausing.
1666        if (mAudioSink != NULL && mAudioSink->ready()) {
1667            mAudioSink->setPlaybackRate(mPlaybackSettings);
1668        }
1669
1670        mMediaClock->setPlaybackRate(mPlaybackRate);
1671
1672        if (!mAudioQueue.empty()) {
1673            postDrainAudioQueue_l();
1674        }
1675    }
1676
1677    if (!mVideoQueue.empty()) {
1678        postDrainVideoQueue();
1679    }
1680}
1681
1682void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1683    if (mVideoScheduler == NULL) {
1684        mVideoScheduler = new VideoFrameScheduler();
1685    }
1686    mVideoScheduler->init(fps);
1687}
1688
1689int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1690    Mutex::Autolock autoLock(mLock);
1691    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1692}
1693
1694int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1695    Mutex::Autolock autoLock(mLock);
1696    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1697}
1698
1699bool NuPlayer::Renderer::getSyncQueues() {
1700    Mutex::Autolock autoLock(mLock);
1701    return mSyncQueues;
1702}
1703
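// Asks the player to tear down and rebuild the audio path (e.g. on an AudioSink error, or
// when offloading must be abandoned). The current position is attached when available,
// presumably so playback can resume from the same point.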
1704void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1705    if (mAudioTornDown) {
1706        return;
1707    }
1708    mAudioTornDown = true;
1709
1710    int64_t currentPositionUs;
1711    sp<AMessage> notify = mNotify->dup();
1712    if (getCurrentPosition(&currentPositionUs) == OK) {
1713        notify->setInt64("positionUs", currentPositionUs);
1714    }
1715
1716    mAudioSink->stop();
1717    mAudioSink->flush();
1718
1719    notify->setInt32("what", kWhatAudioTearDown);
1720    notify->setInt32("reason", reason);
1721    notify->post();
1722}
1723
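// When pausing while offloading audio, acquire the wake lock and schedule a
// kWhatAudioOffloadPauseTimeout message delayed by kOffloadPauseMaxUs; see
// cancelAudioOffloadPauseTimeout() below for the release path.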
1724void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1725    if (offloadingAudio()) {
1726        mWakeLock->acquire();
1727        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1728        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1729        msg->post(kOffloadPauseMaxUs);
1730    }
1731}
1732
1733void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1734    // We may have called startAudioOffloadPauseTimeout() without
1735    // the AudioSink open and with offloadingAudio enabled.
1736    //
1737    // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
1738    // we always release the wakelock and increment the pause timeout generation.
1739    //
1740    // Note: The acquired wakelock prevents the device from suspending
1741    // immediately after offload pause (in case a resume happens shortly thereafter).
1742    mWakeLock->release(true);
1743    ++mAudioOffloadPauseTimeoutGeneration;
1744}
1745
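// Opens the AudioSink either in compressed offload mode or in plain PCM mode. A failed
// offload open falls back to PCM (unless offloadOnly is set, in which case an audio
// teardown is requested), and an open with an unchanged configuration returns early with OK.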
1746status_t NuPlayer::Renderer::onOpenAudioSink(
1747        const sp<AMessage> &format,
1748        bool offloadOnly,
1749        bool hasVideo,
1750        uint32_t flags) {
1751    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1752            offloadOnly, offloadingAudio());
1753    bool audioSinkChanged = false;
1754
1755    int32_t numChannels;
1756    CHECK(format->findInt32("channel-count", &numChannels));
1757
1758    int32_t channelMask;
1759    if (!format->findInt32("channel-mask", &channelMask)) {
1760        // Signal the AudioSink to derive the mask from the channel count.
1761        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1762    }
1763
1764    int32_t sampleRate;
1765    CHECK(format->findInt32("sample-rate", &sampleRate));
1766
1767    if (offloadingAudio()) {
1768        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
1769        AString mime;
1770        CHECK(format->findString("mime", &mime));
1771        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1772
1773        if (err != OK) {
1774            ALOGE("Couldn't map mime \"%s\" to a valid "
1775                    "audio_format", mime.c_str());
1776            onDisableOffloadAudio();
1777        } else {
1778            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1779                    mime.c_str(), audioFormat);
1780
1781            int avgBitRate = -1;
1782            format->findInt32("bitrate", &avgBitRate);
1783
1784            int32_t aacProfile = -1;
1785            if (audioFormat == AUDIO_FORMAT_AAC
1786                    && format->findInt32("aac-profile", &aacProfile)) {
1787                // Redefine the AAC format according to the AAC profile.
1788                mapAACProfileToAudioFormat(
1789                        audioFormat,
1790                        aacProfile);
1791            }
1792
1793            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1794            offloadInfo.duration_us = -1;
1795            format->findInt64(
1796                    "durationUs", &offloadInfo.duration_us);
1797            offloadInfo.sample_rate = sampleRate;
1798            offloadInfo.channel_mask = channelMask;
1799            offloadInfo.format = audioFormat;
1800            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1801            offloadInfo.bit_rate = avgBitRate;
1802            offloadInfo.has_video = hasVideo;
1803            offloadInfo.is_streaming = true;
1804
1805            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1806                ALOGV("openAudioSink: no change in offload mode");
1807                // no change from previous configuration, everything ok.
1808                return OK;
1809            }
1810            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1811
1812            ALOGV("openAudioSink: try to open AudioSink in offload mode");
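            // Request a compressed-offload output; the deep-buffer flag does not apply to
            // offloaded output, so make sure it is not carried over from the caller's flags.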
1813            uint32_t offloadFlags = flags;
1814            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1815            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1816            audioSinkChanged = true;
1817            mAudioSink->close();
1818
1819            err = mAudioSink->open(
1820                    sampleRate,
1821                    numChannels,
1822                    (audio_channel_mask_t)channelMask,
1823                    audioFormat,
1824                    0 /* bufferCount - unused */,
1825                    &NuPlayer::Renderer::AudioSinkCallback,
1826                    this,
1827                    (audio_output_flags_t)offloadFlags,
1828                    &offloadInfo);
1829
1830            if (err == OK) {
1831                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1832            }
1833
1834            if (err == OK) {
1835                // If the playback is offloaded to h/w, we pass
1836                // the HAL some metadata information.
1837                // We don't want to do this for PCM because it
1838                // will be going through the AudioFlinger mixer
1839                // before reaching the hardware.
1840                // TODO
1841                mCurrentOffloadInfo = offloadInfo;
1842                if (!mPaused) { // for preview mode, don't start if paused
1843                    err = mAudioSink->start();
1844                }
1845                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1846            }
1847            if (err != OK) {
1848                // Clean up, fall back to non offload mode.
1849                mAudioSink->close();
1850                onDisableOffloadAudio();
1851                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1852                ALOGV("openAudioSink: offload failed");
1853                if (offloadOnly) {
1854                    notifyAudioTearDown(kForceNonOffload);
1855                }
1856            } else {
1857                mUseAudioCallback = true;  // offload mode transfers data through callback
1858                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1859            }
1860        }
1861    }
1862    if (!offloadOnly && !offloadingAudio()) {
1863        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1864        uint32_t pcmFlags = flags;
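        // Strip the compress-offload flag so it cannot leak into a non-offloaded open.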
1865        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1866
1867        const PcmInfo info = {
1868                (audio_channel_mask_t)channelMask,
1869                (audio_output_flags_t)pcmFlags,
1870                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
1871                numChannels,
1872                sampleRate
1873        };
1874        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
1875            ALOGV("openAudioSink: no change in pcm mode");
1876            // no change from previous configuration, everything ok.
1877            return OK;
1878        }
1879
1880        audioSinkChanged = true;
1881        mAudioSink->close();
1882        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1883        // Note: It is possible to set up the callback, but not use it to send audio data.
1884        // This requires a fix in AudioSink to explicitly specify the transfer mode.
1885        mUseAudioCallback = getUseAudioCallbackSetting();
1886        if (mUseAudioCallback) {
1887            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1888        }
1889
1890        // Compute the desired buffer size.
1891        // For callback mode, the amount of time before wakeup is about half the buffer size.
1892        const uint32_t frameCount =
1893                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
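        // For example, with a 48 kHz stream and the default 500 ms sink setting this is
        // 48000 * 500 / 1000 = 24000 frames (~500 ms of audio), so in callback mode the
        // wakeups would arrive roughly every 250 ms.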
1894
1895        // Setting doNotReconnect means the AudioSink will signal back and let NuPlayer
1896        // re-construct the AudioSink. We don't want this when there's video because it would
1897        // cause a video seek to the previous I-frame. But we do want it when there's only audio
1898        // because it gives NuPlayer a chance to switch from non-offload mode to offload mode.
1899        // So we only set doNotReconnect when there's no video.
1900        const bool doNotReconnect = !hasVideo;
1901
1902        // We should always be able to set our playback settings if the sink is closed.
1903        LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
1904                "onOpenAudioSink: can't set playback rate on closed sink");
1905        status_t err = mAudioSink->open(
1906                    sampleRate,
1907                    numChannels,
1908                    (audio_channel_mask_t)channelMask,
1909                    AUDIO_FORMAT_PCM_16_BIT,
1910                    0 /* bufferCount - unused */,
1911                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
1912                    mUseAudioCallback ? this : NULL,
1913                    (audio_output_flags_t)pcmFlags,
1914                    NULL,
1915                    doNotReconnect,
1916                    frameCount);
1917        if (err != OK) {
1918            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
1919            mAudioSink->close();
1920            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1921            return err;
1922        }
1923        mCurrentPcmInfo = info;
1924        if (!mPaused) { // for preview mode, don't start if paused
1925            mAudioSink->start();
1926        }
1927    }
1928    if (audioSinkChanged) {
1929        onAudioSinkChanged();
1930    }
1931    mAudioTornDown = false;
1932    return OK;
1933}
1934
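// Closing the sink also resets the cached offload/PCM configurations so that the next
// onOpenAudioSink() call cannot take the "no change" early return.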
1935void NuPlayer::Renderer::onCloseAudioSink() {
1936    mAudioSink->close();
1937    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1938    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1939}
1940
1941}  // namespace android
1942
1943