NuPlayerRenderer.cpp revision 9a3101b22b5115717faeac986b43fc6618fd3b30
1/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayerRenderer"
19#include <utils/Log.h>
20
21#include "NuPlayerRenderer.h"
22#include <algorithm>
23#include <cutils/properties.h>
24#include <media/stagefright/foundation/ADebug.h>
25#include <media/stagefright/foundation/AMessage.h>
26#include <media/stagefright/foundation/AUtils.h>
27#include <media/stagefright/foundation/AWakeLock.h>
28#include <media/stagefright/MediaClock.h>
29#include <media/stagefright/MediaErrors.h>
30#include <media/stagefright/MetaData.h>
31#include <media/stagefright/Utils.h>
32#include <media/stagefright/VideoFrameScheduler.h>
33#include <media/MediaCodecBuffer.h>
34
35#include <inttypes.h>
36
37namespace android {
38
39/*
40 * Example of common configuration settings in shell script form
41
42   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
43   adb shell setprop audio.offload.disable 1
44
45   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
46   adb shell setprop audio.offload.video 1
47
48   #Use audio callbacks for PCM data
49   adb shell setprop media.stagefright.audio.cbk 1
50
51   #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
52   adb shell setprop media.stagefright.audio.deep 1
53
54   #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
55   adb shell setprop media.stagefright.audio.sink 1000
56
57 * These configurations take effect for the next track played (not the current track).
58 */
59
60static inline bool getUseAudioCallbackSetting() {
61    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
62}
63
64static inline int32_t getAudioSinkPcmMsSetting() {
65    return property_get_int32(
66            "media.stagefright.audio.sink", 500 /* default_value */);
67}
68
69// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
70// is closed to allow the audio DSP to power down.
71static const int64_t kOffloadPauseMaxUs = 10000000ll;
72
73// Maximum allowed delay from AudioSink, 1.5 seconds.
74static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;
75
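// Minimum period between media clock anchor updates driven by the audio sink
// position (see onNewAudioMediaTime).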
76static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;
77
78// static
79const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
80        AUDIO_CHANNEL_NONE,
81        AUDIO_OUTPUT_FLAG_NONE,
82        AUDIO_FORMAT_INVALID,
83        0, // mNumChannels
84        0 // mSampleRate
85};
86
87// static
88const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
89
90NuPlayer::Renderer::Renderer(
91        const sp<MediaPlayerBase::AudioSink> &sink,
92        const sp<AMessage> &notify,
93        uint32_t flags)
94    : mAudioSink(sink),
95      mUseVirtualAudioSink(false),
96      mNotify(notify),
97      mFlags(flags),
98      mNumFramesWritten(0),
99      mDrainAudioQueuePending(false),
100      mDrainVideoQueuePending(false),
101      mAudioQueueGeneration(0),
102      mVideoQueueGeneration(0),
103      mAudioDrainGeneration(0),
104      mVideoDrainGeneration(0),
105      mAudioEOSGeneration(0),
106      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
107      mAudioFirstAnchorTimeMediaUs(-1),
108      mAnchorTimeMediaUs(-1),
109      mAnchorNumFramesWritten(-1),
110      mVideoLateByUs(0ll),
111      mHasAudio(false),
112      mHasVideo(false),
113      mNotifyCompleteAudio(false),
114      mNotifyCompleteVideo(false),
115      mSyncQueues(false),
116      mPaused(false),
117      mPauseDrainAudioAllowedUs(0),
118      mVideoSampleReceived(false),
119      mVideoRenderingStarted(false),
120      mVideoRenderingStartGeneration(0),
121      mAudioRenderingStartGeneration(0),
122      mRenderingDataDelivered(false),
123      mNextAudioClockUpdateTimeUs(-1),
124      mLastAudioMediaTimeUs(-1),
125      mAudioOffloadPauseTimeoutGeneration(0),
126      mAudioTornDown(false),
127      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
128      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
129      mTotalBuffersQueued(0),
130      mLastAudioBufferDrained(0),
131      mUseAudioCallback(false),
132      mWakeLock(new AWakeLock()) {
133    mMediaClock = new MediaClock;
134    mPlaybackRate = mPlaybackSettings.mSpeed;
135    mMediaClock->setPlaybackRate(mPlaybackRate);
136}
137
138NuPlayer::Renderer::~Renderer() {
139    if (offloadingAudio()) {
140        mAudioSink->stop();
141        mAudioSink->flush();
142        mAudioSink->close();
143    }
144
145    // Try to avoid a race condition in case the callback is still active.
146    Mutex::Autolock autoLock(mLock);
147    if (mUseAudioCallback) {
148        flushQueue(&mAudioQueue);
149        flushQueue(&mVideoQueue);
150    }
151    mWakeLock.clear();
152    mMediaClock.clear();
153    mVideoScheduler.clear();
154    mNotify.clear();
155    mAudioSink.clear();
156}
157
158void NuPlayer::Renderer::queueBuffer(
159        bool audio,
160        const sp<MediaCodecBuffer> &buffer,
161        const sp<AMessage> &notifyConsumed) {
162    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
163    msg->setInt32("queueGeneration", getQueueGeneration(audio));
164    msg->setInt32("audio", static_cast<int32_t>(audio));
165    msg->setObject("buffer", buffer);
166    msg->setMessage("notifyConsumed", notifyConsumed);
167    msg->post();
168}
169
170void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
171    CHECK_NE(finalResult, (status_t)OK);
172
173    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
174    msg->setInt32("queueGeneration", getQueueGeneration(audio));
175    msg->setInt32("audio", static_cast<int32_t>(audio));
176    msg->setInt32("finalResult", finalResult);
177    msg->post();
178}
179
180status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
181    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
182    writeToAMessage(msg, rate);
183    sp<AMessage> response;
184    status_t err = msg->postAndAwaitResponse(&response);
185    if (err == OK && response != NULL) {
186        CHECK(response->findInt32("err", &err));
187    }
188    return err;
189}
190
191status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
192    if (rate.mSpeed == 0.f) {
193        onPause();
194        // Don't call the audio sink's setPlaybackRate when pausing, as the pitch does not
195        // have to correspond to any non-zero speed (e.g. the old speed). Keep the
196        // settings nonetheless, using the old speed, in case the audio sink changes.
197        AudioPlaybackRate newRate = rate;
198        newRate.mSpeed = mPlaybackSettings.mSpeed;
199        mPlaybackSettings = newRate;
200        return OK;
201    }
202
203    if (mAudioSink != NULL && mAudioSink->ready()) {
204        status_t err = mAudioSink->setPlaybackRate(rate);
205        if (err != OK) {
206            return err;
207        }
208    }
209    mPlaybackSettings = rate;
210    mPlaybackRate = rate.mSpeed;
211    mMediaClock->setPlaybackRate(mPlaybackRate);
212    return OK;
213}
214
215status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
216    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
217    sp<AMessage> response;
218    status_t err = msg->postAndAwaitResponse(&response);
219    if (err == OK && response != NULL) {
220        CHECK(response->findInt32("err", &err));
221        if (err == OK) {
222            readFromAMessage(response, rate);
223        }
224    }
225    return err;
226}
227
228status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
229    if (mAudioSink != NULL && mAudioSink->ready()) {
230        status_t err = mAudioSink->getPlaybackRate(rate);
231        if (err == OK) {
232            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
233                ALOGW("correcting mismatch in internal/external playback rate");
234            }
235            // Get the playback settings actually used by the audio sink, as they may be
236            // slightly off because the audio sink does not apply small changes.
237            mPlaybackSettings = *rate;
238            if (mPaused) {
239                rate->mSpeed = 0.f;
240            }
241        }
242        return err;
243    }
244    *rate = mPlaybackSettings;
245    return OK;
246}
247
248status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
249    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
250    writeToAMessage(msg, sync, videoFpsHint);
251    sp<AMessage> response;
252    status_t err = msg->postAndAwaitResponse(&response);
253    if (err == OK && response != NULL) {
254        CHECK(response->findInt32("err", &err));
255    }
256    return err;
257}
258
259status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
260    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
261        return BAD_VALUE;
262    }
263    // TODO: support sync sources
264    return INVALID_OPERATION;
265}
266
267status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
268    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
269    sp<AMessage> response;
270    status_t err = msg->postAndAwaitResponse(&response);
271    if (err == OK && response != NULL) {
272        CHECK(response->findInt32("err", &err));
273        if (err == OK) {
274            readFromAMessage(response, sync, videoFps);
275        }
276    }
277    return err;
278}
279
280status_t NuPlayer::Renderer::onGetSyncSettings(
281        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
282    *sync = mSyncSettings;
283    *videoFps = -1.f;
284    return OK;
285}
286
287void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
288    {
289        Mutex::Autolock autoLock(mLock);
290        if (audio) {
291            mNotifyCompleteAudio |= notifyComplete;
292            clearAudioFirstAnchorTime_l();
293            ++mAudioQueueGeneration;
294            ++mAudioDrainGeneration;
295        } else {
296            mNotifyCompleteVideo |= notifyComplete;
297            ++mVideoQueueGeneration;
298            ++mVideoDrainGeneration;
299        }
300
301        mMediaClock->clearAnchor();
302        mVideoLateByUs = 0;
303        mSyncQueues = false;
304    }
305
306    sp<AMessage> msg = new AMessage(kWhatFlush, this);
307    msg->setInt32("audio", static_cast<int32_t>(audio));
308    msg->post();
309}
310
311void NuPlayer::Renderer::signalTimeDiscontinuity() {
312}
313
314void NuPlayer::Renderer::signalDisableOffloadAudio() {
315    (new AMessage(kWhatDisableOffloadAudio, this))->post();
316}
317
318void NuPlayer::Renderer::signalEnableOffloadAudio() {
319    (new AMessage(kWhatEnableOffloadAudio, this))->post();
320}
321
322void NuPlayer::Renderer::pause() {
323    (new AMessage(kWhatPause, this))->post();
324}
325
326void NuPlayer::Renderer::resume() {
327    (new AMessage(kWhatResume, this))->post();
328}
329
330void NuPlayer::Renderer::setVideoFrameRate(float fps) {
331    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
332    msg->setFloat("frame-rate", fps);
333    msg->post();
334}
335
336// Called on any threads without mLock acquired.
337status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
338    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
339    if (result == OK) {
340        return result;
341    }
342
343    // MediaClock has not started yet. Try to start it if possible.
344    {
345        Mutex::Autolock autoLock(mLock);
346        if (mAudioFirstAnchorTimeMediaUs == -1) {
347            return result;
348        }
349
350        AudioTimestamp ts;
351        status_t res = mAudioSink->getTimestamp(ts);
352        if (res != OK) {
353            return result;
354        }
355
356        // AudioSink has rendered some frames.
357        int64_t nowUs = ALooper::GetNowUs();
358        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
359                + mAudioFirstAnchorTimeMediaUs;
360        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
361    }
362
363    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
364}
365
366void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
367    mAudioFirstAnchorTimeMediaUs = -1;
368    mMediaClock->setStartingTimeMedia(-1);
369}
370
371void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
372    if (mAudioFirstAnchorTimeMediaUs == -1) {
373        mAudioFirstAnchorTimeMediaUs = mediaUs;
374        mMediaClock->setStartingTimeMedia(mediaUs);
375    }
376}
377
378// Called on renderer looper.
379void NuPlayer::Renderer::clearAnchorTime() {
380    mMediaClock->clearAnchor();
381    mAnchorTimeMediaUs = -1;
382    mAnchorNumFramesWritten = -1;
383}
384
385void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
386    Mutex::Autolock autoLock(mLock);
387    mVideoLateByUs = lateUs;
388}
389
390int64_t NuPlayer::Renderer::getVideoLateByUs() {
391    Mutex::Autolock autoLock(mLock);
392    return mVideoLateByUs;
393}
394
395status_t NuPlayer::Renderer::openAudioSink(
396        const sp<AMessage> &format,
397        bool offloadOnly,
398        bool hasVideo,
399        uint32_t flags,
400        bool *isOffloaded) {
401    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
402    msg->setMessage("format", format);
403    msg->setInt32("offload-only", offloadOnly);
404    msg->setInt32("has-video", hasVideo);
405    msg->setInt32("flags", flags);
406
407    sp<AMessage> response;
408    msg->postAndAwaitResponse(&response);
409
410    int32_t err;
411    if (!response->findInt32("err", &err)) {
412        err = INVALID_OPERATION;
413    } else if (err == OK && isOffloaded != NULL) {
414        int32_t offload;
415        CHECK(response->findInt32("offload", &offload));
416        *isOffloaded = (offload != 0);
417    }
418    return err;
419}
420
421void NuPlayer::Renderer::closeAudioSink() {
422    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
423
424    sp<AMessage> response;
425    msg->postAndAwaitResponse(&response);
426}
427
428void NuPlayer::Renderer::changeAudioFormat(
429        const sp<AMessage> &format,
430        bool offloadOnly,
431        bool hasVideo,
432        uint32_t flags,
433        const sp<AMessage> &notify) {
434    sp<AMessage> meta = new AMessage;
435    meta->setMessage("format", format);
436    meta->setInt32("offload-only", offloadOnly);
437    meta->setInt32("has-video", hasVideo);
438    meta->setInt32("flags", flags);
439
440    sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
441    msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
442    msg->setMessage("notify", notify);
443    msg->setMessage("meta", meta);
444    msg->post();
445}
446
447void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
448    switch (msg->what()) {
449        case kWhatOpenAudioSink:
450        {
451            sp<AMessage> format;
452            CHECK(msg->findMessage("format", &format));
453
454            int32_t offloadOnly;
455            CHECK(msg->findInt32("offload-only", &offloadOnly));
456
457            int32_t hasVideo;
458            CHECK(msg->findInt32("has-video", &hasVideo));
459
460            uint32_t flags;
461            CHECK(msg->findInt32("flags", (int32_t *)&flags));
462
463            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
464
465            sp<AMessage> response = new AMessage;
466            response->setInt32("err", err);
467            response->setInt32("offload", offloadingAudio());
468
469            sp<AReplyToken> replyID;
470            CHECK(msg->senderAwaitsResponse(&replyID));
471            response->postReply(replyID);
472
473            break;
474        }
475
476        case kWhatCloseAudioSink:
477        {
478            sp<AReplyToken> replyID;
479            CHECK(msg->senderAwaitsResponse(&replyID));
480
481            onCloseAudioSink();
482
483            sp<AMessage> response = new AMessage;
484            response->postReply(replyID);
485            break;
486        }
487
488        case kWhatStopAudioSink:
489        {
490            mAudioSink->stop();
491            break;
492        }
493
494        case kWhatChangeAudioFormat:
495        {
496            int32_t queueGeneration;
497            CHECK(msg->findInt32("queueGeneration", &queueGeneration));
498
499            sp<AMessage> notify;
500            CHECK(msg->findMessage("notify", &notify));
501
502            if (offloadingAudio()) {
503                ALOGW("changeAudioFormat should NOT be called in offload mode");
504                notify->setInt32("err", INVALID_OPERATION);
505                notify->post();
506                break;
507            }
508
509            sp<AMessage> meta;
510            CHECK(msg->findMessage("meta", &meta));
511
512            if (queueGeneration != getQueueGeneration(true /* audio */)
513                    || mAudioQueue.empty()) {
514                onChangeAudioFormat(meta, notify);
515                break;
516            }
517
518            QueueEntry entry;
519            entry.mNotifyConsumed = notify;
520            entry.mMeta = meta;
521
522            Mutex::Autolock autoLock(mLock);
523            mAudioQueue.push_back(entry);
524            postDrainAudioQueue_l();
525
526            break;
527        }
528
529        case kWhatDrainAudioQueue:
530        {
531            mDrainAudioQueuePending = false;
532
533            int32_t generation;
534            CHECK(msg->findInt32("drainGeneration", &generation));
535            if (generation != getDrainGeneration(true /* audio */)) {
536                break;
537            }
538
539            if (onDrainAudioQueue()) {
540                uint32_t numFramesPlayed;
541                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
542                         (status_t)OK);
543
544                uint32_t numFramesPendingPlayout =
545                    mNumFramesWritten - numFramesPlayed;
546
547                // This is how long the audio sink will have data to
548                // play back.
549                int64_t delayUs =
550                    mAudioSink->msecsPerFrame()
551                        * numFramesPendingPlayout * 1000ll;
552                if (mPlaybackRate > 1.0f) {
553                    delayUs /= mPlaybackRate;
554                }
555
556                // Let's give it more data after about half that time
557                // has elapsed.
558                delayUs /= 2;
559                // check the buffer size to estimate maximum delay permitted.
560                const int64_t maxDrainDelayUs = std::max(
561                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
562                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
563                        (long long)delayUs, (long long)maxDrainDelayUs);
564                Mutex::Autolock autoLock(mLock);
565                postDrainAudioQueue_l(delayUs);
566            }
567            break;
568        }
569
570        case kWhatDrainVideoQueue:
571        {
572            int32_t generation;
573            CHECK(msg->findInt32("drainGeneration", &generation));
574            if (generation != getDrainGeneration(false /* audio */)) {
575                break;
576            }
577
578            mDrainVideoQueuePending = false;
579
580            onDrainVideoQueue();
581
582            postDrainVideoQueue();
583            break;
584        }
585
586        case kWhatPostDrainVideoQueue:
587        {
588            int32_t generation;
589            CHECK(msg->findInt32("drainGeneration", &generation));
590            if (generation != getDrainGeneration(false /* audio */)) {
591                break;
592            }
593
594            mDrainVideoQueuePending = false;
595            postDrainVideoQueue();
596            break;
597        }
598
599        case kWhatQueueBuffer:
600        {
601            onQueueBuffer(msg);
602            break;
603        }
604
605        case kWhatQueueEOS:
606        {
607            onQueueEOS(msg);
608            break;
609        }
610
611        case kWhatEOS:
612        {
613            int32_t generation;
614            CHECK(msg->findInt32("audioEOSGeneration", &generation));
615            if (generation != mAudioEOSGeneration) {
616                break;
617            }
618            status_t finalResult;
619            CHECK(msg->findInt32("finalResult", &finalResult));
620            notifyEOS(true /* audio */, finalResult);
621            break;
622        }
623
624        case kWhatConfigPlayback:
625        {
626            sp<AReplyToken> replyID;
627            CHECK(msg->senderAwaitsResponse(&replyID));
628            AudioPlaybackRate rate;
629            readFromAMessage(msg, &rate);
630            status_t err = onConfigPlayback(rate);
631            sp<AMessage> response = new AMessage;
632            response->setInt32("err", err);
633            response->postReply(replyID);
634            break;
635        }
636
637        case kWhatGetPlaybackSettings:
638        {
639            sp<AReplyToken> replyID;
640            CHECK(msg->senderAwaitsResponse(&replyID));
641            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
642            status_t err = onGetPlaybackSettings(&rate);
643            sp<AMessage> response = new AMessage;
644            if (err == OK) {
645                writeToAMessage(response, rate);
646            }
647            response->setInt32("err", err);
648            response->postReply(replyID);
649            break;
650        }
651
652        case kWhatConfigSync:
653        {
654            sp<AReplyToken> replyID;
655            CHECK(msg->senderAwaitsResponse(&replyID));
656            AVSyncSettings sync;
657            float videoFpsHint;
658            readFromAMessage(msg, &sync, &videoFpsHint);
659            status_t err = onConfigSync(sync, videoFpsHint);
660            sp<AMessage> response = new AMessage;
661            response->setInt32("err", err);
662            response->postReply(replyID);
663            break;
664        }
665
666        case kWhatGetSyncSettings:
667        {
668            sp<AReplyToken> replyID;
669            CHECK(msg->senderAwaitsResponse(&replyID));
670
671            ALOGV("kWhatGetSyncSettings");
672            AVSyncSettings sync;
673            float videoFps = -1.f;
674            status_t err = onGetSyncSettings(&sync, &videoFps);
675            sp<AMessage> response = new AMessage;
676            if (err == OK) {
677                writeToAMessage(response, sync, videoFps);
678            }
679            response->setInt32("err", err);
680            response->postReply(replyID);
681            break;
682        }
683
684        case kWhatFlush:
685        {
686            onFlush(msg);
687            break;
688        }
689
690        case kWhatDisableOffloadAudio:
691        {
692            onDisableOffloadAudio();
693            break;
694        }
695
696        case kWhatEnableOffloadAudio:
697        {
698            onEnableOffloadAudio();
699            break;
700        }
701
702        case kWhatPause:
703        {
704            onPause();
705            break;
706        }
707
708        case kWhatResume:
709        {
710            onResume();
711            break;
712        }
713
714        case kWhatSetVideoFrameRate:
715        {
716            float fps;
717            CHECK(msg->findFloat("frame-rate", &fps));
718            onSetVideoFrameRate(fps);
719            break;
720        }
721
722        case kWhatAudioTearDown:
723        {
724            int32_t reason;
725            CHECK(msg->findInt32("reason", &reason));
726
727            onAudioTearDown((AudioTearDownReason)reason);
728            break;
729        }
730
731        case kWhatAudioOffloadPauseTimeout:
732        {
733            int32_t generation;
734            CHECK(msg->findInt32("drainGeneration", &generation));
735            if (generation != mAudioOffloadPauseTimeoutGeneration) {
736                break;
737            }
738            ALOGV("Audio Offload tear down due to pause timeout.");
739            onAudioTearDown(kDueToTimeout);
740            mWakeLock->release();
741            break;
742        }
743
744        default:
745            TRESPASS();
746            break;
747    }
748}
749
750void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
751    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
752        return;
753    }
754
755    if (mAudioQueue.empty()) {
756        return;
757    }
758
759    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
760    if (mPaused) {
761        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
762        if (diffUs > delayUs) {
763            delayUs = diffUs;
764        }
765    }
766
767    mDrainAudioQueuePending = true;
768    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
769    msg->setInt32("drainGeneration", mAudioDrainGeneration);
770    msg->post(delayUs);
771}
772
773void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
774    mAudioRenderingStartGeneration = mAudioDrainGeneration;
775    mVideoRenderingStartGeneration = mVideoDrainGeneration;
776    mRenderingDataDelivered = false;
777}
778
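// Posts kWhatMediaRenderingStart the first time data is delivered after
// prepareForMediaRenderingStart_l(), provided neither stream has been flushed
// (i.e. neither drain generation has changed) in the meantime.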
779void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
780    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
781        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
782        mRenderingDataDelivered = true;
783        if (mPaused) {
784            return;
785        }
786        mVideoRenderingStartGeneration = -1;
787        mAudioRenderingStartGeneration = -1;
788
789        sp<AMessage> notify = mNotify->dup();
790        notify->setInt32("what", kWhatMediaRenderingStart);
791        notify->post();
792    }
793}
794
795// static
796size_t NuPlayer::Renderer::AudioSinkCallback(
797        MediaPlayerBase::AudioSink * /* audioSink */,
798        void *buffer,
799        size_t size,
800        void *cookie,
801        MediaPlayerBase::AudioSink::cb_event_t event) {
802    NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
803
804    switch (event) {
805        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
806        {
807            return me->fillAudioBuffer(buffer, size);
808            break;
809        }
810
811        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
812        {
813            ALOGV("AudioSink::CB_EVENT_STREAM_END");
814            me->notifyEOSCallback();
815            break;
816        }
817
818        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
819        {
820            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
821            me->notifyAudioTearDown(kDueToError);
822            break;
823        }
824    }
825
826    return 0;
827}
828
829void NuPlayer::Renderer::notifyEOSCallback() {
830    Mutex::Autolock autoLock(mLock);
831
832    if (!mUseAudioCallback) {
833        return;
834    }
835
836    notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
837}
838
839size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
840    Mutex::Autolock autoLock(mLock);
841
842    if (!mUseAudioCallback) {
843        return 0;
844    }
845
846    bool hasEOS = false;
847
848    size_t sizeCopied = 0;
849    bool firstEntry = true;
850    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
851    while (sizeCopied < size && !mAudioQueue.empty()) {
852        entry = &*mAudioQueue.begin();
853
854        if (entry->mBuffer == NULL) { // EOS
855            hasEOS = true;
856            mAudioQueue.erase(mAudioQueue.begin());
857            break;
858        }
859
860        if (firstEntry && entry->mOffset == 0) {
861            firstEntry = false;
862            int64_t mediaTimeUs;
863            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
864            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
865            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
866        }
867
868        size_t copy = entry->mBuffer->size() - entry->mOffset;
869        size_t sizeRemaining = size - sizeCopied;
870        if (copy > sizeRemaining) {
871            copy = sizeRemaining;
872        }
873
874        memcpy((char *)buffer + sizeCopied,
875               entry->mBuffer->data() + entry->mOffset,
876               copy);
877
878        entry->mOffset += copy;
879        if (entry->mOffset == entry->mBuffer->size()) {
880            entry->mNotifyConsumed->post();
881            mAudioQueue.erase(mAudioQueue.begin());
882            entry = NULL;
883        }
884        sizeCopied += copy;
885
886        notifyIfMediaRenderingStarted_l();
887    }
888
889    if (mAudioFirstAnchorTimeMediaUs >= 0) {
890        int64_t nowUs = ALooper::GetNowUs();
891        int64_t nowMediaUs =
892            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
893        // we don't know how much data we are queueing for offloaded tracks.
894        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
895    }
896
897    // For non-offloaded audio, we need to compute the frames written because
898    // there is no EVENT_STREAM_END notification. The frame count written gives
899    // an estimate of the pending played-out duration.
900    if (!offloadingAudio()) {
901        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
902    }
903
904    if (hasEOS) {
905        (new AMessage(kWhatStopAudioSink, this))->post();
906        // As there is currently no EVENT_STREAM_END callback notification for
907        // non-offloaded audio tracks, we need to post the EOS ourselves.
908        if (!offloadingAudio()) {
909            int64_t postEOSDelayUs = 0;
910            if (mAudioSink->needsTrailingPadding()) {
911                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
912            }
913            ALOGV("fillAudioBuffer: notifyEOS "
914                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
915                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
916            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
917        }
918    }
919    return sizeCopied;
920}
921
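// Replies to and drops every queued audio entry up to and including the last
// EOS entry (or explicit "eos" marker), so that a pending EOS notification is
// still delivered even when the audio sink cannot be drained.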
922void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
923    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
924    bool foundEOS = false;
925    while (it != mAudioQueue.end()) {
926        int32_t eos;
927        QueueEntry *entry = &*it++;
928        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
929                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
930            itEOS = it;
931            foundEOS = true;
932        }
933    }
934
935    if (foundEOS) {
936        // post all replies before EOS and drop the samples
937        for (it = mAudioQueue.begin(); it != itEOS; it++) {
938            if (it->mBuffer == nullptr) {
939                if (it->mNotifyConsumed == nullptr) {
940                    // delay doesn't matter as we don't even have an AudioTrack
941                    notifyEOS(true /* audio */, it->mFinalResult);
942                } else {
943                    // TAG for re-opening audio sink.
944                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
945                }
946            } else {
947                it->mNotifyConsumed->post();
948            }
949        }
950        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
951    }
952}
953
954bool NuPlayer::Renderer::onDrainAudioQueue() {
955    // do not drain audio during teardown as queued buffers may be invalid.
956    if (mAudioTornDown) {
957        return false;
958    }
959    // TODO: This call to getPosition checks if AudioTrack has been created
960    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
961    // CHECKs on getPosition will fail.
962    // We still need to figure out why AudioTrack is not created when
963    // this function is called. One possible reason could be leftover
964    // audio. Another thing to check is whether the decoder
965    // has received INFO_FORMAT_CHANGED as the first buffer, since
966    // AudioSink is opened there, and possible interactions with flush
967    // immediately after start. Investigate error message
968    // "vorbis_dsp_synthesis returned -135", along with RTSP.
969    uint32_t numFramesPlayed;
970    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
971        // When getPosition fails, renderer will not reschedule the draining
972        // unless new samples are queued.
973        // If we have pending EOS (or "eos" marker for discontinuities), we need
974        // to post these now as NuPlayerDecoder might be waiting for it.
975        drainAudioQueueUntilLastEOS();
976
977        ALOGW("onDrainAudioQueue(): audio sink is not ready");
978        return false;
979    }
980
981#if 0
982    ssize_t numFramesAvailableToWrite =
983        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
984
985    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
986        ALOGI("audio sink underrun");
987    } else {
988        ALOGV("audio queue has %d frames left to play",
989             mAudioSink->frameCount() - numFramesAvailableToWrite);
990    }
991#endif
992
993    uint32_t prevFramesWritten = mNumFramesWritten;
994    while (!mAudioQueue.empty()) {
995        QueueEntry *entry = &*mAudioQueue.begin();
996
997        if (entry->mBuffer == NULL) {
998            if (entry->mNotifyConsumed != nullptr) {
999                // TAG for re-open audio sink.
1000                onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1001                mAudioQueue.erase(mAudioQueue.begin());
1002                continue;
1003            }
1004
1005            // EOS
1006            int64_t postEOSDelayUs = 0;
1007            if (mAudioSink->needsTrailingPadding()) {
1008                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
1009            }
1010            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
1011            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1012
1013            mAudioQueue.erase(mAudioQueue.begin());
1014            entry = NULL;
1015            if (mAudioSink->needsTrailingPadding()) {
1016                // If we're not in gapless playback (i.e. through setNextPlayer), we
1017                // need to stop the track here, because that will play out the last
1018                // little bit at the end of the file. Otherwise short files won't play.
1019                mAudioSink->stop();
1020                mNumFramesWritten = 0;
1021            }
1022            return false;
1023        }
1024
1025        mLastAudioBufferDrained = entry->mBufferOrdinal;
1026
1027        // ignore 0-sized buffer which could be EOS marker with no data
1028        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
1029            int64_t mediaTimeUs;
1030            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1031            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
1032                    mediaTimeUs / 1E6);
1033            onNewAudioMediaTime(mediaTimeUs);
1034        }
1035
1036        size_t copy = entry->mBuffer->size() - entry->mOffset;
1037
1038        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
1039                                            copy, false /* blocking */);
1040        if (written < 0) {
1041            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
1042            if (written == WOULD_BLOCK) {
1043                ALOGV("AudioSink write would block when writing %zu bytes", copy);
1044            } else {
1045                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
1046                // This can only happen when AudioSink was opened with doNotReconnect flag set to
1047                // true, in which case the NuPlayer will handle the reconnect.
1048                notifyAudioTearDown(kDueToError);
1049            }
1050            break;
1051        }
1052
1053        entry->mOffset += written;
1054        size_t remainder = entry->mBuffer->size() - entry->mOffset;
1055        if ((ssize_t)remainder < mAudioSink->frameSize()) {
1056            if (remainder > 0) {
1057                ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
1058                        remainder);
1059                entry->mOffset += remainder;
1060                copy -= remainder;
1061            }
1062
1063            entry->mNotifyConsumed->post();
1064            mAudioQueue.erase(mAudioQueue.begin());
1065
1066            entry = NULL;
1067        }
1068
1069        size_t copiedFrames = written / mAudioSink->frameSize();
1070        mNumFramesWritten += copiedFrames;
1071
1072        {
1073            Mutex::Autolock autoLock(mLock);
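            // Cap the media clock at the media time corresponding to all audio
            // frames written to the sink so far (relative to the current anchor).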
1074            int64_t maxTimeMedia;
1075            maxTimeMedia =
1076                mAnchorTimeMediaUs +
1077                        (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
1078                                * 1000LL * mAudioSink->msecsPerFrame());
1079            mMediaClock->updateMaxTimeMedia(maxTimeMedia);
1080
1081            notifyIfMediaRenderingStarted_l();
1082        }
1083
1084        if (written != (ssize_t)copy) {
1085            // A short count was received from AudioSink::write()
1086            //
1087            // AudioSink write is called in non-blocking mode.
1088            // It may return with a short count when:
1089            //
1090            // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
1091            //    discarded.
1092            // 2) The data to be copied exceeds the available buffer in AudioSink.
1093            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
1094            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
1095
1096            // (Case 1)
1097            // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
1098            // needs to fail, as we should not carry over fractional frames between calls.
1099            CHECK_EQ(copy % mAudioSink->frameSize(), 0);
1100
1101            // (Case 2, 3, 4)
1102            // Return early to the caller.
1103            // Beware of calling immediately again as this may busy-loop if you are not careful.
1104            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
1105            break;
1106        }
1107    }
1108
1109    // calculate whether we need to reschedule another write.
1110    bool reschedule = !mAudioQueue.empty()
1111            && (!mPaused
1112                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
1113    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
1114    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
1115    return reschedule;
1116}
1117
1118int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
1119    int32_t sampleRate = offloadingAudio() ?
1120            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
1121    if (sampleRate == 0) {
1122        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
1123        return 0;
1124    }
1125    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
1126    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
1127}
1128
1129// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
1130int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
1131    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
1132    if (mUseVirtualAudioSink) {
1133        int64_t nowUs = ALooper::GetNowUs();
1134        int64_t mediaUs;
1135        if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
1136            return 0ll;
1137        } else {
1138            return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
1139        }
1140    }
1141    return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs);
1142}
1143
1144int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
1145    int64_t realUs;
1146    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
1147        // If we failed to get the current position, e.g. because the audio clock is
1148        // not ready, then just play out the video immediately without delay.
1149        return nowUs;
1150    }
1151    return realUs;
1152}
1153
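// Called with the media timestamp of each newly drained audio buffer; updates
// the media clock anchor based on the audio sink's playout position.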
1154void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
1155    Mutex::Autolock autoLock(mLock);
1156    // TRICKY: vorbis decoder generates multiple frames with the same
1157    // timestamp, so only update on the first frame with a given timestamp
1158    if (mediaTimeUs == mAnchorTimeMediaUs) {
1159        return;
1160    }
1161    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
1162
1163    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
1164    if (mNextAudioClockUpdateTimeUs == -1) {
1165        AudioTimestamp ts;
1166        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
1167            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
1168        }
1169    }
1170    int64_t nowUs = ALooper::GetNowUs();
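    // Once the sink has started rendering, re-anchor the media clock at most
    // once every kMinimumAudioClockUpdatePeriodUs.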
1171    if (mNextAudioClockUpdateTimeUs >= 0) {
1172        if (nowUs >= mNextAudioClockUpdateTimeUs) {
1173            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
1174            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
1175            mUseVirtualAudioSink = false;
1176            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
1177        }
1178    } else {
1179        int64_t unused;
1180        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
1181                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
1182                        > kMaxAllowedAudioSinkDelayUs)) {
1183            // Enough data has been sent to AudioSink, but AudioSink has not rendered
1184            // any data yet. Something is wrong with AudioSink, e.g., the device is not
1185            // connected to audio out.
1186            // Switch to system clock. This essentially creates a virtual AudioSink with
1187            // initial latency of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
1188            // This virtual AudioSink renders audio data starting from the very first sample
1189            // and it's paced by system clock.
1190            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
1191            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
1192            mUseVirtualAudioSink = true;
1193        }
1194    }
1195    mAnchorNumFramesWritten = mNumFramesWritten;
1196    mAnchorTimeMediaUs = mediaTimeUs;
1197}
1198
1199// Called without mLock acquired.
1200void NuPlayer::Renderer::postDrainVideoQueue() {
1201    if (mDrainVideoQueuePending
1202            || getSyncQueues()
1203            || (mPaused && mVideoSampleReceived)) {
1204        return;
1205    }
1206
1207    if (mVideoQueue.empty()) {
1208        return;
1209    }
1210
1211    QueueEntry &entry = *mVideoQueue.begin();
1212
1213    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
1214    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
1215
1216    if (entry.mBuffer == NULL) {
1217        // EOS doesn't carry a timestamp.
1218        msg->post();
1219        mDrainVideoQueuePending = true;
1220        return;
1221    }
1222
1223    bool needRepostDrainVideoQueue = false;
1224    int64_t delayUs;
1225    int64_t nowUs = ALooper::GetNowUs();
1226    int64_t realTimeUs;
1227    if (mFlags & FLAG_REAL_TIME) {
1228        int64_t mediaTimeUs;
1229        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1230        realTimeUs = mediaTimeUs;
1231    } else {
1232        int64_t mediaTimeUs;
1233        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1234
1235        {
1236            Mutex::Autolock autoLock(mLock);
1237            if (mAnchorTimeMediaUs < 0) {
1238                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
1239                mAnchorTimeMediaUs = mediaTimeUs;
1240                realTimeUs = nowUs;
1241            } else if (!mVideoSampleReceived) {
1242                // Always render the first video frame.
1243                realTimeUs = nowUs;
1244            } else if (mAudioFirstAnchorTimeMediaUs < 0
1245                || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) {
1246                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1247            } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) {
1248                needRepostDrainVideoQueue = true;
1249                realTimeUs = nowUs;
1250            } else {
1251                realTimeUs = nowUs;
1252            }
1253        }
1254        if (!mHasAudio) {
1255            // smooth out videos >= 10fps
1256            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1257        }
1258
1259        // Heuristics to handle situation when media time changed without a
1260        // discontinuity. If we have not drained an audio buffer that was
1261        // received after this buffer, repost in 10 msec. Otherwise repost
1262        // in 500 msec.
1263        delayUs = realTimeUs - nowUs;
1264        int64_t postDelayUs = -1;
1265        if (delayUs > 500000) {
1266            postDelayUs = 500000;
1267            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
1268                postDelayUs = 10000;
1269            }
1270        } else if (needRepostDrainVideoQueue) {
1271            // CHECK(mPlaybackRate > 0);
1272            // CHECK(mAudioFirstAnchorTimeMediaUs >= 0);
1273            // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0);
1274            postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs;
1275            postDelayUs /= mPlaybackRate;
1276        }
1277
1278        if (postDelayUs >= 0) {
1279            msg->setWhat(kWhatPostDrainVideoQueue);
1280            msg->post(postDelayUs);
1281            mVideoScheduler->restart();
1282            ALOGI("possible video time jump of %dms (%lld : %lld) or uninitialized media clock,"
1283                    " retrying in %dms",
1284                    (int)(delayUs / 1000), (long long)mediaTimeUs,
1285                    (long long)mAudioFirstAnchorTimeMediaUs, (int)(postDelayUs / 1000));
1286            mDrainVideoQueuePending = true;
1287            return;
1288        }
1289    }
1290
1291    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1292    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1293
1294    delayUs = realTimeUs - nowUs;
1295
1296    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
1297    // post 2 display refreshes before rendering is due
1298    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1299
1300    mDrainVideoQueuePending = true;
1301}
1302
1303void NuPlayer::Renderer::onDrainVideoQueue() {
1304    if (mVideoQueue.empty()) {
1305        return;
1306    }
1307
1308    QueueEntry *entry = &*mVideoQueue.begin();
1309
1310    if (entry->mBuffer == NULL) {
1311        // EOS
1312
1313        notifyEOS(false /* audio */, entry->mFinalResult);
1314
1315        mVideoQueue.erase(mVideoQueue.begin());
1316        entry = NULL;
1317
1318        setVideoLateByUs(0);
1319        return;
1320    }
1321
1322    int64_t nowUs = ALooper::GetNowUs();
1323    int64_t realTimeUs;
1324    int64_t mediaTimeUs = -1;
1325    if (mFlags & FLAG_REAL_TIME) {
1326        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1327    } else {
1328        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1329
1330        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1331    }
1332
1333    bool tooLate = false;
1334
1335    if (!mPaused) {
1336        setVideoLateByUs(nowUs - realTimeUs);
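        // Treat the frame as late if it is more than 40 ms behind its target
        // render time; late frames are not rendered (render == false) below.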
1337        tooLate = (mVideoLateByUs > 40000);
1338
1339        if (tooLate) {
1340            ALOGV("video late by %lld us (%.2f secs)",
1341                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1342        } else {
1343            int64_t mediaUs = 0;
1344            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1345            ALOGV("rendering video at media time %.2f secs",
1346                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
1347                    mediaUs) / 1E6);
1348
1349            if (!(mFlags & FLAG_REAL_TIME)
1350                    && mLastAudioMediaTimeUs != -1
1351                    && mediaTimeUs > mLastAudioMediaTimeUs) {
1352                // If audio ends before video, video continues to drive media clock.
1353                // Also smooth out videos >= 10fps.
1354                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1355            }
1356        }
1357    } else {
1358        setVideoLateByUs(0);
1359        if (!mVideoSampleReceived && !mHasAudio) {
1360            // This will ensure that the first frame after a flush won't be used as an anchor
1361            // while the renderer is paused, because resume can happen any time after a seek.
1362            clearAnchorTime();
1363        }
1364    }
1365
1366    // Always render the first video frame while keeping stats on A/V sync.
1367    if (!mVideoSampleReceived) {
1368        realTimeUs = nowUs;
1369        tooLate = false;
1370    }
1371
1372    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
1373    entry->mNotifyConsumed->setInt32("render", !tooLate);
1374    entry->mNotifyConsumed->post();
1375    mVideoQueue.erase(mVideoQueue.begin());
1376    entry = NULL;
1377
1378    mVideoSampleReceived = true;
1379
1380    if (!mPaused) {
1381        if (!mVideoRenderingStarted) {
1382            mVideoRenderingStarted = true;
1383            notifyVideoRenderingStart();
1384        }
1385        Mutex::Autolock autoLock(mLock);
1386        notifyIfMediaRenderingStarted_l();
1387    }
1388}
1389
1390void NuPlayer::Renderer::notifyVideoRenderingStart() {
1391    sp<AMessage> notify = mNotify->dup();
1392    notify->setInt32("what", kWhatVideoRenderingStart);
1393    notify->post();
1394}
1395
1396void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
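    // For audio, a positive delay defers the EOS notification until the sink has
    // played out its pending data; the delayed message is dropped if the audio
    // EOS generation changes (e.g. due to a flush) in the meantime.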
1397    if (audio && delayUs > 0) {
1398        sp<AMessage> msg = new AMessage(kWhatEOS, this);
1399        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1400        msg->setInt32("finalResult", finalResult);
1401        msg->post(delayUs);
1402        return;
1403    }
1404    sp<AMessage> notify = mNotify->dup();
1405    notify->setInt32("what", kWhatEOS);
1406    notify->setInt32("audio", static_cast<int32_t>(audio));
1407    notify->setInt32("finalResult", finalResult);
1408    notify->post(delayUs);
1409}
1410
1411void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
1412    sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
1413    msg->setInt32("reason", reason);
1414    msg->post();
1415}
1416
1417void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1418    int32_t audio;
1419    CHECK(msg->findInt32("audio", &audio));
1420
1421    if (dropBufferIfStale(audio, msg)) {
1422        return;
1423    }
1424
1425    if (audio) {
1426        mHasAudio = true;
1427    } else {
1428        mHasVideo = true;
1429    }
1430
1431    if (mHasVideo) {
1432        if (mVideoScheduler == NULL) {
1433            mVideoScheduler = new VideoFrameScheduler();
1434            mVideoScheduler->init();
1435        }
1436    }
1437
1438    sp<RefBase> obj;
1439    CHECK(msg->findObject("buffer", &obj));
1440    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());
1441
1442    sp<AMessage> notifyConsumed;
1443    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1444
1445    QueueEntry entry;
1446    entry.mBuffer = buffer;
1447    entry.mNotifyConsumed = notifyConsumed;
1448    entry.mOffset = 0;
1449    entry.mFinalResult = OK;
1450    entry.mBufferOrdinal = ++mTotalBuffersQueued;
1451
1452    if (audio) {
1453        Mutex::Autolock autoLock(mLock);
1454        mAudioQueue.push_back(entry);
1455        postDrainAudioQueue_l();
1456    } else {
1457        mVideoQueue.push_back(entry);
1458        postDrainVideoQueue();
1459    }
1460
1461    Mutex::Autolock autoLock(mLock);
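    // While syncing queues, wait until both queues have a buffer so that the
    // initial audio/video timestamps can be compared below.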
1462    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1463        return;
1464    }
1465
1466    sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1467    sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1468
1469    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1470        // EOS signalled on either queue.
1471        syncQueuesDone_l();
1472        return;
1473    }
1474
1475    int64_t firstAudioTimeUs;
1476    int64_t firstVideoTimeUs;
1477    CHECK(firstAudioBuffer->meta()
1478            ->findInt64("timeUs", &firstAudioTimeUs));
1479    CHECK(firstVideoBuffer->meta()
1480            ->findInt64("timeUs", &firstVideoTimeUs));
1481
1482    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1483
1484    ALOGV("queueDiff = %.2f secs", diff / 1E6);
1485
1486    if (diff > 100000ll) {
1487        // Audio data starts more than 0.1 secs before video.
1488        // Drop some audio.
1489
1490        (*mAudioQueue.begin()).mNotifyConsumed->post();
1491        mAudioQueue.erase(mAudioQueue.begin());
1492        return;
1493    }
1494
1495    syncQueuesDone_l();
1496}
1497
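// Ends the initial queue-syncing phase and kicks off draining of whatever has
// already been queued.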
1498void NuPlayer::Renderer::syncQueuesDone_l() {
1499    if (!mSyncQueues) {
1500        return;
1501    }
1502
1503    mSyncQueues = false;
1504
1505    if (!mAudioQueue.empty()) {
1506        postDrainAudioQueue_l();
1507    }
1508
1509    if (!mVideoQueue.empty()) {
1510        mLock.unlock();
1511        postDrainVideoQueue();
1512        mLock.lock();
1513    }
1514}
1515
1516void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1517    int32_t audio;
1518    CHECK(msg->findInt32("audio", &audio));
1519
1520    if (dropBufferIfStale(audio, msg)) {
1521        return;
1522    }
1523
1524    int32_t finalResult;
1525    CHECK(msg->findInt32("finalResult", &finalResult));
1526
1527    QueueEntry entry;
1528    entry.mOffset = 0;
1529    entry.mFinalResult = finalResult;
1530
1531    if (audio) {
1532        Mutex::Autolock autoLock(mLock);
1533        if (mAudioQueue.empty() && mSyncQueues) {
1534            syncQueuesDone_l();
1535        }
1536        mAudioQueue.push_back(entry);
1537        postDrainAudioQueue_l();
1538    } else {
1539        if (mVideoQueue.empty() && getSyncQueues()) {
1540            Mutex::Autolock autoLock(mLock);
1541            syncQueuesDone_l();
1542        }
1543        mVideoQueue.push_back(entry);
1544        postDrainVideoQueue();
1545    }
1546}
1547
1548void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1549    int32_t audio, notifyComplete;
1550    CHECK(msg->findInt32("audio", &audio));
1551
1552    {
1553        Mutex::Autolock autoLock(mLock);
1554        if (audio) {
1555            notifyComplete = mNotifyCompleteAudio;
1556            mNotifyCompleteAudio = false;
1557            mLastAudioMediaTimeUs = -1;
1558        } else {
1559            notifyComplete = mNotifyCompleteVideo;
1560            mNotifyCompleteVideo = false;
1561        }
1562
1563        // If we're currently syncing the queues, i.e. dropping audio while
1564        // aligning the first audio/video buffer times and only one of the
1565        // two queues has data, we may starve that queue by not requesting
1566        // more buffers from the decoder. If the other source then encounters
1567        // a discontinuity that leads to flushing, we'll never find the
1568        // corresponding discontinuity on the other queue.
1569        // Therefore we'll stop syncing the queues if at least one of them
1570        // is flushed.
1571        syncQueuesDone_l();
1572    }
1573    clearAnchorTime();
1574
1575    ALOGV("flushing %s", audio ? "audio" : "video");
1576    if (audio) {
1577        {
1578            Mutex::Autolock autoLock(mLock);
1579            flushQueue(&mAudioQueue);
1580
1581            ++mAudioDrainGeneration;
1582            ++mAudioEOSGeneration;
1583            prepareForMediaRenderingStart_l();
1584
1585            // the frame count will be reset after flush.
1586            clearAudioFirstAnchorTime_l();
1587        }
1588
1589        mDrainAudioQueuePending = false;
1590
1591        if (offloadingAudio()) {
1592            mAudioSink->pause();
1593            mAudioSink->flush();
1594            if (!mPaused) {
1595                mAudioSink->start();
1596            }
1597        } else {
1598            mAudioSink->pause();
1599            mAudioSink->flush();
1600            // Call stop() to signal to the AudioSink to completely fill the
1601            // internal buffer before resuming playback.
1602            // FIXME: this is ignored after flush().
1603            mAudioSink->stop();
1604            if (mPaused) {
1605                // Race condition: if renderer is paused and audio sink is stopped,
1606                // we need to make sure that the audio track buffer fully drains
1607                // before delivering data.
1608                // FIXME: remove this if we can detect if stop() is complete.
1609                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
1610                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
1611            } else {
1612                mAudioSink->start();
1613            }
1614            mNumFramesWritten = 0;
1615        }
1616        mNextAudioClockUpdateTimeUs = -1;
1617    } else {
1618        flushQueue(&mVideoQueue);
1619
1620        mDrainVideoQueuePending = false;
1621
1622        if (mVideoScheduler != NULL) {
1623            mVideoScheduler->restart();
1624        }
1625
1626        Mutex::Autolock autoLock(mLock);
1627        ++mVideoDrainGeneration;
1628        prepareForMediaRenderingStart_l();
1629    }
1630
1631    mVideoSampleReceived = false;
1632
1633    if (notifyComplete) {
1634        notifyFlushComplete(audio);
1635    }
1636}
1637
1638void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1639    while (!queue->empty()) {
1640        QueueEntry *entry = &*queue->begin();
1641
1642        if (entry->mBuffer != NULL) {
1643            entry->mNotifyConsumed->post();
1644        } else if (entry->mNotifyConsumed != nullptr) {
1645            // Is it necessary to open the audio sink now?
1646            onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
1647        }
1648
1649        queue->erase(queue->begin());
1650        entry = NULL;
1651    }
1652}
1653
1654void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1655    sp<AMessage> notify = mNotify->dup();
1656    notify->setInt32("what", kWhatFlushComplete);
1657    notify->setInt32("audio", static_cast<int32_t>(audio));
1658    notify->post();
1659}
1660
1661bool NuPlayer::Renderer::dropBufferIfStale(
1662        bool audio, const sp<AMessage> &msg) {
1663    int32_t queueGeneration;
1664    CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1665
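    // The message carries the queue generation captured when it was posted. A flush
    // bumps the live generation, so a mismatch means this buffer is stale: drop it,
    // and post its notifyConsumed reply below so the decoder can reclaim the buffer.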
1666    if (queueGeneration == getQueueGeneration(audio)) {
1667        return false;
1668    }
1669
1670    sp<AMessage> notifyConsumed;
1671    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1672        notifyConsumed->post();
1673    }
1674
1675    return true;
1676}
1677
1678void NuPlayer::Renderer::onAudioSinkChanged() {
1679    if (offloadingAudio()) {
1680        return;
1681    }
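    // The (non-offloaded) AudioSink was just reopened: drop the old position anchor
    // and re-base the written-frame count on whatever the new sink already reports.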
1682    CHECK(!mDrainAudioQueuePending);
1683    mNumFramesWritten = 0;
1684    mAnchorNumFramesWritten = -1;
1685    uint32_t written;
1686    if (mAudioSink->getFramesWritten(&written) == OK) {
1687        mNumFramesWritten = written;
1688    }
1689}
1690
1691void NuPlayer::Renderer::onDisableOffloadAudio() {
1692    Mutex::Autolock autoLock(mLock);
1693    mFlags &= ~FLAG_OFFLOAD_AUDIO;
1694    ++mAudioDrainGeneration;
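    // If a rendering-started notification is still pending, re-arm it against the
    // new drain generation.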
1695    if (mAudioRenderingStartGeneration != -1) {
1696        prepareForMediaRenderingStart_l();
1697    }
1698}
1699
1700void NuPlayer::Renderer::onEnableOffloadAudio() {
1701    Mutex::Autolock autoLock(mLock);
1702    mFlags |= FLAG_OFFLOAD_AUDIO;
1703    ++mAudioDrainGeneration;
1704    if (mAudioRenderingStartGeneration != -1) {
1705        prepareForMediaRenderingStart_l();
1706    }
1707}
1708
1709void NuPlayer::Renderer::onPause() {
1710    if (mPaused) {
1711        return;
1712    }
1713
1714    {
1715        Mutex::Autolock autoLock(mLock);
1716        // We do not increment the audio drain generation, so the audio buffer keeps filling during the pause.
1717        ++mVideoDrainGeneration;
1718        prepareForMediaRenderingStart_l();
1719        mPaused = true;
1720        mMediaClock->setPlaybackRate(0.0);
1721    }
1722
1723    mDrainAudioQueuePending = false;
1724    mDrainVideoQueuePending = false;
1725
1726    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1727    mAudioSink->pause();
1728    startAudioOffloadPauseTimeout();
1729
1730    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1731          mAudioQueue.size(), mVideoQueue.size());
1732}
1733
1734void NuPlayer::Renderer::onResume() {
1735    if (!mPaused) {
1736        return;
1737    }
1738
1739    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
1740    cancelAudioOffloadPauseTimeout();
1741    if (mAudioSink->ready()) {
1742        status_t err = mAudioSink->start();
1743        if (err != OK) {
1744            ALOGE("cannot start AudioSink err %d", err);
1745            notifyAudioTearDown(kDueToError);
1746        }
1747    }
1748
1749    {
1750        Mutex::Autolock autoLock(mLock);
1751        mPaused = false;
1752        // The rendering-started message may have been delayed while we were paused.
1753        if (mRenderingDataDelivered) {
1754            notifyIfMediaRenderingStarted_l();
1755        }
1756        // Configure the AudioSink now, as this was not done while pausing.
1757        if (mAudioSink != NULL && mAudioSink->ready()) {
1758            mAudioSink->setPlaybackRate(mPlaybackSettings);
1759        }
1760
1761        mMediaClock->setPlaybackRate(mPlaybackRate);
1762
1763        if (!mAudioQueue.empty()) {
1764            postDrainAudioQueue_l();
1765        }
1766    }
1767
1768    if (!mVideoQueue.empty()) {
1769        postDrainVideoQueue();
1770    }
1771}
1772
1773void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1774    if (mVideoScheduler == NULL) {
1775        mVideoScheduler = new VideoFrameScheduler();
1776    }
1777    mVideoScheduler->init(fps);
1778}
1779
1780int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1781    Mutex::Autolock autoLock(mLock);
1782    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1783}
1784
1785int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1786    Mutex::Autolock autoLock(mLock);
1787    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1788}
1789
1790bool NuPlayer::Renderer::getSyncQueues() {
1791    Mutex::Autolock autoLock(mLock);
1792    return mSyncQueues;
1793}
1794
1795void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1796    if (mAudioTornDown) {
1797        return;
1798    }
1799    mAudioTornDown = true;
1800
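    // Capture the current playback position (when available) so the player can resume
    // from it after rebuilding the audio path, then release the sink and notify.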
1801    int64_t currentPositionUs;
1802    sp<AMessage> notify = mNotify->dup();
1803    if (getCurrentPosition(&currentPositionUs) == OK) {
1804        notify->setInt64("positionUs", currentPositionUs);
1805    }
1806
1807    mAudioSink->stop();
1808    mAudioSink->flush();
1809
1810    notify->setInt32("what", kWhatAudioTearDown);
1811    notify->setInt32("reason", reason);
1812    notify->post();
1813}
1814
1815void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1816    if (offloadingAudio()) {
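        // While paused in offload mode, hold the wakelock and arm a delayed timeout of
        // kOffloadPauseMaxUs. The generation stamped into the message lets
        // cancelAudioOffloadPauseTimeout() invalidate it if playback resumes first.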
1817        mWakeLock->acquire();
1818        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1819        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1820        msg->post(kOffloadPauseMaxUs);
1821    }
1822}
1823
1824void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1825    // We may have called startAudioOffloadPauseTimeout() without
1826    // the AudioSink open and with offloadingAudio enabled.
1827    //
1828    // By the time we cancel, offloadingAudio may already have been disabled, so we
1829    // always release the wakelock and increment the pause timeout generation regardless.
1830    //
1831    // Note: The acquired wakelock prevents the device from suspending
1832    // immediately after offload pause (in case a resume happens shortly thereafter).
1833    mWakeLock->release(true);
1834    ++mAudioOffloadPauseTimeoutGeneration;
1835}
1836
1837status_t NuPlayer::Renderer::onOpenAudioSink(
1838        const sp<AMessage> &format,
1839        bool offloadOnly,
1840        bool hasVideo,
1841        uint32_t flags) {
1842    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1843            offloadOnly, offloadingAudio());
1844    bool audioSinkChanged = false;
1845
1846    int32_t numChannels;
1847    CHECK(format->findInt32("channel-count", &numChannels));
1848
1849    int32_t channelMask;
1850    if (!format->findInt32("channel-mask", &channelMask)) {
1851        // Signal to the AudioSink to derive the mask from the channel count.
1852        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1853    }
1854
1855    int32_t sampleRate;
1856    CHECK(format->findInt32("sample-rate", &sampleRate));
1857
1858    if (offloadingAudio()) {
1859        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
1860        AString mime;
1861        CHECK(format->findString("mime", &mime));
1862        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1863
1864        if (err != OK) {
1865            ALOGE("Couldn't map mime \"%s\" to a valid "
1866                    "audio_format", mime.c_str());
1867            onDisableOffloadAudio();
1868        } else {
1869            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1870                    mime.c_str(), audioFormat);
1871
1872            int avgBitRate = -1;
1873            format->findInt32("bitrate", &avgBitRate);
1874
1875            int32_t aacProfile = -1;
1876            if (audioFormat == AUDIO_FORMAT_AAC
1877                    && format->findInt32("aac-profile", &aacProfile)) {
1878                // Refine the AAC audio_format according to the AAC profile.
1879                mapAACProfileToAudioFormat(
1880                        audioFormat,
1881                        aacProfile);
1882            }
1883
1884            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1885            offloadInfo.duration_us = -1;
1886            format->findInt64(
1887                    "durationUs", &offloadInfo.duration_us);
1888            offloadInfo.sample_rate = sampleRate;
1889            offloadInfo.channel_mask = channelMask;
1890            offloadInfo.format = audioFormat;
1891            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1892            offloadInfo.bit_rate = avgBitRate;
1893            offloadInfo.has_video = hasVideo;
1894            offloadInfo.is_streaming = true;
1895
1896            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1897                ALOGV("openAudioSink: no change in offload mode");
1898                // no change from previous configuration, everything ok.
1899                return OK;
1900            }
1901            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1902
1903            ALOGV("openAudioSink: try to open AudioSink in offload mode");
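            // Request compressed offload; deep-buffer output is a PCM path, so make
            // sure that flag is not carried over from the caller's flags.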
1904            uint32_t offloadFlags = flags;
1905            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1906            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1907            audioSinkChanged = true;
1908            mAudioSink->close();
1909
1910            err = mAudioSink->open(
1911                    sampleRate,
1912                    numChannels,
1913                    (audio_channel_mask_t)channelMask,
1914                    audioFormat,
1915                    0 /* bufferCount - unused */,
1916                    &NuPlayer::Renderer::AudioSinkCallback,
1917                    this,
1918                    (audio_output_flags_t)offloadFlags,
1919                    &offloadInfo);
1920
1921            if (err == OK) {
1922                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1923            }
1924
1925            if (err == OK) {
1926                // If the playback is offloaded to h/w, we pass
1927                // the HAL some metadata information.
1928                // We don't want to do this for PCM because it
1929                // will be going through the AudioFlinger mixer
1930                // before reaching the hardware.
1931                // TODO
1932                mCurrentOffloadInfo = offloadInfo;
1933                if (!mPaused) { // for preview mode, don't start if paused
1934                    err = mAudioSink->start();
1935                }
1936                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1937            }
1938            if (err != OK) {
1939                // Clean up, fall back to non offload mode.
1940                mAudioSink->close();
1941                onDisableOffloadAudio();
1942                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1943                ALOGV("openAudioSink: offload failed");
1944                if (offloadOnly) {
1945                    notifyAudioTearDown(kForceNonOffload);
1946                }
1947            } else {
1948                mUseAudioCallback = true;  // offload mode transfers data through callback
1949                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1950            }
1951        }
1952    }
1953    if (!offloadOnly && !offloadingAudio()) {
1954        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1955        uint32_t pcmFlags = flags;
1956        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
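        // This path always opens a plain PCM sink, so strip the compress-offload flag
        // from the caller's flags.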
1957
1958        const PcmInfo info = {
1959                (audio_channel_mask_t)channelMask,
1960                (audio_output_flags_t)pcmFlags,
1961                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
1962                numChannels,
1963                sampleRate
1964        };
1965        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
1966            ALOGV("openAudioSink: no change in pcm mode");
1967            // no change from previous configuration, everything ok.
1968            return OK;
1969        }
1970
1971        audioSinkChanged = true;
1972        mAudioSink->close();
1973        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1974        // Note: It is possible to set up the callback, but not use it to send audio data.
1975        // This requires a fix in AudioSink to explicitly specify the transfer mode.
1976        mUseAudioCallback = getUseAudioCallbackSetting();
1977        if (mUseAudioCallback) {
1978            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1979        }
1980
1981        // Compute the desired buffer size.
1982        // For callback mode, the amount of time before wakeup is about half the buffer size.
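        // For example, a 48 kHz stream with the default 500 ms sink setting yields
        // 48000 * 500 / 1000 = 24000 frames.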
1983        const uint32_t frameCount =
1984                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
1985
1986        // When doNotReconnect is set, the AudioSink signals back and lets NuPlayer re-construct
1987        // the AudioSink. We don't want this when there's video because it would cause a video
1988        // seek to the previous I-frame. But we do want it when there's only audio, because it
1989        // gives NuPlayer a chance to switch from non-offload mode to offload mode.
1990        // So we only set doNotReconnect when there's no video.
1991        const bool doNotReconnect = !hasVideo;
1992
1993        // We should always be able to set our playback settings if the sink is closed.
1994        LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
1995                "onOpenAudioSink: can't set playback rate on closed sink");
1996        status_t err = mAudioSink->open(
1997                    sampleRate,
1998                    numChannels,
1999                    (audio_channel_mask_t)channelMask,
2000                    AUDIO_FORMAT_PCM_16_BIT,
2001                    0 /* bufferCount - unused */,
2002                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
2003                    mUseAudioCallback ? this : NULL,
2004                    (audio_output_flags_t)pcmFlags,
2005                    NULL,
2006                    doNotReconnect,
2007                    frameCount);
2008        if (err != OK) {
2009            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
2010            mAudioSink->close();
2011            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2012            return err;
2013        }
2014        mCurrentPcmInfo = info;
2015        if (!mPaused) { // for preview mode, don't start if paused
2016            mAudioSink->start();
2017        }
2018    }
2019    if (audioSinkChanged) {
2020        onAudioSinkChanged();
2021    }
2022    mAudioTornDown = false;
2023    return OK;
2024}
2025
2026void NuPlayer::Renderer::onCloseAudioSink() {
2027    mAudioSink->close();
2028    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
2029    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
2030}
2031
2032void NuPlayer::Renderer::onChangeAudioFormat(
2033        const sp<AMessage> &meta, const sp<AMessage> &notify) {
2034    sp<AMessage> format;
2035    CHECK(meta->findMessage("format", &format));
2036
2037    int32_t offloadOnly;
2038    CHECK(meta->findInt32("offload-only", &offloadOnly));
2039
2040    int32_t hasVideo;
2041    CHECK(meta->findInt32("has-video", &hasVideo));
2042
2043    uint32_t flags;
2044    CHECK(meta->findInt32("flags", (int32_t *)&flags));
2045
2046    status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
2047
2048    if (err != OK) {
2049        notify->setInt32("err", err);
2050    }
2051    notify->post();
2052}
2053
2054}  // namespace android
2055
2056