NuPlayerRenderer.cpp revision b03dcb34cd44d77e5fe1559e72323e03c59931db
1/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayerRenderer"
19#include <utils/Log.h>
20
21#include "NuPlayerRenderer.h"
22#include <cutils/properties.h>
23#include <media/stagefright/foundation/ABuffer.h>
24#include <media/stagefright/foundation/ADebug.h>
25#include <media/stagefright/foundation/AMessage.h>
26#include <media/stagefright/foundation/AUtils.h>
27#include <media/stagefright/foundation/AWakeLock.h>
28#include <media/stagefright/MediaClock.h>
29#include <media/stagefright/MediaErrors.h>
30#include <media/stagefright/MetaData.h>
31#include <media/stagefright/Utils.h>
32#include <media/stagefright/VideoFrameScheduler.h>
33
34#include <inttypes.h>
35
36namespace android {
37
38/*
39 * Example of common configuration settings in shell script form
40
41   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
42   adb shell setprop audio.offload.disable 1
43
44   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
45   adb shell setprop audio.offload.video 1
46
47   #Use audio callbacks for PCM data
48   adb shell setprop media.stagefright.audio.cbk 1
49
50   #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
51   adb shell setprop media.stagefright.audio.deep 1
52
53   #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
54   adb shell setprop media.stagefright.audio.sink 1000
55
56 * These configurations take effect for the next track played (not the current track).
57 */
58
59static inline bool getUseAudioCallbackSetting() {
60    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
61}
62
63static inline int32_t getAudioSinkPcmMsSetting() {
64    return property_get_int32(
65            "media.stagefright.audio.sink", 500 /* default_value */);
66}
67
// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000ll;  // 10 seconds

// static
// Sentinel PcmInfo meaning "no PCM sink currently configured": every field is
// an invalid/none value, so any real configuration compares unequal to it.
const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
// Minimum delay between position updates: 100 ms.
const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
83
// Constructs the renderer. |sink| receives decoded audio, |notify| is the
// message channel back to the owner, |flags| are renderer option bits
// (interpreted by the rest of the class). All anchor/time members start at
// -1 meaning "not yet established"; generation counters start at 0 and are
// bumped on flush to invalidate in-flight messages.
NuPlayer::Renderer::Renderer(
        const sp<MediaPlayerBase::AudioSink> &sink,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0ll),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(false),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new AWakeLock()) {
    mMediaClock = new MediaClock;
    // Seed both the cached rate and the media clock from the default settings.
    mPlaybackRate = mPlaybackSettings.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
}
127
// Destructor. If audio is being offloaded, explicitly stop/flush/close the
// AudioSink so the offloaded track (and the DSP resources behind it — see the
// kOffloadPauseMaxUs comment) is released; non-offloaded sinks need no
// special teardown here.
NuPlayer::Renderer::~Renderer() {
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }
}
135
136void NuPlayer::Renderer::queueBuffer(
137        bool audio,
138        const sp<ABuffer> &buffer,
139        const sp<AMessage> &notifyConsumed) {
140    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
141    msg->setInt32("queueGeneration", getQueueGeneration(audio));
142    msg->setInt32("audio", static_cast<int32_t>(audio));
143    msg->setBuffer("buffer", buffer);
144    msg->setMessage("notifyConsumed", notifyConsumed);
145    msg->post();
146}
147
148void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
149    CHECK_NE(finalResult, (status_t)OK);
150
151    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
152    msg->setInt32("queueGeneration", getQueueGeneration(audio));
153    msg->setInt32("audio", static_cast<int32_t>(audio));
154    msg->setInt32("finalResult", finalResult);
155    msg->post();
156}
157
158status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
159    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
160    writeToAMessage(msg, rate);
161    sp<AMessage> response;
162    status_t err = msg->postAndAwaitResponse(&response);
163    if (err == OK && response != NULL) {
164        CHECK(response->findInt32("err", &err));
165    }
166    return err;
167}
168
// Applies sanitized playback-rate settings on the looper thread.
// Speed 0 is interpreted as "pause": the renderer pauses but keeps the
// previous non-zero speed cached so a later resume restores it.
status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed == 0.f) {
        onPause();
        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
        // have to correspond to the any non-0 speed (e.g old speed). Keep
        // settings nonetheless, using the old speed, in case audiosink changes.
        AudioPlaybackRate newRate = rate;
        newRate.mSpeed = mPlaybackSettings.mSpeed;
        mPlaybackSettings = newRate;
        return OK;
    }

    // Push the rate to the sink first; on failure return without touching
    // renderer state so settings and sink never disagree.
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            return err;
        }
    }
    mPlaybackSettings = rate;
    mPlaybackRate = rate.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
    return OK;
}
192
193status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
194    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
195    sp<AMessage> response;
196    status_t err = msg->postAndAwaitResponse(&response);
197    if (err == OK && response != NULL) {
198        CHECK(response->findInt32("err", &err));
199        if (err == OK) {
200            readFromAMessage(response, rate);
201        }
202    }
203    return err;
204}
205
// Reports the effective playback settings on the looper thread. When the sink
// is live, its own view of the rate wins and is cached back (the sink may not
// take very small changes); while paused the reported speed is forced to 0.
status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate");
            }
            // get playback settings used by audiosink, as it may be
            // slightly off due to audiosink not taking small changes.
            mPlaybackSettings = *rate;
            if (mPaused) {
                // Paused playback is reported as speed 0 (see onConfigPlayback).
                rate->mSpeed = 0.f;
            }
        }
        return err;
    }
    // No live sink: report the cached settings.
    *rate = mPlaybackSettings;
    return OK;
}
225
226status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
227    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
228    writeToAMessage(msg, sync, videoFpsHint);
229    sp<AMessage> response;
230    status_t err = msg->postAndAwaitResponse(&response);
231    if (err == OK && response != NULL) {
232        CHECK(response->findInt32("err", &err));
233    }
234    return err;
235}
236
237status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
238    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
239        return BAD_VALUE;
240    }
241    // TODO: support sync sources
242    return INVALID_OPERATION;
243}
244
245status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
246    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
247    sp<AMessage> response;
248    status_t err = msg->postAndAwaitResponse(&response);
249    if (err == OK && response != NULL) {
250        CHECK(response->findInt32("err", &err));
251        if (err == OK) {
252            readFromAMessage(response, sync, videoFps);
253        }
254    }
255    return err;
256}
257
258status_t NuPlayer::Renderer::onGetSyncSettings(
259        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
260    *sync = mSyncSettings;
261    *videoFps = -1.f;
262    return OK;
263}
264
// Flushes the audio or video stream. Under mLock, the queue and drain
// generations for that stream are bumped so any in-flight queue/drain
// messages become stale and are dropped; the queues themselves are emptied
// later on the looper thread by onFlush().
void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        // Any previously established anchor/lateness is meaningless after a flush.
        clearAnchorTime_l();
        mVideoLateByUs = 0;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}
288
// Intentionally a no-op: kept so callers have a stable hook. Discontinuity
// handling presumably lives in the flush paths — confirm against callers.
void NuPlayer::Renderer::signalTimeDiscontinuity() {
}
291
292void NuPlayer::Renderer::signalDisableOffloadAudio() {
293    (new AMessage(kWhatDisableOffloadAudio, this))->post();
294}
295
296void NuPlayer::Renderer::signalEnableOffloadAudio() {
297    (new AMessage(kWhatEnableOffloadAudio, this))->post();
298}
299
300void NuPlayer::Renderer::pause() {
301    (new AMessage(kWhatPause, this))->post();
302}
303
304void NuPlayer::Renderer::resume() {
305    (new AMessage(kWhatResume, this))->post();
306}
307
308void NuPlayer::Renderer::setVideoFrameRate(float fps) {
309    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
310    msg->setFloat("frame-rate", fps);
311    msg->post();
312}
313
314// Called on any threads.
315status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
316    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
317}
318
// Invalidates the first-audio-anchor time (the _l suffix: caller holds mLock —
// see flush()). The next audio timestamp re-establishes the starting media
// time via setAudioFirstAnchorTimeIfNeeded_l().
void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}
323
324void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
325    if (mAudioFirstAnchorTimeMediaUs == -1) {
326        mAudioFirstAnchorTimeMediaUs = mediaUs;
327        mMediaClock->setStartingTimeMedia(mediaUs);
328    }
329}
330
// Drops the media-clock anchor; -1 marks both anchor fields as "not set".
// Caller holds mLock (see flush()).
void NuPlayer::Renderer::clearAnchorTime_l() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}
336
// Records the current video lateness; mLock guards it because it is read from
// other threads via getVideoLateByUs().
void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}
341
// Thread-safe read of the lateness value recorded by setVideoLateByUs().
int64_t NuPlayer::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}
346
347status_t NuPlayer::Renderer::openAudioSink(
348        const sp<AMessage> &format,
349        bool offloadOnly,
350        bool hasVideo,
351        uint32_t flags,
352        bool *isOffloaded) {
353    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
354    msg->setMessage("format", format);
355    msg->setInt32("offload-only", offloadOnly);
356    msg->setInt32("has-video", hasVideo);
357    msg->setInt32("flags", flags);
358
359    sp<AMessage> response;
360    msg->postAndAwaitResponse(&response);
361
362    int32_t err;
363    if (!response->findInt32("err", &err)) {
364        err = INVALID_OPERATION;
365    } else if (err == OK && isOffloaded != NULL) {
366        int32_t offload;
367        CHECK(response->findInt32("offload", &offload));
368        *isOffloaded = (offload != 0);
369    }
370    return err;
371}
372
373void NuPlayer::Renderer::closeAudioSink() {
374    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
375
376    sp<AMessage> response;
377    msg->postAndAwaitResponse(&response);
378}
379
// Looper-thread dispatcher for every renderer message. Synchronous requests
// (open/close sink, get/set settings) carry an AReplyToken and are answered
// with a response message. Drain-related messages are stamped with a
// generation and dropped when it no longer matches, i.e. a flush happened
// after they were posted.
void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);

            // Reply with both the status and whether offload was selected.
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            // Empty response: the caller only waits for completion.
            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            // Stale drain (stream was flushed since this was posted)? Ignore.
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;
            }

            if (onDrainAudioQueue()) {
                // NOTE(review): CHECK_EQ assumes the sink's AudioTrack exists
                // here; see the comment block above onDrainAudioQueue().
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                uint32_t numFramesPendingPlayout =
                    mNumFramesWritten - numFramesPlayed;

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackRate > 1.0f) {
                    delayUs /= mPlaybackRate;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs / 2);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            // Immediately line up the next video drain.
            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            onAudioTearDown(kDueToError);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            // Offload pause timed out: tear the audio down so the DSP can power
            // off, then release the wakelock (presumably acquired when this
            // timeout was scheduled — confirm against the scheduling site).
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}
625
// Schedules a kWhatDrainAudioQueue after |delayUs| (caller holds mLock).
// No-op when a drain is already pending, queues are being synced, the
// callback path is in use, or there is nothing queued. The message is stamped
// with the current drain generation so a later flush invalidates it.
void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        // Don't deliver before mPauseDrainAudioAllowedUs; stretch the delay if needed.
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}
648
// Arms the "media rendering started" notification by snapshotting the current
// drain generations; notifyIfMediaRenderingStarted_l() fires once while both
// snapshots still match (a flush bumps the generations and disarms it).
void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
}
653
// Posts kWhatMediaRenderingStart at most once per arming (see
// prepareForMediaRenderingStart_l): after firing, the snapshots are reset to
// -1 so subsequent calls cannot match again until re-armed.
void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}
665
666// static
667size_t NuPlayer::Renderer::AudioSinkCallback(
668        MediaPlayerBase::AudioSink * /* audioSink */,
669        void *buffer,
670        size_t size,
671        void *cookie,
672        MediaPlayerBase::AudioSink::cb_event_t event) {
673    NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
674
675    switch (event) {
676        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
677        {
678            return me->fillAudioBuffer(buffer, size);
679            break;
680        }
681
682        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
683        {
684            ALOGV("AudioSink::CB_EVENT_STREAM_END");
685            me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
686            break;
687        }
688
689        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
690        {
691            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
692            me->notifyAudioTearDown();
693            break;
694        }
695    }
696
697    return 0;
698}
699
700size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
701    Mutex::Autolock autoLock(mLock);
702
703    if (!mUseAudioCallback) {
704        return 0;
705    }
706
707    bool hasEOS = false;
708
709    size_t sizeCopied = 0;
710    bool firstEntry = true;
711    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
712    while (sizeCopied < size && !mAudioQueue.empty()) {
713        entry = &*mAudioQueue.begin();
714
715        if (entry->mBuffer == NULL) { // EOS
716            hasEOS = true;
717            mAudioQueue.erase(mAudioQueue.begin());
718            break;
719        }
720
721        if (firstEntry && entry->mOffset == 0) {
722            firstEntry = false;
723            int64_t mediaTimeUs;
724            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
725            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
726            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
727        }
728
729        size_t copy = entry->mBuffer->size() - entry->mOffset;
730        size_t sizeRemaining = size - sizeCopied;
731        if (copy > sizeRemaining) {
732            copy = sizeRemaining;
733        }
734
735        memcpy((char *)buffer + sizeCopied,
736               entry->mBuffer->data() + entry->mOffset,
737               copy);
738
739        entry->mOffset += copy;
740        if (entry->mOffset == entry->mBuffer->size()) {
741            entry->mNotifyConsumed->post();
742            mAudioQueue.erase(mAudioQueue.begin());
743            entry = NULL;
744        }
745        sizeCopied += copy;
746
747        notifyIfMediaRenderingStarted_l();
748    }
749
750    if (mAudioFirstAnchorTimeMediaUs >= 0) {
751        int64_t nowUs = ALooper::GetNowUs();
752        int64_t nowMediaUs =
753            mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
754        // we don't know how much data we are queueing for offloaded tracks.
755        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
756    }
757
758    // for non-offloaded audio, we need to compute the frames written because
759    // there is no EVENT_STREAM_END notification. The frames written gives
760    // an estimate on the pending played out duration.
761    if (!offloadingAudio()) {
762        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
763    }
764
765    if (hasEOS) {
766        (new AMessage(kWhatStopAudioSink, this))->post();
767        // As there is currently no EVENT_STREAM_END callback notification for
768        // non-offloaded audio tracks, we need to post the EOS ourselves.
769        if (!offloadingAudio()) {
770            int64_t postEOSDelayUs = 0;
771            if (mAudioSink->needsTrailingPadding()) {
772                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
773            }
774            ALOGV("fillAudioBuffer: notifyEOS "
775                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
776                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
777            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
778        }
779    }
780    return sizeCopied;
781}
782
// Finds the last EOS (NULL-buffer or "eos"-flagged) entry in the audio queue.
// If one exists, posts consumed/EOS notifications for everything up to and
// including it, then drops those entries. Used when the sink cannot drain
// (getPosition failed) but NuPlayerDecoder may be blocked waiting on the EOS.
void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        if (entry->mBuffer == NULL
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            // |it| was already advanced, so itEOS is one-past the EOS entry.
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == NULL) {
                // delay doesn't matter as we don't even have an AudioTrack
                notifyEOS(true /* audio */, it->mFinalResult);
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}
809
810bool NuPlayer::Renderer::onDrainAudioQueue() {
811    // TODO: This call to getPosition checks if AudioTrack has been created
812    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
813    // CHECKs on getPosition will fail.
814    // We still need to figure out why AudioTrack is not created when
815    // this function is called. One possible reason could be leftover
816    // audio. Another possible place is to check whether decoder
817    // has received INFO_FORMAT_CHANGED as the first buffer since
818    // AudioSink is opened there, and possible interactions with flush
819    // immediately after start. Investigate error message
820    // "vorbis_dsp_synthesis returned -135", along with RTSP.
821    uint32_t numFramesPlayed;
822    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
823        // When getPosition fails, renderer will not reschedule the draining
824        // unless new samples are queued.
825        // If we have pending EOS (or "eos" marker for discontinuities), we need
826        // to post these now as NuPlayerDecoder might be waiting for it.
827        drainAudioQueueUntilLastEOS();
828
829        ALOGW("onDrainAudioQueue(): audio sink is not ready");
830        return false;
831    }
832
833#if 0
834    ssize_t numFramesAvailableToWrite =
835        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
836
837    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
838        ALOGI("audio sink underrun");
839    } else {
840        ALOGV("audio queue has %d frames left to play",
841             mAudioSink->frameCount() - numFramesAvailableToWrite);
842    }
843#endif
844
845    uint32_t prevFramesWritten = mNumFramesWritten;
846    while (!mAudioQueue.empty()) {
847        QueueEntry *entry = &*mAudioQueue.begin();
848
849        mLastAudioBufferDrained = entry->mBufferOrdinal;
850
851        if (entry->mBuffer == NULL) {
852            // EOS
853            int64_t postEOSDelayUs = 0;
854            if (mAudioSink->needsTrailingPadding()) {
855                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
856            }
857            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
858            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
859
860            mAudioQueue.erase(mAudioQueue.begin());
861            entry = NULL;
862            if (mAudioSink->needsTrailingPadding()) {
863                // If we're not in gapless playback (i.e. through setNextPlayer), we
864                // need to stop the track here, because that will play out the last
865                // little bit at the end of the file. Otherwise short files won't play.
866                mAudioSink->stop();
867                mNumFramesWritten = 0;
868            }
869            return false;
870        }
871
872        // ignore 0-sized buffer which could be EOS marker with no data
873        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
874            int64_t mediaTimeUs;
875            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
876            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
877                    mediaTimeUs / 1E6);
878            onNewAudioMediaTime(mediaTimeUs);
879        }
880
881        size_t copy = entry->mBuffer->size() - entry->mOffset;
882
883        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
884                                            copy, false /* blocking */);
885        if (written < 0) {
886            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
887            if (written == WOULD_BLOCK) {
888                ALOGV("AudioSink write would block when writing %zu bytes", copy);
889            } else {
890                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
891                notifyAudioTearDown();
892            }
893            break;
894        }
895
896        entry->mOffset += written;
897        if (entry->mOffset == entry->mBuffer->size()) {
898            entry->mNotifyConsumed->post();
899            mAudioQueue.erase(mAudioQueue.begin());
900
901            entry = NULL;
902        }
903
904        size_t copiedFrames = written / mAudioSink->frameSize();
905        mNumFramesWritten += copiedFrames;
906
907        {
908            Mutex::Autolock autoLock(mLock);
909            notifyIfMediaRenderingStarted_l();
910        }
911
912        if (written != (ssize_t)copy) {
913            // A short count was received from AudioSink::write()
914            //
915            // AudioSink write is called in non-blocking mode.
916            // It may return with a short count when:
917            //
918            // 1) Size to be copied is not a multiple of the frame size. We consider this fatal.
919            // 2) The data to be copied exceeds the available buffer in AudioSink.
920            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
921            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
922
923            // (Case 1)
924            // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
925            // needs to fail, as we should not carry over fractional frames between calls.
926            CHECK_EQ(copy % mAudioSink->frameSize(), 0);
927
928            // (Case 2, 3, 4)
929            // Return early to the caller.
930            // Beware of calling immediately again as this may busy-loop if you are not careful.
931            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
932            break;
933        }
934    }
935    int64_t maxTimeMedia;
936    {
937        Mutex::Autolock autoLock(mLock);
938        maxTimeMedia =
939            mAnchorTimeMediaUs +
940                    (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
941                            * 1000LL * mAudioSink->msecsPerFrame());
942    }
943    mMediaClock->updateMaxTimeMedia(maxTimeMedia);
944
945    // calculate whether we need to reschedule another write.
946    bool reschedule = !mAudioQueue.empty()
947            && (!mPaused
948                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
949    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
950    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
951    return reschedule;
952}
953
954int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
955    int32_t sampleRate = offloadingAudio() ?
956            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
957    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
958    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
959}
960
961// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
962int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
963    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
964    return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs);
965}
966
967int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
968    int64_t realUs;
969    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
970        // If failed to get current position, e.g. due to audio clock is
971        // not ready, then just play out video immediately without delay.
972        return nowUs;
973    }
974    return realUs;
975}
976
// Establishes a new audio anchor on the media clock from the media time of
// the audio buffer that is about to be written to the sink.
void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
    int64_t nowUs = ALooper::GetNowUs();
    // This buffer will only be heard after the audio already pending in the
    // sink has played out, so the media time corresponding to "now" is the
    // buffer's media time minus that pending playout duration.
    int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
    mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
    // Remember the frame count / media time at the anchor so
    // onDrainAudioQueue can extrapolate the max media time from them.
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}
991
// Called without mLock acquired.
// Schedules a kWhatDrainVideoQueue message for the head of the video queue,
// timed so it arrives two vsyncs before the frame's render time.
void NuPlayer::Renderer::postDrainVideoQueue() {
    // Skip if a drain is already pending, if A/V queue syncing is still in
    // progress, or if we're paused after having received at least one sample.
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    int64_t delayUs;
    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    if (mFlags & FLAG_REAL_TIME) {
        // In real-time mode the buffer timestamp IS the render wall-clock time.
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
        realTimeUs = mediaTimeUs;
    } else {
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        {
            Mutex::Autolock autoLock(mLock);
            if (mAnchorTimeMediaUs < 0) {
                // No anchor yet (e.g. video-only or before first audio buffer):
                // anchor the clock on this frame and render it immediately.
                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
                mAnchorTimeMediaUs = mediaTimeUs;
                realTimeUs = nowUs;
            } else {
                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
            }
        }
        if (!mHasAudio) {
            // smooth out videos >= 10fps
            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
        }

        // Heuristics to handle situation when media time changed without a
        // discontinuity. If we have not drained an audio buffer that was
        // received after this buffer, repost in 10 msec. Otherwise repost
        // in 500 msec.
        delayUs = realTimeUs - nowUs;
        if (delayUs > 500000) {
            int64_t postDelayUs = 500000;
            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
                postDelayUs = 10000;
            }
            msg->setWhat(kWhatPostDrainVideoQueue);
            msg->post(postDelayUs);
            mVideoScheduler->restart();
            ALOGI("possible video time jump of %dms, retrying in %dms",
                    (int)(delayUs / 1000), (int)(postDelayUs / 1000));
            mDrainVideoQueuePending = true;
            return;
        }
    }

    // Snap the render time onto the display's vsync grid.
    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

    delayUs = realTimeUs - nowUs;

    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
    // post 2 display refreshes before rendering is due
    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

    mDrainVideoQueuePending = true;
}
1073
// Drains the head entry of the video queue: decides whether it is too late to
// render, hands the buffer back to the decoder with a render/drop decision
// and the target timestamp, and updates rendering-start bookkeeping.
void NuPlayer::Renderer::onDrainVideoQueue() {
    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry *entry = &*mVideoQueue.begin();

    if (entry->mBuffer == NULL) {
        // EOS

        notifyEOS(false /* audio */, entry->mFinalResult);

        mVideoQueue.erase(mVideoQueue.begin());
        entry = NULL;

        setVideoLateByUs(0);
        return;
    }

    int64_t nowUs = -1;
    int64_t realTimeUs;
    int64_t mediaTimeUs = -1;
    if (mFlags & FLAG_REAL_TIME) {
        // In real-time mode the buffer timestamp already is a wall-clock time.
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
    } else {
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        nowUs = ALooper::GetNowUs();
        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
    }

    bool tooLate = false;

    if (!mPaused) {
        if (nowUs == -1) {
            nowUs = ALooper::GetNowUs();
        }
        setVideoLateByUs(nowUs - realTimeUs);
        // Frames more than 40ms behind the clock are dropped (not rendered).
        tooLate = (mVideoLateByUs > 40000);

        if (tooLate) {
            ALOGV("video late by %lld us (%.2f secs)",
                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
        } else {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
            ALOGV("rendering video at media time %.2f secs",
                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
                    mediaUs) / 1E6);

            if (!(mFlags & FLAG_REAL_TIME)
                    && mLastAudioMediaTimeUs != -1
                    && mediaTimeUs > mLastAudioMediaTimeUs) {
                // If audio ends before video, video continues to drive media clock.
                // Also smooth out videos >= 10fps.
                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
            }
        }
    } else {
        setVideoLateByUs(0);
        if (!mVideoSampleReceived && !mHasAudio) {
            // This will ensure that the first frame after a flush won't be used as anchor
            // when renderer is in paused state, because resume can happen any time after seek.
            Mutex::Autolock autoLock(mLock);
            clearAnchorTime_l();
        }
    }

    // Return the buffer to the decoder with the scheduled display timestamp
    // and whether it should actually be rendered (late frames are dropped).
    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
    entry->mNotifyConsumed->setInt32("render", !tooLate);
    entry->mNotifyConsumed->post();
    mVideoQueue.erase(mVideoQueue.begin());
    entry = NULL;

    mVideoSampleReceived = true;

    if (!mPaused) {
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyVideoRenderingStart();
        }
        Mutex::Autolock autoLock(mLock);
        notifyIfMediaRenderingStarted_l();
    }
}
1159
1160void NuPlayer::Renderer::notifyVideoRenderingStart() {
1161    sp<AMessage> notify = mNotify->dup();
1162    notify->setInt32("what", kWhatVideoRenderingStart);
1163    notify->post();
1164}
1165
1166void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1167    sp<AMessage> notify = mNotify->dup();
1168    notify->setInt32("what", kWhatEOS);
1169    notify->setInt32("audio", static_cast<int32_t>(audio));
1170    notify->setInt32("finalResult", finalResult);
1171    notify->post(delayUs);
1172}
1173
1174void NuPlayer::Renderer::notifyAudioTearDown() {
1175    (new AMessage(kWhatAudioTearDown, this))->post();
1176}
1177
// Accepts a decoded buffer from the decoder, enqueues it on the audio or
// video queue, kicks off draining, and — while queue syncing is active —
// drops leading audio so the first audio and video timestamps line up.
void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    // Buffers from a generation before the last flush are returned unused.
    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (mHasVideo) {
        // Lazily create the vsync-based frame scheduler on first video buffer.
        if (mVideoScheduler == NULL) {
            mVideoScheduler = new VideoFrameScheduler();
            mVideoScheduler->init();
        }
    }

    sp<ABuffer> buffer;
    CHECK(msg->findBuffer("buffer", &buffer));

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    // Ordinal lets postDrainVideoQueue compare arrival order across queues.
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        // NOTE(review): mVideoQueue is pushed without holding mLock here,
        // unlike the audio path — presumably only this looper thread mutates
        // the video queue; confirm before changing threading assumptions.
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    Mutex::Autolock autoLock(mLock);
    // Below: initial A/V queue synchronization. Only runs while mSyncQueues
    // is set and both queues have at least one entry to compare.
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000ll) {
        // Audio data starts More than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    // Heads are within 0.1s of each other — syncing is complete.
    syncQueuesDone_l();
}
1257
// Ends the initial A/V queue-sync phase (if active) and kicks off draining
// of whichever queues have data. Must be called with mLock held.
void NuPlayer::Renderer::syncQueuesDone_l() {
    if (!mSyncQueues) {
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        // postDrainVideoQueue() must run without mLock held (it re-acquires
        // mLock internally, e.g. via getSyncQueues()), so drop it briefly.
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}
1275
// Enqueues an EOS marker (an entry with a NULL buffer carrying the final
// status) on the audio or video queue and schedules a drain so it propagates.
void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    // Ignore EOS from a generation that predates the last flush.
    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    // mBuffer stays NULL — that is how drain loops recognize the EOS marker.
    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        // EOS on an empty queue means the other stream can never be matched;
        // stop waiting for queue sync.
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        // NOTE(review): this branch checks getSyncQueues() (which briefly
        // takes mLock) BEFORE acquiring mLock for syncQueuesDone_l, unlike
        // the audio branch above — looks racy on paper; confirm whether only
        // the looper thread can toggle mSyncQueues here.
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}
1307
// Flushes one stream: returns all queued buffers to the decoder, invalidates
// pending drain messages via generation bump, resets clock anchors, and
// cycles the audio sink so stale data is discarded.
void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
            mLastAudioMediaTimeUs = -1;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
        clearAnchorTime_l();
    }

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            // Bumping the generation invalidates any in-flight
            // kWhatDrainAudioQueue messages for the old queue contents.
            ++mAudioDrainGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}
1395
1396void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1397    while (!queue->empty()) {
1398        QueueEntry *entry = &*queue->begin();
1399
1400        if (entry->mBuffer != NULL) {
1401            entry->mNotifyConsumed->post();
1402        }
1403
1404        queue->erase(queue->begin());
1405        entry = NULL;
1406    }
1407}
1408
1409void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1410    sp<AMessage> notify = mNotify->dup();
1411    notify->setInt32("what", kWhatFlushComplete);
1412    notify->setInt32("audio", static_cast<int32_t>(audio));
1413    notify->post();
1414}
1415
1416bool NuPlayer::Renderer::dropBufferIfStale(
1417        bool audio, const sp<AMessage> &msg) {
1418    int32_t queueGeneration;
1419    CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1420
1421    if (queueGeneration == getQueueGeneration(audio)) {
1422        return false;
1423    }
1424
1425    sp<AMessage> notifyConsumed;
1426    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1427        notifyConsumed->post();
1428    }
1429
1430    return true;
1431}
1432
1433void NuPlayer::Renderer::onAudioSinkChanged() {
1434    if (offloadingAudio()) {
1435        return;
1436    }
1437    CHECK(!mDrainAudioQueuePending);
1438    mNumFramesWritten = 0;
1439    {
1440        Mutex::Autolock autoLock(mLock);
1441        mAnchorNumFramesWritten = -1;
1442    }
1443    uint32_t written;
1444    if (mAudioSink->getFramesWritten(&written) == OK) {
1445        mNumFramesWritten = written;
1446    }
1447}
1448
1449void NuPlayer::Renderer::onDisableOffloadAudio() {
1450    Mutex::Autolock autoLock(mLock);
1451    mFlags &= ~FLAG_OFFLOAD_AUDIO;
1452    ++mAudioDrainGeneration;
1453    if (mAudioRenderingStartGeneration != -1) {
1454        prepareForMediaRenderingStart_l();
1455    }
1456}
1457
1458void NuPlayer::Renderer::onEnableOffloadAudio() {
1459    Mutex::Autolock autoLock(mLock);
1460    mFlags |= FLAG_OFFLOAD_AUDIO;
1461    ++mAudioDrainGeneration;
1462    if (mAudioRenderingStartGeneration != -1) {
1463        prepareForMediaRenderingStart_l();
1464    }
1465}
1466
// Pauses playback: stops the media clock, invalidates pending video drains,
// and pauses the audio sink (audio buffers keep filling while paused).
void NuPlayer::Renderer::onPause() {
    if (mPaused) {
        return;
    }

    {
        Mutex::Autolock autoLock(mLock);
        // we do not increment audio drain generation so that we fill audio buffer during pause.
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
        mPaused = true;
        // Rate 0.0 freezes the media clock for the duration of the pause.
        mMediaClock->setPlaybackRate(0.0);
    }

    mDrainAudioQueuePending = false;
    mDrainVideoQueuePending = false;

    if (mHasAudio) {
        mAudioSink->pause();
        // In offload mode, arm a timeout that tears the sink down if the
        // pause lasts too long (see startAudioOffloadPauseTimeout).
        startAudioOffloadPauseTimeout();
    }

    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
          mAudioQueue.size(), mVideoQueue.size());
}
1492
// Resumes playback: restarts the audio sink, restores the playback rate on
// both the sink and the media clock, and re-arms draining of both queues.
void NuPlayer::Renderer::onResume() {
    if (!mPaused) {
        return;
    }

    if (mHasAudio) {
        cancelAudioOffloadPauseTimeout();
        status_t err = mAudioSink->start();
        if (err != OK) {
            // Sink failed to restart — schedule a tear-down/rebuild.
            notifyAudioTearDown();
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;

        // configure audiosink as we did not do it when pausing
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        mMediaClock->setPlaybackRate(mPlaybackRate);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    // postDrainVideoQueue must be called without mLock held.
    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}
1526
1527void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1528    if (mVideoScheduler == NULL) {
1529        mVideoScheduler = new VideoFrameScheduler();
1530    }
1531    mVideoScheduler->init(fps);
1532}
1533
1534int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1535    Mutex::Autolock autoLock(mLock);
1536    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1537}
1538
1539int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1540    Mutex::Autolock autoLock(mLock);
1541    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1542}
1543
1544bool NuPlayer::Renderer::getSyncQueues() {
1545    Mutex::Autolock autoLock(mLock);
1546    return mSyncQueues;
1547}
1548
// TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs()
// as it acquires locks and may query the audio driver.
//
// Some calls could conceivably retrieve extrapolated data instead of
// accessing getTimestamp() or getPosition() every time a data buffer with
// a media time is received.
//
// Calculate duration of played samples if played at normal rate (i.e., 1.0).
//
// Three sources are tried in order: getTimestamp() (case 1), a WOULD_BLOCK
// transition at track start (case 2), and getPosition() + half the sink
// latency as a fallback (case 3).
int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
    uint32_t numFramesPlayed;
    int64_t numFramesPlayedAt;
    AudioTimestamp ts;
    static const int64_t kStaleTimestamp100ms = 100000;

    status_t res = mAudioSink->getTimestamp(ts);
    if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
        numFramesPlayed = ts.mPosition;
        numFramesPlayedAt =
            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
        const int64_t timestampAge = nowUs - numFramesPlayedAt;
        if (timestampAge > kStaleTimestamp100ms) {
            // This is an audio FIXME.
            // getTimestamp returns a timestamp which may come from audio mixing threads.
            // After pausing, the MixerThread may go idle, thus the mTime estimate may
            // become stale. Assuming that the MixerThread runs 20ms, with FastMixer at 5ms,
            // the max latency should be about 25ms with an average around 12ms (to be verified).
            // For safety we use 100ms.
            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
                    (long long)nowUs, (long long)numFramesPlayedAt);
            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
        }
        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
    } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
        numFramesPlayed = 0;
        numFramesPlayedAt = nowUs;
        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
        //        numFramesPlayed, (long long)numFramesPlayedAt);
    } else {                         // case 3: transitory at new track or audio fast tracks.
        res = mAudioSink->getPosition(&numFramesPlayed);
        CHECK_EQ(res, (status_t)OK);
        numFramesPlayedAt = nowUs;
        numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
    }

    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
    // Extrapolate from the snapshot time to |nowUs| at 1.0x playback.
    int64_t durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
            + nowUs - numFramesPlayedAt;
    if (durationUs < 0) {
        // Occurs when numFramesPlayed position is very small and the following:
        // (1) In case 1, the time nowUs is computed before getTimestamp() is called and
        //     numFramesPlayedAt is greater than nowUs by time more than numFramesPlayed.
        // (2) In case 3, using getPosition and adding mAudioSink->latency() to
        //     numFramesPlayedAt, by a time amount greater than numFramesPlayed.
        //
        // Both of these are transitory conditions.
        ALOGV("getPlayedOutAudioDurationUs: negative duration %lld set to zero", (long long)durationUs);
        durationUs = 0;
    }
    ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
    return durationUs;
}
1612
1613void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1614    if (mAudioTornDown) {
1615        return;
1616    }
1617    mAudioTornDown = true;
1618
1619    int64_t currentPositionUs;
1620    sp<AMessage> notify = mNotify->dup();
1621    if (getCurrentPosition(&currentPositionUs) == OK) {
1622        notify->setInt64("positionUs", currentPositionUs);
1623    }
1624
1625    mAudioSink->stop();
1626    mAudioSink->flush();
1627
1628    notify->setInt32("what", kWhatAudioTearDown);
1629    notify->setInt32("reason", reason);
1630    notify->post();
1631}
1632
1633void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1634    if (offloadingAudio()) {
1635        mWakeLock->acquire();
1636        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1637        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1638        msg->post(kOffloadPauseMaxUs);
1639    }
1640}
1641
1642void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1643    if (offloadingAudio()) {
1644        mWakeLock->release(true);
1645        ++mAudioOffloadPauseTimeoutGeneration;
1646    }
1647}
1648
1649status_t NuPlayer::Renderer::onOpenAudioSink(
1650        const sp<AMessage> &format,
1651        bool offloadOnly,
1652        bool hasVideo,
1653        uint32_t flags) {
1654    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1655            offloadOnly, offloadingAudio());
1656    bool audioSinkChanged = false;
1657
1658    int32_t numChannels;
1659    CHECK(format->findInt32("channel-count", &numChannels));
1660
1661    int32_t channelMask;
1662    if (!format->findInt32("channel-mask", &channelMask)) {
1663        // signal to the AudioSink to derive the mask from count.
1664        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1665    }
1666
1667    int32_t sampleRate;
1668    CHECK(format->findInt32("sample-rate", &sampleRate));
1669
1670    if (offloadingAudio()) {
1671        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
1672        AString mime;
1673        CHECK(format->findString("mime", &mime));
1674        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1675
1676        if (err != OK) {
1677            ALOGE("Couldn't map mime \"%s\" to a valid "
1678                    "audio_format", mime.c_str());
1679            onDisableOffloadAudio();
1680        } else {
1681            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1682                    mime.c_str(), audioFormat);
1683
1684            int avgBitRate = -1;
1685            format->findInt32("bit-rate", &avgBitRate);
1686
1687            int32_t aacProfile = -1;
1688            if (audioFormat == AUDIO_FORMAT_AAC
1689                    && format->findInt32("aac-profile", &aacProfile)) {
1690                // Redefine AAC format as per aac profile
1691                mapAACProfileToAudioFormat(
1692                        audioFormat,
1693                        aacProfile);
1694            }
1695
1696            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1697            offloadInfo.duration_us = -1;
1698            format->findInt64(
1699                    "durationUs", &offloadInfo.duration_us);
1700            offloadInfo.sample_rate = sampleRate;
1701            offloadInfo.channel_mask = channelMask;
1702            offloadInfo.format = audioFormat;
1703            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1704            offloadInfo.bit_rate = avgBitRate;
1705            offloadInfo.has_video = hasVideo;
1706            offloadInfo.is_streaming = true;
1707
1708            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1709                ALOGV("openAudioSink: no change in offload mode");
1710                // no change from previous configuration, everything ok.
1711                return OK;
1712            }
1713            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1714
1715            ALOGV("openAudioSink: try to open AudioSink in offload mode");
1716            uint32_t offloadFlags = flags;
1717            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1718            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1719            audioSinkChanged = true;
1720            mAudioSink->close();
1721
1722            err = mAudioSink->open(
1723                    sampleRate,
1724                    numChannels,
1725                    (audio_channel_mask_t)channelMask,
1726                    audioFormat,
1727                    0 /* bufferCount - unused */,
1728                    &NuPlayer::Renderer::AudioSinkCallback,
1729                    this,
1730                    (audio_output_flags_t)offloadFlags,
1731                    &offloadInfo);
1732
1733            if (err == OK) {
1734                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1735            }
1736
1737            if (err == OK) {
1738                // If the playback is offloaded to h/w, we pass
1739                // the HAL some metadata information.
1740                // We don't want to do this for PCM because it
1741                // will be going through the AudioFlinger mixer
1742                // before reaching the hardware.
1743                // TODO
1744                mCurrentOffloadInfo = offloadInfo;
1745                if (!mPaused) { // for preview mode, don't start if paused
1746                    err = mAudioSink->start();
1747                }
1748                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1749            }
1750            if (err != OK) {
1751                // Clean up, fall back to non offload mode.
1752                mAudioSink->close();
1753                onDisableOffloadAudio();
1754                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1755                ALOGV("openAudioSink: offload failed");
1756            } else {
1757                mUseAudioCallback = true;  // offload mode transfers data through callback
1758                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1759            }
1760        }
1761    }
1762    if (!offloadOnly && !offloadingAudio()) {
1763        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1764        uint32_t pcmFlags = flags;
1765        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1766
1767        const PcmInfo info = {
1768                (audio_channel_mask_t)channelMask,
1769                (audio_output_flags_t)pcmFlags,
1770                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
1771                numChannels,
1772                sampleRate
1773        };
1774        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
1775            ALOGV("openAudioSink: no change in pcm mode");
1776            // no change from previous configuration, everything ok.
1777            return OK;
1778        }
1779
1780        audioSinkChanged = true;
1781        mAudioSink->close();
1782        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1783        // Note: It is possible to set up the callback, but not use it to send audio data.
1784        // This requires a fix in AudioSink to explicitly specify the transfer mode.
1785        mUseAudioCallback = getUseAudioCallbackSetting();
1786        if (mUseAudioCallback) {
1787            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1788        }
1789
1790        // Compute the desired buffer size.
1791        // For callback mode, the amount of time before wakeup is about half the buffer size.
1792        const uint32_t frameCount =
1793                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
1794
1795        status_t err = mAudioSink->open(
1796                    sampleRate,
1797                    numChannels,
1798                    (audio_channel_mask_t)channelMask,
1799                    AUDIO_FORMAT_PCM_16_BIT,
1800                    0 /* bufferCount - unused */,
1801                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
1802                    mUseAudioCallback ? this : NULL,
1803                    (audio_output_flags_t)pcmFlags,
1804                    NULL,
1805                    true /* doNotReconnect */,
1806                    frameCount);
1807        if (err == OK) {
1808            err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1809        }
1810        if (err != OK) {
1811            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
1812            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1813            return err;
1814        }
1815        mCurrentPcmInfo = info;
1816        if (!mPaused) { // for preview mode, don't start if paused
1817            mAudioSink->start();
1818        }
1819    }
1820    if (audioSinkChanged) {
1821        onAudioSinkChanged();
1822    }
1823    mAudioTornDown = false;
1824    return OK;
1825}
1826
1827void NuPlayer::Renderer::onCloseAudioSink() {
1828    mAudioSink->close();
1829    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1830    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1831}
1832
1833}  // namespace android
1834
1835