NuPlayerRenderer.cpp revision 58d315cae745aae2c87eb3e7cac2da5e25a57d4c
1/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayerRenderer"
19#include <utils/Log.h>
20
21#include "NuPlayerRenderer.h"
22#include <cutils/properties.h>
23#include <media/stagefright/foundation/ABuffer.h>
24#include <media/stagefright/foundation/ADebug.h>
25#include <media/stagefright/foundation/AMessage.h>
26#include <media/stagefright/foundation/AUtils.h>
27#include <media/stagefright/foundation/AWakeLock.h>
28#include <media/stagefright/MediaClock.h>
29#include <media/stagefright/MediaErrors.h>
30#include <media/stagefright/MetaData.h>
31#include <media/stagefright/Utils.h>
32#include <media/stagefright/VideoFrameScheduler.h>
33
34#include <inttypes.h>
35
36namespace android {
37
38/*
39 * Example of common configuration settings in shell script form
40
41   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
42   adb shell setprop audio.offload.disable 1
43
44   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
45   adb shell setprop audio.offload.video 1
46
47   #Use audio callbacks for PCM data
48   adb shell setprop media.stagefright.audio.cbk 1
49
50   #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
51   adb shell setprop media.stagefright.audio.deep 1
52
53   #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
54   adb shell setprop media.stagefright.audio.sink 1000
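
   #Read back a setting to confirm its value (illustrative use of the standard getprop command)
   adb shell getprop media.stagefright.audio.sink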
55
56 * These configurations take effect for the next track played (not the current track).
57 */
58
59static inline bool getUseAudioCallbackSetting() {
60    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
61}
62
63static inline int32_t getAudioSinkPcmMsSetting() {
64    return property_get_int32(
65            "media.stagefright.audio.sink", 500 /* default_value */);
66}
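// Illustrative note (not from the original code): with the example settings shown in the
// block above, getAudioSinkPcmMsSetting() would return 1000, i.e. roughly one second of
// PCM buffering requested from the audio sink; without the property it returns the
// default of 500 ms.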
67
68// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
69// is closed to allow the audio DSP to power down.
70static const int64_t kOffloadPauseMaxUs = 10000000ll;
71
72// static
73const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
74        AUDIO_CHANNEL_NONE,
75        AUDIO_OUTPUT_FLAG_NONE,
76        AUDIO_FORMAT_INVALID,
77        0, // mNumChannels
78        0 // mSampleRate
79};
80
81// static
82const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
83
84NuPlayer::Renderer::Renderer(
85        const sp<MediaPlayerBase::AudioSink> &sink,
86        const sp<AMessage> &notify,
87        uint32_t flags)
88    : mAudioSink(sink),
89      mNotify(notify),
90      mFlags(flags),
91      mNumFramesWritten(0),
92      mDrainAudioQueuePending(false),
93      mDrainVideoQueuePending(false),
94      mAudioQueueGeneration(0),
95      mVideoQueueGeneration(0),
96      mAudioDrainGeneration(0),
97      mVideoDrainGeneration(0),
98      mAudioEOSGeneration(0),
99      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
100      mAudioFirstAnchorTimeMediaUs(-1),
101      mAnchorTimeMediaUs(-1),
102      mAnchorNumFramesWritten(-1),
103      mVideoLateByUs(0ll),
104      mHasAudio(false),
105      mHasVideo(false),
106      mNotifyCompleteAudio(false),
107      mNotifyCompleteVideo(false),
108      mSyncQueues(false),
109      mPaused(false),
110      mPauseDrainAudioAllowedUs(0),
111      mVideoSampleReceived(false),
112      mVideoRenderingStarted(false),
113      mVideoRenderingStartGeneration(0),
114      mAudioRenderingStartGeneration(0),
115      mLastAudioMediaTimeUs(-1),
116      mAudioOffloadPauseTimeoutGeneration(0),
117      mAudioTornDown(false),
118      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
119      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
120      mTotalBuffersQueued(0),
121      mLastAudioBufferDrained(0),
122      mUseAudioCallback(false),
123      mWakeLock(new AWakeLock()) {
124    mMediaClock = new MediaClock;
125    mPlaybackRate = mPlaybackSettings.mSpeed;
126    mMediaClock->setPlaybackRate(mPlaybackRate);
127}
128
129NuPlayer::Renderer::~Renderer() {
130    if (offloadingAudio()) {
131        mAudioSink->stop();
132        mAudioSink->flush();
133        mAudioSink->close();
134    }
135}
136
137void NuPlayer::Renderer::queueBuffer(
138        bool audio,
139        const sp<ABuffer> &buffer,
140        const sp<AMessage> &notifyConsumed) {
141    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
142    msg->setInt32("queueGeneration", getQueueGeneration(audio));
143    msg->setInt32("audio", static_cast<int32_t>(audio));
144    msg->setBuffer("buffer", buffer);
145    msg->setMessage("notifyConsumed", notifyConsumed);
146    msg->post();
147}
148
149void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
150    CHECK_NE(finalResult, (status_t)OK);
151
152    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
153    msg->setInt32("queueGeneration", getQueueGeneration(audio));
154    msg->setInt32("audio", static_cast<int32_t>(audio));
155    msg->setInt32("finalResult", finalResult);
156    msg->post();
157}
158
159status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
160    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
161    writeToAMessage(msg, rate);
162    sp<AMessage> response;
163    status_t err = msg->postAndAwaitResponse(&response);
164    if (err == OK && response != NULL) {
165        CHECK(response->findInt32("err", &err));
166    }
167    return err;
168}
169
170status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
171    if (rate.mSpeed == 0.f) {
172        onPause();
173        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
174        // have to correspond to any non-zero speed (e.g. the old speed). Keep the
175        // settings nonetheless, using the old speed, in case the audiosink changes.
176        AudioPlaybackRate newRate = rate;
177        newRate.mSpeed = mPlaybackSettings.mSpeed;
178        mPlaybackSettings = newRate;
179        return OK;
180    }
181
182    if (mAudioSink != NULL && mAudioSink->ready()) {
183        status_t err = mAudioSink->setPlaybackRate(rate);
184        if (err != OK) {
185            return err;
186        }
187    }
188    mPlaybackSettings = rate;
189    mPlaybackRate = rate.mSpeed;
190    mMediaClock->setPlaybackRate(mPlaybackRate);
191    return OK;
192}
193
194status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
195    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
196    sp<AMessage> response;
197    status_t err = msg->postAndAwaitResponse(&response);
198    if (err == OK && response != NULL) {
199        CHECK(response->findInt32("err", &err));
200        if (err == OK) {
201            readFromAMessage(response, rate);
202        }
203    }
204    return err;
205}
206
207status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
208    if (mAudioSink != NULL && mAudioSink->ready()) {
209        status_t err = mAudioSink->getPlaybackRate(rate);
210        if (err == OK) {
211            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
212                ALOGW("correcting mismatch in internal/external playback rate");
213            }
214            // get the playback settings actually used by the audiosink, as they may be
215            // slightly off because the audiosink does not take very small changes.
216            mPlaybackSettings = *rate;
217            if (mPaused) {
218                rate->mSpeed = 0.f;
219            }
220        }
221        return err;
222    }
223    *rate = mPlaybackSettings;
224    return OK;
225}
226
227status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
228    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
229    writeToAMessage(msg, sync, videoFpsHint);
230    sp<AMessage> response;
231    status_t err = msg->postAndAwaitResponse(&response);
232    if (err == OK && response != NULL) {
233        CHECK(response->findInt32("err", &err));
234    }
235    return err;
236}
237
238status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
239    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
240        return BAD_VALUE;
241    }
242    // TODO: support sync sources
243    return INVALID_OPERATION;
244}
245
246status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
247    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
248    sp<AMessage> response;
249    status_t err = msg->postAndAwaitResponse(&response);
250    if (err == OK && response != NULL) {
251        CHECK(response->findInt32("err", &err));
252        if (err == OK) {
253            readFromAMessage(response, sync, videoFps);
254        }
255    }
256    return err;
257}
258
259status_t NuPlayer::Renderer::onGetSyncSettings(
260        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
261    *sync = mSyncSettings;
262    *videoFps = -1.f;
263    return OK;
264}
265
266void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
267    {
268        Mutex::Autolock autoLock(mLock);
269        if (audio) {
270            mNotifyCompleteAudio |= notifyComplete;
271            clearAudioFirstAnchorTime_l();
272            ++mAudioQueueGeneration;
273            ++mAudioDrainGeneration;
274        } else {
275            mNotifyCompleteVideo |= notifyComplete;
276            ++mVideoQueueGeneration;
277            ++mVideoDrainGeneration;
278        }
279
280        clearAnchorTime_l();
281        mVideoLateByUs = 0;
282        mSyncQueues = false;
283    }
284
285    sp<AMessage> msg = new AMessage(kWhatFlush, this);
286    msg->setInt32("audio", static_cast<int32_t>(audio));
287    msg->post();
288}
289
290void NuPlayer::Renderer::signalTimeDiscontinuity() {
291}
292
293void NuPlayer::Renderer::signalDisableOffloadAudio() {
294    (new AMessage(kWhatDisableOffloadAudio, this))->post();
295}
296
297void NuPlayer::Renderer::signalEnableOffloadAudio() {
298    (new AMessage(kWhatEnableOffloadAudio, this))->post();
299}
300
301void NuPlayer::Renderer::pause() {
302    (new AMessage(kWhatPause, this))->post();
303}
304
305void NuPlayer::Renderer::resume() {
306    (new AMessage(kWhatResume, this))->post();
307}
308
309void NuPlayer::Renderer::setVideoFrameRate(float fps) {
310    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
311    msg->setFloat("frame-rate", fps);
312    msg->post();
313}
314
315// May be called on any thread.
316status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
317    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
318}
319
320void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
321    mAudioFirstAnchorTimeMediaUs = -1;
322    mMediaClock->setStartingTimeMedia(-1);
323}
324
325void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
326    if (mAudioFirstAnchorTimeMediaUs == -1) {
327        mAudioFirstAnchorTimeMediaUs = mediaUs;
328        mMediaClock->setStartingTimeMedia(mediaUs);
329    }
330}
331
332void NuPlayer::Renderer::clearAnchorTime_l() {
333    mMediaClock->clearAnchor();
334    mAnchorTimeMediaUs = -1;
335    mAnchorNumFramesWritten = -1;
336}
337
338void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
339    Mutex::Autolock autoLock(mLock);
340    mVideoLateByUs = lateUs;
341}
342
343int64_t NuPlayer::Renderer::getVideoLateByUs() {
344    Mutex::Autolock autoLock(mLock);
345    return mVideoLateByUs;
346}
347
348status_t NuPlayer::Renderer::openAudioSink(
349        const sp<AMessage> &format,
350        bool offloadOnly,
351        bool hasVideo,
352        uint32_t flags,
353        bool *isOffloaded) {
354    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
355    msg->setMessage("format", format);
356    msg->setInt32("offload-only", offloadOnly);
357    msg->setInt32("has-video", hasVideo);
358    msg->setInt32("flags", flags);
359
360    sp<AMessage> response;
361    msg->postAndAwaitResponse(&response);
362
363    int32_t err;
364    if (!response->findInt32("err", &err)) {
365        err = INVALID_OPERATION;
366    } else if (err == OK && isOffloaded != NULL) {
367        int32_t offload;
368        CHECK(response->findInt32("offload", &offload));
369        *isOffloaded = (offload != 0);
370    }
371    return err;
372}
373
374void NuPlayer::Renderer::closeAudioSink() {
375    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
376
377    sp<AMessage> response;
378    msg->postAndAwaitResponse(&response);
379}
380
381void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
382    switch (msg->what()) {
383        case kWhatOpenAudioSink:
384        {
385            sp<AMessage> format;
386            CHECK(msg->findMessage("format", &format));
387
388            int32_t offloadOnly;
389            CHECK(msg->findInt32("offload-only", &offloadOnly));
390
391            int32_t hasVideo;
392            CHECK(msg->findInt32("has-video", &hasVideo));
393
394            uint32_t flags;
395            CHECK(msg->findInt32("flags", (int32_t *)&flags));
396
397            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
398
399            sp<AMessage> response = new AMessage;
400            response->setInt32("err", err);
401            response->setInt32("offload", offloadingAudio());
402
403            sp<AReplyToken> replyID;
404            CHECK(msg->senderAwaitsResponse(&replyID));
405            response->postReply(replyID);
406
407            break;
408        }
409
410        case kWhatCloseAudioSink:
411        {
412            sp<AReplyToken> replyID;
413            CHECK(msg->senderAwaitsResponse(&replyID));
414
415            onCloseAudioSink();
416
417            sp<AMessage> response = new AMessage;
418            response->postReply(replyID);
419            break;
420        }
421
422        case kWhatStopAudioSink:
423        {
424            mAudioSink->stop();
425            break;
426        }
427
428        case kWhatDrainAudioQueue:
429        {
430            mDrainAudioQueuePending = false;
431
432            int32_t generation;
433            CHECK(msg->findInt32("drainGeneration", &generation));
434            if (generation != getDrainGeneration(true /* audio */)) {
435                break;
436            }
437
438            if (onDrainAudioQueue()) {
439                uint32_t numFramesPlayed;
440                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
441                         (status_t)OK);
442
443                uint32_t numFramesPendingPlayout =
444                    mNumFramesWritten - numFramesPlayed;
445
446                // This is how long the audio sink will have data to
447                // play back.
448                int64_t delayUs =
449                    mAudioSink->msecsPerFrame()
450                        * numFramesPendingPlayout * 1000ll;
451                if (mPlaybackRate > 1.0f) {
452                    delayUs /= mPlaybackRate;
453                }
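                // Worked example (illustrative numbers, not from the code): for 44.1 kHz
                // PCM, msecsPerFrame() is about 0.0227, so 22050 pending frames give
                // delayUs ~= 0.0227 * 22050 * 1000 ~= 500000 us; at 2x playback speed
                // this is halved to ~250000 us.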
454
455                // Let's give it more data after about half that time
456                // has elapsed.
457                Mutex::Autolock autoLock(mLock);
458                postDrainAudioQueue_l(delayUs / 2);
459            }
460            break;
461        }
462
463        case kWhatDrainVideoQueue:
464        {
465            int32_t generation;
466            CHECK(msg->findInt32("drainGeneration", &generation));
467            if (generation != getDrainGeneration(false /* audio */)) {
468                break;
469            }
470
471            mDrainVideoQueuePending = false;
472
473            onDrainVideoQueue();
474
475            postDrainVideoQueue();
476            break;
477        }
478
479        case kWhatPostDrainVideoQueue:
480        {
481            int32_t generation;
482            CHECK(msg->findInt32("drainGeneration", &generation));
483            if (generation != getDrainGeneration(false /* audio */)) {
484                break;
485            }
486
487            mDrainVideoQueuePending = false;
488            postDrainVideoQueue();
489            break;
490        }
491
492        case kWhatQueueBuffer:
493        {
494            onQueueBuffer(msg);
495            break;
496        }
497
498        case kWhatQueueEOS:
499        {
500            onQueueEOS(msg);
501            break;
502        }
503
504        case kWhatEOS:
505        {
506            int32_t generation;
507            CHECK(msg->findInt32("audioEOSGeneration", &generation));
508            if (generation != mAudioEOSGeneration) {
509                break;
510            }
511            status_t finalResult;
512            CHECK(msg->findInt32("finalResult", &finalResult));
513            notifyEOS(true /* audio */, finalResult);
514            break;
515        }
516
517        case kWhatConfigPlayback:
518        {
519            sp<AReplyToken> replyID;
520            CHECK(msg->senderAwaitsResponse(&replyID));
521            AudioPlaybackRate rate;
522            readFromAMessage(msg, &rate);
523            status_t err = onConfigPlayback(rate);
524            sp<AMessage> response = new AMessage;
525            response->setInt32("err", err);
526            response->postReply(replyID);
527            break;
528        }
529
530        case kWhatGetPlaybackSettings:
531        {
532            sp<AReplyToken> replyID;
533            CHECK(msg->senderAwaitsResponse(&replyID));
534            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
535            status_t err = onGetPlaybackSettings(&rate);
536            sp<AMessage> response = new AMessage;
537            if (err == OK) {
538                writeToAMessage(response, rate);
539            }
540            response->setInt32("err", err);
541            response->postReply(replyID);
542            break;
543        }
544
545        case kWhatConfigSync:
546        {
547            sp<AReplyToken> replyID;
548            CHECK(msg->senderAwaitsResponse(&replyID));
549            AVSyncSettings sync;
550            float videoFpsHint;
551            readFromAMessage(msg, &sync, &videoFpsHint);
552            status_t err = onConfigSync(sync, videoFpsHint);
553            sp<AMessage> response = new AMessage;
554            response->setInt32("err", err);
555            response->postReply(replyID);
556            break;
557        }
558
559        case kWhatGetSyncSettings:
560        {
561            sp<AReplyToken> replyID;
562            CHECK(msg->senderAwaitsResponse(&replyID));
563
564            ALOGV("kWhatGetSyncSettings");
565            AVSyncSettings sync;
566            float videoFps = -1.f;
567            status_t err = onGetSyncSettings(&sync, &videoFps);
568            sp<AMessage> response = new AMessage;
569            if (err == OK) {
570                writeToAMessage(response, sync, videoFps);
571            }
572            response->setInt32("err", err);
573            response->postReply(replyID);
574            break;
575        }
576
577        case kWhatFlush:
578        {
579            onFlush(msg);
580            break;
581        }
582
583        case kWhatDisableOffloadAudio:
584        {
585            onDisableOffloadAudio();
586            break;
587        }
588
589        case kWhatEnableOffloadAudio:
590        {
591            onEnableOffloadAudio();
592            break;
593        }
594
595        case kWhatPause:
596        {
597            onPause();
598            break;
599        }
600
601        case kWhatResume:
602        {
603            onResume();
604            break;
605        }
606
607        case kWhatSetVideoFrameRate:
608        {
609            float fps;
610            CHECK(msg->findFloat("frame-rate", &fps));
611            onSetVideoFrameRate(fps);
612            break;
613        }
614
615        case kWhatAudioTearDown:
616        {
617            onAudioTearDown(kDueToError);
618            break;
619        }
620
621        case kWhatAudioOffloadPauseTimeout:
622        {
623            int32_t generation;
624            CHECK(msg->findInt32("drainGeneration", &generation));
625            if (generation != mAudioOffloadPauseTimeoutGeneration) {
626                break;
627            }
628            ALOGV("Audio Offload tear down due to pause timeout.");
629            onAudioTearDown(kDueToTimeout);
630            mWakeLock->release();
631            break;
632        }
633
634        default:
635            TRESPASS();
636            break;
637    }
638}
639
640void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
641    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
642        return;
643    }
644
645    if (mAudioQueue.empty()) {
646        return;
647    }
648
649    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
650    if (mPaused) {
651        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
652        if (diffUs > delayUs) {
653            delayUs = diffUs;
654        }
655    }
656
657    mDrainAudioQueuePending = true;
658    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
659    msg->setInt32("drainGeneration", mAudioDrainGeneration);
660    msg->post(delayUs);
661}
662
663void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
664    mAudioRenderingStartGeneration = mAudioDrainGeneration;
665    mVideoRenderingStartGeneration = mVideoDrainGeneration;
666}
667
668void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
669    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
670        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
671        mVideoRenderingStartGeneration = -1;
672        mAudioRenderingStartGeneration = -1;
673
674        sp<AMessage> notify = mNotify->dup();
675        notify->setInt32("what", kWhatMediaRenderingStart);
676        notify->post();
677    }
678}
679
680// static
681size_t NuPlayer::Renderer::AudioSinkCallback(
682        MediaPlayerBase::AudioSink * /* audioSink */,
683        void *buffer,
684        size_t size,
685        void *cookie,
686        MediaPlayerBase::AudioSink::cb_event_t event) {
687    NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
688
689    switch (event) {
690        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
691        {
692            return me->fillAudioBuffer(buffer, size);
693            break;
694        }
695
696        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
697        {
698            ALOGV("AudioSink::CB_EVENT_STREAM_END");
699            me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
700            break;
701        }
702
703        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
704        {
705            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
706            me->notifyAudioTearDown();
707            break;
708        }
709    }
710
711    return 0;
712}
713
714size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
715    Mutex::Autolock autoLock(mLock);
716
717    if (!mUseAudioCallback) {
718        return 0;
719    }
720
721    bool hasEOS = false;
722
723    size_t sizeCopied = 0;
724    bool firstEntry = true;
725    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
726    while (sizeCopied < size && !mAudioQueue.empty()) {
727        entry = &*mAudioQueue.begin();
728
729        if (entry->mBuffer == NULL) { // EOS
730            hasEOS = true;
731            mAudioQueue.erase(mAudioQueue.begin());
732            break;
733        }
734
735        if (firstEntry && entry->mOffset == 0) {
736            firstEntry = false;
737            int64_t mediaTimeUs;
738            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
739            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
740            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
741        }
742
743        size_t copy = entry->mBuffer->size() - entry->mOffset;
744        size_t sizeRemaining = size - sizeCopied;
745        if (copy > sizeRemaining) {
746            copy = sizeRemaining;
747        }
748
749        memcpy((char *)buffer + sizeCopied,
750               entry->mBuffer->data() + entry->mOffset,
751               copy);
752
753        entry->mOffset += copy;
754        if (entry->mOffset == entry->mBuffer->size()) {
755            entry->mNotifyConsumed->post();
756            mAudioQueue.erase(mAudioQueue.begin());
757            entry = NULL;
758        }
759        sizeCopied += copy;
760
761        notifyIfMediaRenderingStarted_l();
762    }
763
764    if (mAudioFirstAnchorTimeMediaUs >= 0) {
765        int64_t nowUs = ALooper::GetNowUs();
766        int64_t nowMediaUs =
767            mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
768        // we don't know how much data we are queueing for offloaded tracks.
769        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
770    }
771
772    // for non-offloaded audio, we need to compute the frames written because
773    // there is no EVENT_STREAM_END notification. The frames-written count gives
774    // an estimate of the pending playout duration.
775    if (!offloadingAudio()) {
776        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
777    }
778
779    if (hasEOS) {
780        (new AMessage(kWhatStopAudioSink, this))->post();
781        // As there is currently no EVENT_STREAM_END callback notification for
782        // non-offloaded audio tracks, we need to post the EOS ourselves.
783        if (!offloadingAudio()) {
784            int64_t postEOSDelayUs = 0;
785            if (mAudioSink->needsTrailingPadding()) {
786                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
787            }
788            ALOGV("fillAudioBuffer: notifyEOS "
789                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
790                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
791            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
792        }
793    }
794    return sizeCopied;
795}
796
797void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
798    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
799    bool foundEOS = false;
800    while (it != mAudioQueue.end()) {
801        int32_t eos;
802        QueueEntry *entry = &*it++;
803        if (entry->mBuffer == NULL
804                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
805            itEOS = it;
806            foundEOS = true;
807        }
808    }
809
810    if (foundEOS) {
811        // post all replies before EOS and drop the samples
812        for (it = mAudioQueue.begin(); it != itEOS; it++) {
813            if (it->mBuffer == NULL) {
814                // delay doesn't matter as we don't even have an AudioTrack
815                notifyEOS(true /* audio */, it->mFinalResult);
816            } else {
817                it->mNotifyConsumed->post();
818            }
819        }
820        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
821    }
822}
823
824bool NuPlayer::Renderer::onDrainAudioQueue() {
825    // do not drain audio during teardown as queued buffers may be invalid.
826    if (mAudioTornDown) {
827        return false;
828    }
829    // TODO: This call to getPosition checks if AudioTrack has been created
830    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
831    // CHECKs on getPosition will fail.
832    // We still need to figure out why AudioTrack is not created when
833    // this function is called. One possible reason could be leftover
834    // audio. Another place to check is whether the decoder has received
835    // INFO_FORMAT_CHANGED as its first buffer, since the AudioSink is opened
836    // there; also consider possible interactions with a flush immediately
837    // after start. Investigate the error message
838    // "vorbis_dsp_synthesis returned -135", along with RTSP.
839    uint32_t numFramesPlayed;
840    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
841        // When getPosition fails, the renderer will not reschedule the draining
842        // unless new samples are queued.
843        // If we have pending EOS (or "eos" marker for discontinuities), we need
844        // to post them now, as NuPlayerDecoder might be waiting for them.
845        drainAudioQueueUntilLastEOS();
846
847        ALOGW("onDrainAudioQueue(): audio sink is not ready");
848        return false;
849    }
850
851#if 0
852    ssize_t numFramesAvailableToWrite =
853        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
854
855    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
856        ALOGI("audio sink underrun");
857    } else {
858        ALOGV("audio queue has %d frames left to play",
859             mAudioSink->frameCount() - numFramesAvailableToWrite);
860    }
861#endif
862
863    uint32_t prevFramesWritten = mNumFramesWritten;
864    while (!mAudioQueue.empty()) {
865        QueueEntry *entry = &*mAudioQueue.begin();
866
867        mLastAudioBufferDrained = entry->mBufferOrdinal;
868
869        if (entry->mBuffer == NULL) {
870            // EOS
871            int64_t postEOSDelayUs = 0;
872            if (mAudioSink->needsTrailingPadding()) {
873                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
874            }
875            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
876            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
877
878            mAudioQueue.erase(mAudioQueue.begin());
879            entry = NULL;
880            if (mAudioSink->needsTrailingPadding()) {
881                // If we're not in gapless playback (i.e. through setNextPlayer), we
882                // need to stop the track here, because that will play out the last
883                // little bit at the end of the file. Otherwise short files won't play.
884                mAudioSink->stop();
885                mNumFramesWritten = 0;
886            }
887            return false;
888        }
889
890        // ignore a 0-sized buffer, which could be an EOS marker with no data
891        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
892            int64_t mediaTimeUs;
893            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
894            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
895                    mediaTimeUs / 1E6);
896            onNewAudioMediaTime(mediaTimeUs);
897        }
898
899        size_t copy = entry->mBuffer->size() - entry->mOffset;
900
901        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
902                                            copy, false /* blocking */);
903        if (written < 0) {
904            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
905            if (written == WOULD_BLOCK) {
906                ALOGV("AudioSink write would block when writing %zu bytes", copy);
907            } else {
908                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
909                // This can only happen when AudioSink was opened with doNotReconnect flag set to
910                // true, in which case the NuPlayer will handle the reconnect.
911                notifyAudioTearDown();
912            }
913            break;
914        }
915
916        entry->mOffset += written;
917        if (entry->mOffset == entry->mBuffer->size()) {
918            entry->mNotifyConsumed->post();
919            mAudioQueue.erase(mAudioQueue.begin());
920
921            entry = NULL;
922        }
923
924        size_t copiedFrames = written / mAudioSink->frameSize();
925        mNumFramesWritten += copiedFrames;
926
927        {
928            Mutex::Autolock autoLock(mLock);
929            notifyIfMediaRenderingStarted_l();
930        }
931
932        if (written != (ssize_t)copy) {
933            // A short count was received from AudioSink::write()
934            //
935            // AudioSink write is called in non-blocking mode.
936            // It may return with a short count when:
937            //
938            // 1) Size to be copied is not a multiple of the frame size. We consider this fatal.
939            // 2) The data to be copied exceeds the available buffer in AudioSink.
940            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
941            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
942
943            // (Case 1)
944            // The copy size must be a multiple of the frame size. If it is not, it
945            // needs to fail, as we should not carry fractional frames over between calls.
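            // (Illustrative: 16-bit stereo PCM has a 4-byte frame size, so copy must be
            // a multiple of 4 bytes here.)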
946            CHECK_EQ(copy % mAudioSink->frameSize(), 0);
947
948            // (Case 2, 3, 4)
949            // Return early to the caller.
950            // Beware of calling immediately again as this may busy-loop if you are not careful.
951            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
952            break;
953        }
954    }
955    int64_t maxTimeMedia;
956    {
957        Mutex::Autolock autoLock(mLock);
958        maxTimeMedia =
959            mAnchorTimeMediaUs +
960                    (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
961                            * 1000LL * mAudioSink->msecsPerFrame());
962    }
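    // Worked example (illustrative numbers, not from the code): with an anchor at media
    // time 2.000 s, an anchor frame count of 0 and 48000 frames written at 48 kHz
    // (msecsPerFrame() ~= 0.0208), maxTimeMedia ~= 2.000 s + 1.000 s = 3.000 s.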
963    mMediaClock->updateMaxTimeMedia(maxTimeMedia);
964
965    // calculate whether we need to reschedule another write.
966    bool reschedule = !mAudioQueue.empty()
967            && (!mPaused
968                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
969    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
970    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
971    return reschedule;
972}
973
974int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
975    int32_t sampleRate = offloadingAudio() ?
976            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
977    if (sampleRate == 0) {
978        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
979        return 0;
980    }
981    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
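    // (Derivation, for reference: the cast goes wrong once numFrames reaches 2^31 =
    // 2147483648 frames, which at 48 kHz is about 2147483648 / 48000 ~= 44739 s ~= 12.4 hours.)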
982    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
983}
984
985// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
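// Illustrative: if 1.5 s of audio has been written to the sink and 1.2 s has already
// played out, roughly 0.3 s is still pending.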
986int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
987    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
988    return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs);
989}
990
991int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
992    int64_t realUs;
993    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
994        // If we failed to get the current position, e.g. because the audio clock is
995        // not ready, just play out the video immediately without delay.
996        return nowUs;
997    }
998    return realUs;
999}
1000
1001void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
1002    Mutex::Autolock autoLock(mLock);
1003    // TRICKY: vorbis decoder generates multiple frames with the same
1004    // timestamp, so only update on the first frame with a given timestamp
1005    if (mediaTimeUs == mAnchorTimeMediaUs) {
1006        return;
1007    }
1008    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
1009    int64_t nowUs = ALooper::GetNowUs();
1010    int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
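    // Illustrative: a buffer stamped 5.000 s with ~120 ms of written-but-unplayed audio
    // still in the sink anchors the clock so that "now" maps to media time ~4.880 s.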
1011    mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
1012    mAnchorNumFramesWritten = mNumFramesWritten;
1013    mAnchorTimeMediaUs = mediaTimeUs;
1014}
1015
1016// Called without mLock acquired.
1017void NuPlayer::Renderer::postDrainVideoQueue() {
1018    if (mDrainVideoQueuePending
1019            || getSyncQueues()
1020            || (mPaused && mVideoSampleReceived)) {
1021        return;
1022    }
1023
1024    if (mVideoQueue.empty()) {
1025        return;
1026    }
1027
1028    QueueEntry &entry = *mVideoQueue.begin();
1029
1030    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
1031    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
1032
1033    if (entry.mBuffer == NULL) {
1034        // EOS doesn't carry a timestamp.
1035        msg->post();
1036        mDrainVideoQueuePending = true;
1037        return;
1038    }
1039
1040    int64_t delayUs;
1041    int64_t nowUs = ALooper::GetNowUs();
1042    int64_t realTimeUs;
1043    if (mFlags & FLAG_REAL_TIME) {
1044        int64_t mediaTimeUs;
1045        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1046        realTimeUs = mediaTimeUs;
1047    } else {
1048        int64_t mediaTimeUs;
1049        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1050
1051        {
1052            Mutex::Autolock autoLock(mLock);
1053            if (mAnchorTimeMediaUs < 0) {
1054                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
1055                mAnchorTimeMediaUs = mediaTimeUs;
1056                realTimeUs = nowUs;
1057            } else {
1058                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1059            }
1060        }
1061        if (!mHasAudio) {
1062            // smooth out videos >= 10fps
1063            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1064        }
1065
1066        // Heuristic to handle the situation where the media time changed without a
1067        // discontinuity. If we have not drained an audio buffer that was
1068        // received after this buffer, repost in 10 msec. Otherwise repost
1069        // in 500 msec.
1070        delayUs = realTimeUs - nowUs;
1071        if (delayUs > 500000) {
1072            int64_t postDelayUs = 500000;
1073            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
1074                postDelayUs = 10000;
1075            }
1076            msg->setWhat(kWhatPostDrainVideoQueue);
1077            msg->post(postDelayUs);
1078            mVideoScheduler->restart();
1079            ALOGI("possible video time jump of %dms, retrying in %dms",
1080                    (int)(delayUs / 1000), (int)(postDelayUs / 1000));
1081            mDrainVideoQueuePending = true;
1082            return;
1083        }
1084    }
1085
1086    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1087    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
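    // Illustrative numbers: on a 60 Hz display the vsync period is ~16667 us, so
    // twoVsyncsUs ~= 33333 us; a frame due 50 ms from now is posted ~16.7 ms from now.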
1088
1089    delayUs = realTimeUs - nowUs;
1090
1091    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
1092    // post 2 display refreshes before rendering is due
1093    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1094
1095    mDrainVideoQueuePending = true;
1096}
1097
1098void NuPlayer::Renderer::onDrainVideoQueue() {
1099    if (mVideoQueue.empty()) {
1100        return;
1101    }
1102
1103    QueueEntry *entry = &*mVideoQueue.begin();
1104
1105    if (entry->mBuffer == NULL) {
1106        // EOS
1107
1108        notifyEOS(false /* audio */, entry->mFinalResult);
1109
1110        mVideoQueue.erase(mVideoQueue.begin());
1111        entry = NULL;
1112
1113        setVideoLateByUs(0);
1114        return;
1115    }
1116
1117    int64_t nowUs = -1;
1118    int64_t realTimeUs;
1119    int64_t mediaTimeUs = -1;
1120    if (mFlags & FLAG_REAL_TIME) {
1121        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1122    } else {
1123        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1124
1125        nowUs = ALooper::GetNowUs();
1126        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1127    }
1128
1129    bool tooLate = false;
1130
1131    if (!mPaused) {
1132        if (nowUs == -1) {
1133            nowUs = ALooper::GetNowUs();
1134        }
1135        setVideoLateByUs(nowUs - realTimeUs);
1136        tooLate = (mVideoLateByUs > 40000);
1137
1138        if (tooLate) {
1139            ALOGV("video late by %lld us (%.2f secs)",
1140                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1141        } else {
1142            int64_t mediaUs = 0;
1143            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1144            ALOGV("rendering video at media time %.2f secs",
1145                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
1146                    mediaUs) / 1E6);
1147
1148            if (!(mFlags & FLAG_REAL_TIME)
1149                    && mLastAudioMediaTimeUs != -1
1150                    && mediaTimeUs > mLastAudioMediaTimeUs) {
1151                // If audio ends before video, video continues to drive media clock.
1152                // Also smooth out videos >= 10fps.
1153                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
1154            }
1155        }
1156    } else {
1157        setVideoLateByUs(0);
1158        if (!mVideoSampleReceived && !mHasAudio) {
1159            // This will ensure that the first frame after a flush won't be used as anchor
1160            // when the renderer is in paused state, because a resume can happen at any time after a seek.
1161            Mutex::Autolock autoLock(mLock);
1162            clearAnchorTime_l();
1163        }
1164    }
1165
1166    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
1167    entry->mNotifyConsumed->setInt32("render", !tooLate);
1168    entry->mNotifyConsumed->post();
1169    mVideoQueue.erase(mVideoQueue.begin());
1170    entry = NULL;
1171
1172    mVideoSampleReceived = true;
1173
1174    if (!mPaused) {
1175        if (!mVideoRenderingStarted) {
1176            mVideoRenderingStarted = true;
1177            notifyVideoRenderingStart();
1178        }
1179        Mutex::Autolock autoLock(mLock);
1180        notifyIfMediaRenderingStarted_l();
1181    }
1182}
1183
1184void NuPlayer::Renderer::notifyVideoRenderingStart() {
1185    sp<AMessage> notify = mNotify->dup();
1186    notify->setInt32("what", kWhatVideoRenderingStart);
1187    notify->post();
1188}
1189
1190void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1191    if (audio && delayUs > 0) {
1192        sp<AMessage> msg = new AMessage(kWhatEOS, this);
1193        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
1194        msg->setInt32("finalResult", finalResult);
1195        msg->post(delayUs);
1196        return;
1197    }
1198    sp<AMessage> notify = mNotify->dup();
1199    notify->setInt32("what", kWhatEOS);
1200    notify->setInt32("audio", static_cast<int32_t>(audio));
1201    notify->setInt32("finalResult", finalResult);
1202    notify->post(delayUs);
1203}
1204
1205void NuPlayer::Renderer::notifyAudioTearDown() {
1206    (new AMessage(kWhatAudioTearDown, this))->post();
1207}
1208
1209void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1210    int32_t audio;
1211    CHECK(msg->findInt32("audio", &audio));
1212
1213    if (dropBufferIfStale(audio, msg)) {
1214        return;
1215    }
1216
1217    if (audio) {
1218        mHasAudio = true;
1219    } else {
1220        mHasVideo = true;
1221    }
1222
1223    if (mHasVideo) {
1224        if (mVideoScheduler == NULL) {
1225            mVideoScheduler = new VideoFrameScheduler();
1226            mVideoScheduler->init();
1227        }
1228    }
1229
1230    sp<ABuffer> buffer;
1231    CHECK(msg->findBuffer("buffer", &buffer));
1232
1233    sp<AMessage> notifyConsumed;
1234    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1235
1236    QueueEntry entry;
1237    entry.mBuffer = buffer;
1238    entry.mNotifyConsumed = notifyConsumed;
1239    entry.mOffset = 0;
1240    entry.mFinalResult = OK;
1241    entry.mBufferOrdinal = ++mTotalBuffersQueued;
1242
1243    if (audio) {
1244        Mutex::Autolock autoLock(mLock);
1245        mAudioQueue.push_back(entry);
1246        postDrainAudioQueue_l();
1247    } else {
1248        mVideoQueue.push_back(entry);
1249        postDrainVideoQueue();
1250    }
1251
1252    Mutex::Autolock autoLock(mLock);
1253    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1254        return;
1255    }
1256
1257    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1258    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1259
1260    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1261        // EOS signalled on either queue.
1262        syncQueuesDone_l();
1263        return;
1264    }
1265
1266    int64_t firstAudioTimeUs;
1267    int64_t firstVideoTimeUs;
1268    CHECK(firstAudioBuffer->meta()
1269            ->findInt64("timeUs", &firstAudioTimeUs));
1270    CHECK(firstVideoBuffer->meta()
1271            ->findInt64("timeUs", &firstVideoTimeUs));
1272
1273    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1274
1275    ALOGV("queueDiff = %.2f secs", diff / 1E6);
1276
1277    if (diff > 100000ll) {
1278        // Audio data starts more than 0.1 secs before video.
1279        // Drop some audio.
1280
1281        (*mAudioQueue.begin()).mNotifyConsumed->post();
1282        mAudioQueue.erase(mAudioQueue.begin());
1283        return;
1284    }
1285
1286    syncQueuesDone_l();
1287}
1288
1289void NuPlayer::Renderer::syncQueuesDone_l() {
1290    if (!mSyncQueues) {
1291        return;
1292    }
1293
1294    mSyncQueues = false;
1295
1296    if (!mAudioQueue.empty()) {
1297        postDrainAudioQueue_l();
1298    }
1299
1300    if (!mVideoQueue.empty()) {
1301        mLock.unlock();
1302        postDrainVideoQueue();
1303        mLock.lock();
1304    }
1305}
1306
1307void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1308    int32_t audio;
1309    CHECK(msg->findInt32("audio", &audio));
1310
1311    if (dropBufferIfStale(audio, msg)) {
1312        return;
1313    }
1314
1315    int32_t finalResult;
1316    CHECK(msg->findInt32("finalResult", &finalResult));
1317
1318    QueueEntry entry;
1319    entry.mOffset = 0;
1320    entry.mFinalResult = finalResult;
1321
1322    if (audio) {
1323        Mutex::Autolock autoLock(mLock);
1324        if (mAudioQueue.empty() && mSyncQueues) {
1325            syncQueuesDone_l();
1326        }
1327        mAudioQueue.push_back(entry);
1328        postDrainAudioQueue_l();
1329    } else {
1330        if (mVideoQueue.empty() && getSyncQueues()) {
1331            Mutex::Autolock autoLock(mLock);
1332            syncQueuesDone_l();
1333        }
1334        mVideoQueue.push_back(entry);
1335        postDrainVideoQueue();
1336    }
1337}
1338
1339void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1340    int32_t audio, notifyComplete;
1341    CHECK(msg->findInt32("audio", &audio));
1342
1343    {
1344        Mutex::Autolock autoLock(mLock);
1345        if (audio) {
1346            notifyComplete = mNotifyCompleteAudio;
1347            mNotifyCompleteAudio = false;
1348            mLastAudioMediaTimeUs = -1;
1349        } else {
1350            notifyComplete = mNotifyCompleteVideo;
1351            mNotifyCompleteVideo = false;
1352        }
1353
1354        // If we're currently syncing the queues, i.e. dropping audio while
1355        // aligning the first audio/video buffer times and only one of the
1356        // two queues has data, we may starve that queue by not requesting
1357        // more buffers from the decoder. If the other source then encounters
1358        // a discontinuity that leads to flushing, we'll never find the
1359        // corresponding discontinuity on the other queue.
1360        // Therefore we'll stop syncing the queues if at least one of them
1361        // is flushed.
1362        syncQueuesDone_l();
1363        clearAnchorTime_l();
1364    }
1365
1366    ALOGV("flushing %s", audio ? "audio" : "video");
1367    if (audio) {
1368        {
1369            Mutex::Autolock autoLock(mLock);
1370            flushQueue(&mAudioQueue);
1371
1372            ++mAudioDrainGeneration;
1373            ++mAudioEOSGeneration;
1374            prepareForMediaRenderingStart_l();
1375
1376            // the frame count will be reset after flush.
1377            clearAudioFirstAnchorTime_l();
1378        }
1379
1380        mDrainAudioQueuePending = false;
1381
1382        if (offloadingAudio()) {
1383            mAudioSink->pause();
1384            mAudioSink->flush();
1385            if (!mPaused) {
1386                mAudioSink->start();
1387            }
1388        } else {
1389            mAudioSink->pause();
1390            mAudioSink->flush();
1391            // Call stop() to signal to the AudioSink to completely fill the
1392            // internal buffer before resuming playback.
1393            // FIXME: this is ignored after flush().
1394            mAudioSink->stop();
1395            if (mPaused) {
1396                // Race condition: if renderer is paused and audio sink is stopped,
1397                // we need to make sure that the audio track buffer fully drains
1398                // before delivering data.
1399                // FIXME: remove this if we can detect if stop() is complete.
1400                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
1401                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
1402            } else {
1403                mAudioSink->start();
1404            }
1405            mNumFramesWritten = 0;
1406        }
1407    } else {
1408        flushQueue(&mVideoQueue);
1409
1410        mDrainVideoQueuePending = false;
1411
1412        if (mVideoScheduler != NULL) {
1413            mVideoScheduler->restart();
1414        }
1415
1416        Mutex::Autolock autoLock(mLock);
1417        ++mVideoDrainGeneration;
1418        prepareForMediaRenderingStart_l();
1419    }
1420
1421    mVideoSampleReceived = false;
1422
1423    if (notifyComplete) {
1424        notifyFlushComplete(audio);
1425    }
1426}
1427
1428void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1429    while (!queue->empty()) {
1430        QueueEntry *entry = &*queue->begin();
1431
1432        if (entry->mBuffer != NULL) {
1433            entry->mNotifyConsumed->post();
1434        }
1435
1436        queue->erase(queue->begin());
1437        entry = NULL;
1438    }
1439}
1440
1441void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1442    sp<AMessage> notify = mNotify->dup();
1443    notify->setInt32("what", kWhatFlushComplete);
1444    notify->setInt32("audio", static_cast<int32_t>(audio));
1445    notify->post();
1446}
1447
1448bool NuPlayer::Renderer::dropBufferIfStale(
1449        bool audio, const sp<AMessage> &msg) {
1450    int32_t queueGeneration;
1451    CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1452
1453    if (queueGeneration == getQueueGeneration(audio)) {
1454        return false;
1455    }
1456
1457    sp<AMessage> notifyConsumed;
1458    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1459        notifyConsumed->post();
1460    }
1461
1462    return true;
1463}
1464
1465void NuPlayer::Renderer::onAudioSinkChanged() {
1466    if (offloadingAudio()) {
1467        return;
1468    }
1469    CHECK(!mDrainAudioQueuePending);
1470    mNumFramesWritten = 0;
1471    {
1472        Mutex::Autolock autoLock(mLock);
1473        mAnchorNumFramesWritten = -1;
1474    }
1475    uint32_t written;
1476    if (mAudioSink->getFramesWritten(&written) == OK) {
1477        mNumFramesWritten = written;
1478    }
1479}
1480
1481void NuPlayer::Renderer::onDisableOffloadAudio() {
1482    Mutex::Autolock autoLock(mLock);
1483    mFlags &= ~FLAG_OFFLOAD_AUDIO;
1484    ++mAudioDrainGeneration;
1485    if (mAudioRenderingStartGeneration != -1) {
1486        prepareForMediaRenderingStart_l();
1487    }
1488}
1489
1490void NuPlayer::Renderer::onEnableOffloadAudio() {
1491    Mutex::Autolock autoLock(mLock);
1492    mFlags |= FLAG_OFFLOAD_AUDIO;
1493    ++mAudioDrainGeneration;
1494    if (mAudioRenderingStartGeneration != -1) {
1495        prepareForMediaRenderingStart_l();
1496    }
1497}
1498
1499void NuPlayer::Renderer::onPause() {
1500    if (mPaused) {
1501        return;
1502    }
1503
1504    {
1505        Mutex::Autolock autoLock(mLock);
1506        // we do not increment the audio drain generation so that we keep filling the audio buffer during pause.
1507        ++mVideoDrainGeneration;
1508        prepareForMediaRenderingStart_l();
1509        mPaused = true;
1510        mMediaClock->setPlaybackRate(0.0);
1511    }
1512
1513    mDrainAudioQueuePending = false;
1514    mDrainVideoQueuePending = false;
1515
1516    if (mHasAudio) {
1517        mAudioSink->pause();
1518        startAudioOffloadPauseTimeout();
1519    }
1520
1521    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1522          mAudioQueue.size(), mVideoQueue.size());
1523}
1524
1525void NuPlayer::Renderer::onResume() {
1526    if (!mPaused) {
1527        return;
1528    }
1529
1530    if (mHasAudio) {
1531        cancelAudioOffloadPauseTimeout();
1532        status_t err = mAudioSink->start();
1533        if (err != OK) {
1534            ALOGE("cannot start AudioSink err %d", err);
1535            notifyAudioTearDown();
1536        }
1537    }
1538
1539    {
1540        Mutex::Autolock autoLock(mLock);
1541        mPaused = false;
1542
1543        // configure audiosink as we did not do it when pausing
1544        if (mAudioSink != NULL && mAudioSink->ready()) {
1545            mAudioSink->setPlaybackRate(mPlaybackSettings);
1546        }
1547
1548        mMediaClock->setPlaybackRate(mPlaybackRate);
1549
1550        if (!mAudioQueue.empty()) {
1551            postDrainAudioQueue_l();
1552        }
1553    }
1554
1555    if (!mVideoQueue.empty()) {
1556        postDrainVideoQueue();
1557    }
1558}
1559
1560void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1561    if (mVideoScheduler == NULL) {
1562        mVideoScheduler = new VideoFrameScheduler();
1563    }
1564    mVideoScheduler->init(fps);
1565}
1566
1567int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1568    Mutex::Autolock autoLock(mLock);
1569    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1570}
1571
1572int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1573    Mutex::Autolock autoLock(mLock);
1574    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1575}
1576
1577bool NuPlayer::Renderer::getSyncQueues() {
1578    Mutex::Autolock autoLock(mLock);
1579    return mSyncQueues;
1580}
1581
1582// TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs()
1583// as it acquires locks and may query the audio driver.
1584//
1585// Some calls could conceivably retrieve extrapolated data instead of
1586// accessing getTimestamp() or getPosition() every time a data buffer with
1587// a media time is received.
1588//
1589// Calculate duration of played samples if played at normal rate (i.e., 1.0).
1590int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
1591    uint32_t numFramesPlayed;
1592    int64_t numFramesPlayedAt;
1593    AudioTimestamp ts;
1594    static const int64_t kStaleTimestamp100ms = 100000;
1595
1596    status_t res = mAudioSink->getTimestamp(ts);
1597    if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
1598        numFramesPlayed = ts.mPosition;
1599        numFramesPlayedAt =
1600            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
1601        const int64_t timestampAge = nowUs - numFramesPlayedAt;
1602        if (timestampAge > kStaleTimestamp100ms) {
1603            // FIXME (audio framework):
1604            // getTimestamp returns a timestamp which may come from the audio mixing threads.
1605            // After pausing, the MixerThread may go idle, so the mTime estimate may
1606            // become stale. Assuming the MixerThread runs every 20ms, with FastMixer at 5ms,
1607            // the max latency should be about 25ms with an average around 12ms (to be verified).
1608            // For safety we clamp the timestamp age to 100ms.
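            // Example: if the mixer last updated its timestamp 150ms before nowUs, the clamp
            // below pulls numFramesPlayedAt forward to (nowUs - 100ms), so the extrapolation in
            // the duration computation adds at most 100ms on top of the reported frame count.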
1609            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
1610                    (long long)nowUs, (long long)numFramesPlayedAt);
1611            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
1612        }
1613        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
1614    } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
1615        numFramesPlayed = 0;
1616        numFramesPlayedAt = nowUs;
1617        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
1618        //        numFramesPlayed, (long long)numFramesPlayedAt);
1619    } else {                         // case 3: transitory state at the start of a new track, or audio fast tracks.
1620        res = mAudioSink->getPosition(&numFramesPlayed);
1621        CHECK_EQ(res, (status_t)OK);
1622        numFramesPlayedAt = nowUs;
1623        numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
1624        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
1625    }
1626
1627    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // frame count can't reach the sign bit for ~12.4 hrs at 48kHz, test
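    // Worked example (assuming getDurationUsIfPlayedAtSampleRate() converts the frame count to
    // microseconds at the sink sample rate): 48000 frames at 48kHz, reported 10ms before nowUs,
    // gives 1000000us + 10000us = 1010000us of played-out audio.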
1628    int64_t durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
1629            + nowUs - numFramesPlayedAt;
1630    if (durationUs < 0) {
1631        // Occurs when the numFramesPlayed position is very small and either:
1632        // (1) in case 1, nowUs is computed before getTimestamp() is called, so
1633        //     numFramesPlayedAt exceeds nowUs by more than the played-frame duration; or
1634        // (2) in case 3, getPosition() is used and half of mAudioSink->latency() is added to
1635        //     numFramesPlayedAt, again exceeding nowUs by more than the played-frame duration.
1636        //
1637        // Both of these are transitory conditions.
1638        ALOGV("getPlayedOutAudioDurationUs: negative duration %lld set to zero", (long long)durationUs);
1639        durationUs = 0;
1640    }
1641    ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
1642            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
1643    return durationUs;
1644}
1645
1646void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1647    if (mAudioTornDown) {
1648        return;
1649    }
1650    mAudioTornDown = true;
1651
1652    int64_t currentPositionUs;
1653    sp<AMessage> notify = mNotify->dup();
1654    if (getCurrentPosition(&currentPositionUs) == OK) {
1655        notify->setInt64("positionUs", currentPositionUs);
1656    }
1657
1658    mAudioSink->stop();
1659    mAudioSink->flush();
1660
1661    notify->setInt32("what", kWhatAudioTearDown);
1662    notify->setInt32("reason", reason);
1663    notify->post();
1664}
1665
1666void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1667    if (offloadingAudio()) {
1668        mWakeLock->acquire();
1669        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
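        // Tag the message with the current generation; cancelAudioOffloadPauseTimeout() bumps
        // the generation, so a timeout that was already posted is ignored when it fires.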
1670        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1671        msg->post(kOffloadPauseMaxUs);
1672    }
1673}
1674
1675void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1676    if (offloadingAudio()) {
1677        mWakeLock->release(true);
1678        ++mAudioOffloadPauseTimeoutGeneration;
1679    }
1680}
1681
1682status_t NuPlayer::Renderer::onOpenAudioSink(
1683        const sp<AMessage> &format,
1684        bool offloadOnly,
1685        bool hasVideo,
1686        uint32_t flags) {
1687    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1688            offloadOnly, offloadingAudio());
1689    bool audioSinkChanged = false;
1690
1691    int32_t numChannels;
1692    CHECK(format->findInt32("channel-count", &numChannels));
1693
1694    int32_t channelMask;
1695    if (!format->findInt32("channel-mask", &channelMask)) {
1696        // Signal to the AudioSink to derive the mask from the channel count.
1697        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1698    }
1699
1700    int32_t sampleRate;
1701    CHECK(format->findInt32("sample-rate", &sampleRate));
1702
1703    if (offloadingAudio()) {
1704        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
1705        AString mime;
1706        CHECK(format->findString("mime", &mime));
1707        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1708
1709        if (err != OK) {
1710            ALOGE("Couldn't map mime \"%s\" to a valid "
1711                    "audio_format", mime.c_str());
1712            onDisableOffloadAudio();
1713        } else {
1714            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1715                    mime.c_str(), audioFormat);
1716
1717            int avgBitRate = -1;
1718            format->findInt32("bit-rate", &avgBitRate);
1719
1720            int32_t aacProfile = -1;
1721            if (audioFormat == AUDIO_FORMAT_AAC
1722                    && format->findInt32("aac-profile", &aacProfile)) {
1723                // Refine the AAC audio_format according to the AAC profile.
1724                mapAACProfileToAudioFormat(
1725                        audioFormat,
1726                        aacProfile);
1727            }
1728
1729            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1730            offloadInfo.duration_us = -1;
1731            format->findInt64(
1732                    "durationUs", &offloadInfo.duration_us);
1733            offloadInfo.sample_rate = sampleRate;
1734            offloadInfo.channel_mask = channelMask;
1735            offloadInfo.format = audioFormat;
1736            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1737            offloadInfo.bit_rate = avgBitRate;
1738            offloadInfo.has_video = hasVideo;
1739            offloadInfo.is_streaming = true;
1740
1741            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1742                ALOGV("openAudioSink: no change in offload mode");
1743                // no change from previous configuration, everything ok.
1744                return OK;
1745            }
1746            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1747
1748            ALOGV("openAudioSink: try to open AudioSink in offload mode");
1749            uint32_t offloadFlags = flags;
1750            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1751            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
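            // Compressed offload and deep-buffer are mutually exclusive output paths, so only
            // the offload flag is allowed to survive here.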
1752            audioSinkChanged = true;
1753            mAudioSink->close();
1754
1755            err = mAudioSink->open(
1756                    sampleRate,
1757                    numChannels,
1758                    (audio_channel_mask_t)channelMask,
1759                    audioFormat,
1760                    0 /* bufferCount - unused */,
1761                    &NuPlayer::Renderer::AudioSinkCallback,
1762                    this,
1763                    (audio_output_flags_t)offloadFlags,
1764                    &offloadInfo);
1765
1766            if (err == OK) {
1767                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1768            }
1769
1770            if (err == OK) {
1771                // If the playback is offloaded to h/w, we pass
1772                // the HAL some metadata information.
1773                // We don't want to do this for PCM because it
1774                // will be going through the AudioFlinger mixer
1775                // before reaching the hardware.
1776                // TODO
1777                mCurrentOffloadInfo = offloadInfo;
1778                if (!mPaused) { // for preview mode, don't start if paused
1779                    err = mAudioSink->start();
1780                }
1781                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1782            }
1783            if (err != OK) {
1784                // Clean up, fall back to non offload mode.
1785                mAudioSink->close();
1786                onDisableOffloadAudio();
1787                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1788                ALOGV("openAudioSink: offload failed");
1789            } else {
1790                mUseAudioCallback = true;  // offload mode transfers data through callback
1791                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1792            }
1793        }
1794    }
1795    if (!offloadOnly && !offloadingAudio()) {
1796        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1797        uint32_t pcmFlags = flags;
1798        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1799
1800        const PcmInfo info = {
1801                (audio_channel_mask_t)channelMask,
1802                (audio_output_flags_t)pcmFlags,
1803                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
1804                numChannels,
1805                sampleRate
1806        };
1807        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
1808            ALOGV("openAudioSink: no change in pcm mode");
1809            // no change from previous configuration, everything ok.
1810            return OK;
1811        }
1812
1813        audioSinkChanged = true;
1814        mAudioSink->close();
1815        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1816        // Note: It is possible to set up the callback, but not use it to send audio data.
1817        // This requires a fix in AudioSink to explicitly specify the transfer mode.
1818        mUseAudioCallback = getUseAudioCallbackSetting();
1819        if (mUseAudioCallback) {
1820            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
1821        }
1822
1823        // Compute the desired buffer size.
1824        // For callback mode, the amount of time before wakeup is about half the buffer size.
1825        const uint32_t frameCount =
1826                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;
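        // Example: a 1000ms sink setting at 48000Hz requests 48000 frames of buffering.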
1827
1828        // When doNotReconnect is set, the AudioSink signals back instead of reconnecting itself,
1829        // letting NuPlayer re-construct the AudioSink. We don't want this when there's video,
1830        // because it would cause a video seek to the previous I-frame. We do want it when there's
1831        // only audio, because it gives NuPlayer a chance to switch from non-offload mode to
1832        // offload mode. So we only set doNotReconnect when there's no video.
1833        const bool doNotReconnect = !hasVideo;
1834        status_t err = mAudioSink->open(
1835                    sampleRate,
1836                    numChannels,
1837                    (audio_channel_mask_t)channelMask,
1838                    AUDIO_FORMAT_PCM_16_BIT,
1839                    0 /* bufferCount - unused */,
1840                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
1841                    mUseAudioCallback ? this : NULL,
1842                    (audio_output_flags_t)pcmFlags,
1843                    NULL,
1844                    doNotReconnect,
1845                    frameCount);
1846        if (err == OK) {
1847            err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1848        }
1849        if (err != OK) {
1850            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
1851            mAudioSink->close();
1852            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1853            return err;
1854        }
1855        mCurrentPcmInfo = info;
1856        if (!mPaused) { // for preview mode, don't start if paused
1857            mAudioSink->start();
1858        }
1859    }
1860    if (audioSinkChanged) {
1861        onAudioSinkChanged();
1862    }
1863    mAudioTornDown = false;
1864    return OK;
1865}
1866
1867void NuPlayer::Renderer::onCloseAudioSink() {
1868    mAudioSink->close();
1869    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1870    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1871}
1872
1873}  // namespace android
1874
1875