NuPlayerRenderer.cpp revision a0b397133bfce8a62198dfac9a2b970c8b20bcc5
1/*
2 * Copyright (C) 2010 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17//#define LOG_NDEBUG 0
18#define LOG_TAG "NuPlayerRenderer"
19#include <utils/Log.h>
20
21#include "NuPlayerRenderer.h"
22#include <cutils/properties.h>
23#include <media/stagefright/foundation/ABuffer.h>
24#include <media/stagefright/foundation/ADebug.h>
25#include <media/stagefright/foundation/AMessage.h>
26#include <media/stagefright/foundation/AUtils.h>
27#include <media/stagefright/foundation/AWakeLock.h>
28#include <media/stagefright/MediaClock.h>
29#include <media/stagefright/MediaErrors.h>
30#include <media/stagefright/MetaData.h>
31#include <media/stagefright/Utils.h>
32
33#include <VideoFrameScheduler.h>
34
35#include <inttypes.h>
36
37namespace android {
38
39/*
40 * Example of common configuration settings in shell script form
41
42   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
43   adb shell setprop audio.offload.disable 1
44
45   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
46   adb shell setprop audio.offload.video 1
47
48   #Use audio callbacks for PCM data
49   adb shell setprop media.stagefright.audio.cbk 1
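   #Read the setting back to verify it (getprop is the counterpart of setprop)
   adb shell getprop media.stagefright.audio.cbk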
50
51 * These configurations take effect for the next track played (not the current track).
52 */
53
54static inline bool getUseAudioCallbackSetting() {
55    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
56}
57
58// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
59// is closed to allow the audio DSP to power down.
60static const int64_t kOffloadPauseMaxUs = 10000000ll;
61
62// static
63const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
64        AUDIO_CHANNEL_NONE,
65        AUDIO_OUTPUT_FLAG_NONE,
66        AUDIO_FORMAT_INVALID,
67        0, // mNumChannels
68        0 // mSampleRate
69};
70
71// static
72const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;
73
74NuPlayer::Renderer::Renderer(
75        const sp<MediaPlayerBase::AudioSink> &sink,
76        const sp<AMessage> &notify,
77        uint32_t flags)
78    : mAudioSink(sink),
79      mNotify(notify),
80      mFlags(flags),
81      mNumFramesWritten(0),
82      mDrainAudioQueuePending(false),
83      mDrainVideoQueuePending(false),
84      mAudioQueueGeneration(0),
85      mVideoQueueGeneration(0),
86      mAudioDrainGeneration(0),
87      mVideoDrainGeneration(0),
88      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
89      mAudioFirstAnchorTimeMediaUs(-1),
90      mAnchorTimeMediaUs(-1),
91      mAnchorNumFramesWritten(-1),
92      mVideoLateByUs(0ll),
93      mHasAudio(false),
94      mHasVideo(false),
95      mNotifyCompleteAudio(false),
96      mNotifyCompleteVideo(false),
97      mSyncQueues(false),
98      mPaused(false),
99      mVideoSampleReceived(false),
100      mVideoRenderingStarted(false),
101      mVideoRenderingStartGeneration(0),
102      mAudioRenderingStartGeneration(0),
103      mAudioOffloadPauseTimeoutGeneration(0),
104      mAudioTornDown(false),
105      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
106      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
107      mTotalBuffersQueued(0),
108      mLastAudioBufferDrained(0),
109      mUseAudioCallback(false),
110      mWakeLock(new AWakeLock()) {
111    mMediaClock = new MediaClock;
112    mPlaybackRate = mPlaybackSettings.mSpeed;
113    mMediaClock->setPlaybackRate(mPlaybackRate);
114}
115
116NuPlayer::Renderer::~Renderer() {
117    if (offloadingAudio()) {
118        mAudioSink->stop();
119        mAudioSink->flush();
120        mAudioSink->close();
121    }
122}
123
124void NuPlayer::Renderer::queueBuffer(
125        bool audio,
126        const sp<ABuffer> &buffer,
127        const sp<AMessage> &notifyConsumed) {
128    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
129    msg->setInt32("queueGeneration", getQueueGeneration(audio));
130    msg->setInt32("audio", static_cast<int32_t>(audio));
131    msg->setBuffer("buffer", buffer);
132    msg->setMessage("notifyConsumed", notifyConsumed);
133    msg->post();
134}
135
136void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
137    CHECK_NE(finalResult, (status_t)OK);
138
139    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
140    msg->setInt32("queueGeneration", getQueueGeneration(audio));
141    msg->setInt32("audio", static_cast<int32_t>(audio));
142    msg->setInt32("finalResult", finalResult);
143    msg->post();
144}
145
146status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
147    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
148    writeToAMessage(msg, rate);
149    sp<AMessage> response;
150    status_t err = msg->postAndAwaitResponse(&response);
151    if (err == OK && response != NULL) {
152        CHECK(response->findInt32("err", &err));
153    }
154    return err;
155}
156
157status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
158    if (rate.mSpeed == 0.f) {
159        onPause();
160        // Don't call the audio sink's setPlaybackRate() when pausing, as the pitch
161        // does not have to correspond to any non-zero speed (e.g. the old speed). Keep
162        // the settings nonetheless, using the old speed, in case the audio sink changes.
163        AudioPlaybackRate newRate = rate;
164        newRate.mSpeed = mPlaybackSettings.mSpeed;
165        mPlaybackSettings = newRate;
166        return OK;
167    }
168
169    if (mAudioSink != NULL && mAudioSink->ready()) {
170        status_t err = mAudioSink->setPlaybackRate(rate);
171        if (err != OK) {
172            return err;
173        }
174    }
175    mPlaybackSettings = rate;
176    mPlaybackRate = rate.mSpeed;
177    mMediaClock->setPlaybackRate(mPlaybackRate);
178    return OK;
179}
180
181status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
182    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
183    sp<AMessage> response;
184    status_t err = msg->postAndAwaitResponse(&response);
185    if (err == OK && response != NULL) {
186        CHECK(response->findInt32("err", &err));
187        if (err == OK) {
188            readFromAMessage(response, rate);
189        }
190    }
191    return err;
192}
193
194status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
195    if (mAudioSink != NULL && mAudioSink->ready()) {
196        status_t err = mAudioSink->getPlaybackRate(rate);
197        if (err == OK) {
198            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
199                ALOGW("correcting mismatch in internal/external playback rate");
200            }
201            // Adopt the playback settings actually used by the audio sink, as they
202            // may differ slightly because the audio sink ignores very small rate changes.
203            mPlaybackSettings = *rate;
204            if (mPaused) {
205                rate->mSpeed = 0.f;
206            }
207        }
208        return err;
209    }
210    *rate = mPlaybackSettings;
211    return OK;
212}
213
214status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
215    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
216    writeToAMessage(msg, sync, videoFpsHint);
217    sp<AMessage> response;
218    status_t err = msg->postAndAwaitResponse(&response);
219    if (err == OK && response != NULL) {
220        CHECK(response->findInt32("err", &err));
221    }
222    return err;
223}
224
225status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
226    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
227        return BAD_VALUE;
228    }
229    // TODO: support sync sources
230    return INVALID_OPERATION;
231}
232
233status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
234    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
235    sp<AMessage> response;
236    status_t err = msg->postAndAwaitResponse(&response);
237    if (err == OK && response != NULL) {
238        CHECK(response->findInt32("err", &err));
239        if (err == OK) {
240            readFromAMessage(response, sync, videoFps);
241        }
242    }
243    return err;
244}
245
246status_t NuPlayer::Renderer::onGetSyncSettings(
247        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
248    *sync = mSyncSettings;
249    *videoFps = -1.f;
250    return OK;
251}
252
253void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
254    {
255        Mutex::Autolock autoLock(mLock);
256        if (audio) {
257            mNotifyCompleteAudio |= notifyComplete;
258            ++mAudioQueueGeneration;
259            ++mAudioDrainGeneration;
260        } else {
261            mNotifyCompleteVideo |= notifyComplete;
262            ++mVideoQueueGeneration;
263            ++mVideoDrainGeneration;
264        }
265
266        clearAnchorTime_l();
267        clearAudioFirstAnchorTime_l();
268        mVideoLateByUs = 0;
269        mSyncQueues = false;
270    }
271
272    sp<AMessage> msg = new AMessage(kWhatFlush, this);
273    msg->setInt32("audio", static_cast<int32_t>(audio));
274    msg->post();
275}
276
277void NuPlayer::Renderer::signalTimeDiscontinuity() {
278}
279
280void NuPlayer::Renderer::signalDisableOffloadAudio() {
281    (new AMessage(kWhatDisableOffloadAudio, this))->post();
282}
283
284void NuPlayer::Renderer::signalEnableOffloadAudio() {
285    (new AMessage(kWhatEnableOffloadAudio, this))->post();
286}
287
288void NuPlayer::Renderer::pause() {
289    (new AMessage(kWhatPause, this))->post();
290}
291
292void NuPlayer::Renderer::resume() {
293    (new AMessage(kWhatResume, this))->post();
294}
295
296void NuPlayer::Renderer::setVideoFrameRate(float fps) {
297    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
298    msg->setFloat("frame-rate", fps);
299    msg->post();
300}
301
302// May be called from any thread.
303status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
304    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
305}
306
307void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
308    mAudioFirstAnchorTimeMediaUs = -1;
309    mMediaClock->setStartingTimeMedia(-1);
310}
311
312void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
313    if (mAudioFirstAnchorTimeMediaUs == -1) {
314        mAudioFirstAnchorTimeMediaUs = mediaUs;
315        mMediaClock->setStartingTimeMedia(mediaUs);
316    }
317}
318
319void NuPlayer::Renderer::clearAnchorTime_l() {
320    mMediaClock->clearAnchor();
321    mAnchorTimeMediaUs = -1;
322    mAnchorNumFramesWritten = -1;
323}
324
325void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
326    Mutex::Autolock autoLock(mLock);
327    mVideoLateByUs = lateUs;
328}
329
330int64_t NuPlayer::Renderer::getVideoLateByUs() {
331    Mutex::Autolock autoLock(mLock);
332    return mVideoLateByUs;
333}
334
335status_t NuPlayer::Renderer::openAudioSink(
336        const sp<AMessage> &format,
337        bool offloadOnly,
338        bool hasVideo,
339        uint32_t flags,
340        bool *isOffloaded) {
341    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
342    msg->setMessage("format", format);
343    msg->setInt32("offload-only", offloadOnly);
344    msg->setInt32("has-video", hasVideo);
345    msg->setInt32("flags", flags);
346
347    sp<AMessage> response;
348    msg->postAndAwaitResponse(&response);
349
350    int32_t err;
351    if (!response->findInt32("err", &err)) {
352        err = INVALID_OPERATION;
353    } else if (err == OK && isOffloaded != NULL) {
354        int32_t offload;
355        CHECK(response->findInt32("offload", &offload));
356        *isOffloaded = (offload != 0);
357    }
358    return err;
359}
360
361void NuPlayer::Renderer::closeAudioSink() {
362    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);
363
364    sp<AMessage> response;
365    msg->postAndAwaitResponse(&response);
366}
367
368void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
369    switch (msg->what()) {
370        case kWhatOpenAudioSink:
371        {
372            sp<AMessage> format;
373            CHECK(msg->findMessage("format", &format));
374
375            int32_t offloadOnly;
376            CHECK(msg->findInt32("offload-only", &offloadOnly));
377
378            int32_t hasVideo;
379            CHECK(msg->findInt32("has-video", &hasVideo));
380
381            uint32_t flags;
382            CHECK(msg->findInt32("flags", (int32_t *)&flags));
383
384            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);
385
386            sp<AMessage> response = new AMessage;
387            response->setInt32("err", err);
388            response->setInt32("offload", offloadingAudio());
389
390            sp<AReplyToken> replyID;
391            CHECK(msg->senderAwaitsResponse(&replyID));
392            response->postReply(replyID);
393
394            break;
395        }
396
397        case kWhatCloseAudioSink:
398        {
399            sp<AReplyToken> replyID;
400            CHECK(msg->senderAwaitsResponse(&replyID));
401
402            onCloseAudioSink();
403
404            sp<AMessage> response = new AMessage;
405            response->postReply(replyID);
406            break;
407        }
408
409        case kWhatStopAudioSink:
410        {
411            mAudioSink->stop();
412            break;
413        }
414
415        case kWhatDrainAudioQueue:
416        {
417            int32_t generation;
418            CHECK(msg->findInt32("drainGeneration", &generation));
419            if (generation != getDrainGeneration(true /* audio */)) {
420                break;
421            }
422
423            mDrainAudioQueuePending = false;
424
425            if (onDrainAudioQueue()) {
426                uint32_t numFramesPlayed;
427                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
428                         (status_t)OK);
429
430                uint32_t numFramesPendingPlayout =
431                    mNumFramesWritten - numFramesPlayed;
432
433                // This is how long the audio sink will have data to
434                // play back.
435                int64_t delayUs =
436                    mAudioSink->msecsPerFrame()
437                        * numFramesPendingPlayout * 1000ll;
438                if (mPlaybackRate > 1.0f) {
439                    delayUs /= mPlaybackRate;
440                }
441
442                // Let's give it more data after about half that time
443                // has elapsed.
444                Mutex::Autolock autoLock(mLock);
445                postDrainAudioQueue_l(delayUs / 2);
446            }
447            break;
448        }
449
450        case kWhatDrainVideoQueue:
451        {
452            int32_t generation;
453            CHECK(msg->findInt32("drainGeneration", &generation));
454            if (generation != getDrainGeneration(false /* audio */)) {
455                break;
456            }
457
458            mDrainVideoQueuePending = false;
459
460            onDrainVideoQueue();
461
462            postDrainVideoQueue();
463            break;
464        }
465
466        case kWhatPostDrainVideoQueue:
467        {
468            int32_t generation;
469            CHECK(msg->findInt32("drainGeneration", &generation));
470            if (generation != getDrainGeneration(false /* audio */)) {
471                break;
472            }
473
474            mDrainVideoQueuePending = false;
475            postDrainVideoQueue();
476            break;
477        }
478
479        case kWhatQueueBuffer:
480        {
481            onQueueBuffer(msg);
482            break;
483        }
484
485        case kWhatQueueEOS:
486        {
487            onQueueEOS(msg);
488            break;
489        }
490
491        case kWhatConfigPlayback:
492        {
493            sp<AReplyToken> replyID;
494            CHECK(msg->senderAwaitsResponse(&replyID));
495            AudioPlaybackRate rate;
496            readFromAMessage(msg, &rate);
497            status_t err = onConfigPlayback(rate);
498            sp<AMessage> response = new AMessage;
499            response->setInt32("err", err);
500            response->postReply(replyID);
501            break;
502        }
503
504        case kWhatGetPlaybackSettings:
505        {
506            sp<AReplyToken> replyID;
507            CHECK(msg->senderAwaitsResponse(&replyID));
508            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
509            status_t err = onGetPlaybackSettings(&rate);
510            sp<AMessage> response = new AMessage;
511            if (err == OK) {
512                writeToAMessage(response, rate);
513            }
514            response->setInt32("err", err);
515            response->postReply(replyID);
516            break;
517        }
518
519        case kWhatConfigSync:
520        {
521            sp<AReplyToken> replyID;
522            CHECK(msg->senderAwaitsResponse(&replyID));
523            AVSyncSettings sync;
524            float videoFpsHint;
525            readFromAMessage(msg, &sync, &videoFpsHint);
526            status_t err = onConfigSync(sync, videoFpsHint);
527            sp<AMessage> response = new AMessage;
528            response->setInt32("err", err);
529            response->postReply(replyID);
530            break;
531        }
532
533        case kWhatGetSyncSettings:
534        {
535            sp<AReplyToken> replyID;
536            CHECK(msg->senderAwaitsResponse(&replyID));
537
538            ALOGV("kWhatGetSyncSettings");
539            AVSyncSettings sync;
540            float videoFps = -1.f;
541            status_t err = onGetSyncSettings(&sync, &videoFps);
542            sp<AMessage> response = new AMessage;
543            if (err == OK) {
544                writeToAMessage(response, sync, videoFps);
545            }
546            response->setInt32("err", err);
547            response->postReply(replyID);
548            break;
549        }
550
551        case kWhatFlush:
552        {
553            onFlush(msg);
554            break;
555        }
556
557        case kWhatDisableOffloadAudio:
558        {
559            onDisableOffloadAudio();
560            break;
561        }
562
563        case kWhatEnableOffloadAudio:
564        {
565            onEnableOffloadAudio();
566            break;
567        }
568
569        case kWhatPause:
570        {
571            onPause();
572            break;
573        }
574
575        case kWhatResume:
576        {
577            onResume();
578            break;
579        }
580
581        case kWhatSetVideoFrameRate:
582        {
583            float fps;
584            CHECK(msg->findFloat("frame-rate", &fps));
585            onSetVideoFrameRate(fps);
586            break;
587        }
588
589        case kWhatAudioTearDown:
590        {
591            onAudioTearDown(kDueToError);
592            break;
593        }
594
595        case kWhatAudioOffloadPauseTimeout:
596        {
597            int32_t generation;
598            CHECK(msg->findInt32("drainGeneration", &generation));
599            if (generation != mAudioOffloadPauseTimeoutGeneration) {
600                break;
601            }
602            ALOGV("Audio Offload tear down due to pause timeout.");
603            onAudioTearDown(kDueToTimeout);
604            mWakeLock->release();
605            break;
606        }
607
608        default:
609            TRESPASS();
610            break;
611    }
612}
613
614void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
615    if (mDrainAudioQueuePending || mSyncQueues || mPaused
616            || mUseAudioCallback) {
617        return;
618    }
619
620    if (mAudioQueue.empty()) {
621        return;
622    }
623
624    mDrainAudioQueuePending = true;
625    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
626    msg->setInt32("drainGeneration", mAudioDrainGeneration);
627    msg->post(delayUs);
628}
629
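// Rendering-start handshake: prepareForMediaRenderingStart_l() latches the current audio/video
// drain generations; notifyIfMediaRenderingStarted_l() then posts kWhatMediaRenderingStart once,
// on the first buffer rendered afterwards, provided neither drain generation has been bumped
// (by a flush, a pause, or an offload mode switch) in the meantime.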
630void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
631    mAudioRenderingStartGeneration = mAudioDrainGeneration;
632    mVideoRenderingStartGeneration = mVideoDrainGeneration;
633}
634
635void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
636    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
637        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
638        mVideoRenderingStartGeneration = -1;
639        mAudioRenderingStartGeneration = -1;
640
641        sp<AMessage> notify = mNotify->dup();
642        notify->setInt32("what", kWhatMediaRenderingStart);
643        notify->post();
644    }
645}
646
647// static
648size_t NuPlayer::Renderer::AudioSinkCallback(
649        MediaPlayerBase::AudioSink * /* audioSink */,
650        void *buffer,
651        size_t size,
652        void *cookie,
653        MediaPlayerBase::AudioSink::cb_event_t event) {
654    NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;
655
656    switch (event) {
657        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
658        {
659            return me->fillAudioBuffer(buffer, size);
660            break;
661        }
662
663        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
664        {
665            ALOGV("AudioSink::CB_EVENT_STREAM_END");
666            me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
667            break;
668        }
669
670        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
671        {
672            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
673            me->notifyAudioTearDown();
674            break;
675        }
676    }
677
678    return 0;
679}
680
681size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
682    Mutex::Autolock autoLock(mLock);
683
684    if (!mUseAudioCallback || mPaused) {
685        return 0;
686    }
687
688    bool hasEOS = false;
689
690    size_t sizeCopied = 0;
691    bool firstEntry = true;
692    status_t finalResult = OK;  // captures the EOS entry's result for use after the loop.
693    while (sizeCopied < size && !mAudioQueue.empty()) {
694        QueueEntry *entry = &*mAudioQueue.begin();
695
696        if (entry->mBuffer == NULL) { // EOS
697            hasEOS = true;
               finalResult = entry->mFinalResult;  // read before erase() destroys the entry
698            mAudioQueue.erase(mAudioQueue.begin());
699            break;
700        }
701
702        if (firstEntry && entry->mOffset == 0) {
703            firstEntry = false;
704            int64_t mediaTimeUs;
705            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
706            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
707            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
708        }
709
710        size_t copy = entry->mBuffer->size() - entry->mOffset;
711        size_t sizeRemaining = size - sizeCopied;
712        if (copy > sizeRemaining) {
713            copy = sizeRemaining;
714        }
715
716        memcpy((char *)buffer + sizeCopied,
717               entry->mBuffer->data() + entry->mOffset,
718               copy);
719
720        entry->mOffset += copy;
721        if (entry->mOffset == entry->mBuffer->size()) {
722            entry->mNotifyConsumed->post();
723            mAudioQueue.erase(mAudioQueue.begin());
724            entry = NULL;
725        }
726        sizeCopied += copy;
727
728        notifyIfMediaRenderingStarted_l();
729    }
730
731    if (mAudioFirstAnchorTimeMediaUs >= 0) {
732        int64_t nowUs = ALooper::GetNowUs();
733        int64_t nowMediaUs =
734            mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs);
735        // we don't know how much data we are queueing for offloaded tracks.
736        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
737    }
738
739    // For non-offloaded audio, we need to track the number of frames written because
740    // there is no EVENT_STREAM_END notification. The frames-written count gives
741    // an estimate of the pending playout duration.
742    if (!offloadingAudio()) {
743        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
744    }
745
746    if (hasEOS) {
747        (new AMessage(kWhatStopAudioSink, this))->post();
748        // As there is currently no EVENT_STREAM_END callback notification for
749        // non-offloaded audio tracks, we need to post the EOS ourselves.
750        if (!offloadingAudio()) {
751            int64_t postEOSDelayUs = 0;
752            if (mAudioSink->needsTrailingPadding()) {
753                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
754            }
755            ALOGV("fillAudioBuffer: notifyEOS "
756                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
757                    mNumFramesWritten, finalResult, (long long)postEOSDelayUs);
758            notifyEOS(true /* audio */, finalResult, postEOSDelayUs);
759        }
760    }
761    return sizeCopied;
762}
763
764bool NuPlayer::Renderer::onDrainAudioQueue() {
765    // TODO: This call to getPosition checks whether the AudioTrack has been
766    // created in the AudioSink before draining audio. If the AudioTrack doesn't
767    // exist, the CHECKs on getPosition will fail.
768    // We still need to figure out why the AudioTrack is sometimes not yet created
769    // when this function is called. One possible cause is leftover audio. Another
770    // place to check is whether the decoder has received INFO_FORMAT_CHANGED as
771    // its first buffer, since the AudioSink is opened there, and whether a flush
772    // issued immediately after start interacts badly with that. Investigate the
773    // error message "vorbis_dsp_synthesis returned -135", along with the RTSP
774    // case.
775    uint32_t numFramesPlayed;
776    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
777        return false;
778    }
779
780#if 0
781    ssize_t numFramesAvailableToWrite =
782        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);
783
784    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
785        ALOGI("audio sink underrun");
786    } else {
787        ALOGV("audio queue has %d frames left to play",
788             mAudioSink->frameCount() - numFramesAvailableToWrite);
789    }
790#endif
791
792    while (!mAudioQueue.empty()) {
793        QueueEntry *entry = &*mAudioQueue.begin();
794
795        mLastAudioBufferDrained = entry->mBufferOrdinal;
796
797        if (entry->mBuffer == NULL) {
798            // EOS
799            int64_t postEOSDelayUs = 0;
800            if (mAudioSink->needsTrailingPadding()) {
801                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
802            }
803            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
804
805            mAudioQueue.erase(mAudioQueue.begin());
806            entry = NULL;
807            if (mAudioSink->needsTrailingPadding()) {
808                // If we're not in gapless playback (i.e. through setNextPlayer), we
809                // need to stop the track here, because that will play out the last
810                // little bit at the end of the file. Otherwise short files won't play.
811                mAudioSink->stop();
812                mNumFramesWritten = 0;
813            }
814            return false;
815        }
816
817        // Ignore a 0-sized buffer, which could be an EOS marker with no data.
818        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
819            int64_t mediaTimeUs;
820            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
821            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
822                    mediaTimeUs / 1E6);
823            onNewAudioMediaTime(mediaTimeUs);
824        }
825
826        size_t copy = entry->mBuffer->size() - entry->mOffset;
827
828        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
829                                            copy, false /* blocking */);
830        if (written < 0) {
831            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
832            if (written == WOULD_BLOCK) {
833                ALOGW("AudioSink write would block when writing %zu bytes", copy);
834            } else {
835                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
836                notifyAudioTearDown();
837            }
838            break;
839        }
840
841        entry->mOffset += written;
842        if (entry->mOffset == entry->mBuffer->size()) {
843            entry->mNotifyConsumed->post();
844            mAudioQueue.erase(mAudioQueue.begin());
845
846            entry = NULL;
847        }
848
849        size_t copiedFrames = written / mAudioSink->frameSize();
850        mNumFramesWritten += copiedFrames;
851
852        {
853            Mutex::Autolock autoLock(mLock);
854            notifyIfMediaRenderingStarted_l();
855        }
856
857        if (written != (ssize_t)copy) {
858            // A short count was received from AudioSink::write()
859            //
860            // AudioSink write is called in non-blocking mode.
861            // It may return with a short count when:
862            //
863            // 1) Size to be copied is not a multiple of the frame size. We consider this fatal.
864            // 2) The data to be copied exceeds the available buffer in AudioSink.
865            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
866            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.
867
868            // (Case 1)
869            // Must be a multiple of the frame size.  If it is not a multiple of a frame size, it
870            // needs to fail, as we should not carry over fractional frames between calls.
871            CHECK_EQ(copy % mAudioSink->frameSize(), 0);
872
873            // (Case 2, 3, 4)
874            // Return early to the caller.
875            // Beware of calling immediately again as this may busy-loop if you are not careful.
876            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
877            break;
878        }
879    }
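    // Advance the media clock's upper bound to the anchor media time plus the duration of the
    // frames written since the anchor, so the reported position should not run ahead of the
    // audio actually written to the sink.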
880    int64_t maxTimeMedia;
881    {
882        Mutex::Autolock autoLock(mLock);
883        maxTimeMedia =
884            mAnchorTimeMediaUs +
885                    (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
886                            * 1000LL * mAudioSink->msecsPerFrame());
887    }
888    mMediaClock->updateMaxTimeMedia(maxTimeMedia);
889
890    return !mAudioQueue.empty();
891}
892
893int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
894    int32_t sampleRate = offloadingAudio() ?
895            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
896    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
897    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
898}
899
900// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
901int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
902    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
903    return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs);
904}
905
906int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
907    int64_t realUs;
908    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
909        // If we failed to get the current position, e.g. because the audio clock
910        // is not ready yet, just play out the video immediately without delay.
911        return nowUs;
912    }
913    return realUs;
914}
915
916void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
917    Mutex::Autolock autoLock(mLock);
918    // TRICKY: vorbis decoder generates multiple frames with the same
919    // timestamp, so only update on the first frame with a given timestamp
920    if (mediaTimeUs == mAnchorTimeMediaUs) {
921        return;
922    }
923    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
924    int64_t nowUs = ALooper::GetNowUs();
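    // The media time being heard right now is the new buffer's time minus the duration of
    // audio written but not yet played out; anchor the media clock at that point.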
925    int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
926    mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
927    mAnchorNumFramesWritten = mNumFramesWritten;
928    mAnchorTimeMediaUs = mediaTimeUs;
929}
930
931// Called without mLock acquired.
932void NuPlayer::Renderer::postDrainVideoQueue() {
933    if (mDrainVideoQueuePending
934            || getSyncQueues()
935            || (mPaused && mVideoSampleReceived)) {
936        return;
937    }
938
939    if (mVideoQueue.empty()) {
940        return;
941    }
942
943    QueueEntry &entry = *mVideoQueue.begin();
944
945    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
946    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));
947
948    if (entry.mBuffer == NULL) {
949        // EOS doesn't carry a timestamp.
950        msg->post();
951        mDrainVideoQueuePending = true;
952        return;
953    }
954
955    int64_t delayUs;
956    int64_t nowUs = ALooper::GetNowUs();
957    int64_t realTimeUs;
958    if (mFlags & FLAG_REAL_TIME) {
959        int64_t mediaTimeUs;
960        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
961        realTimeUs = mediaTimeUs;
962    } else {
963        int64_t mediaTimeUs;
964        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
965
966        {
967            Mutex::Autolock autoLock(mLock);
968            if (mAnchorTimeMediaUs < 0) {
969                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
970                mAnchorTimeMediaUs = mediaTimeUs;
971                realTimeUs = nowUs;
972            } else {
973                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
974            }
975        }
976        if (!mHasAudio) {
977            // Without audio, let the clock run up to 100 ms ahead; this smooths out videos >= 10 fps.
978            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
979        }
980
981        // Heuristic to handle the case where the media time jumps without a
982        // discontinuity. If we have not yet drained an audio buffer that was
983        // received after this video buffer, repost in 10 msec; otherwise repost
984        // in 500 msec.
985        delayUs = realTimeUs - nowUs;
986        if (delayUs > 500000) {
987            int64_t postDelayUs = 500000;
988            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
989                postDelayUs = 10000;
990            }
991            msg->setWhat(kWhatPostDrainVideoQueue);
992            msg->post(postDelayUs);
993            mVideoScheduler->restart();
994            ALOGI("possible video time jump of %dms, retrying in %dms",
995                    (int)(delayUs / 1000), (int)(postDelayUs / 1000));
996            mDrainVideoQueuePending = true;
997            return;
998        }
999    }
1000
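    // Snap the target render time onto the VideoFrameScheduler's vsync grid (it operates in ns).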
1001    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
1002    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);
1003
1004    delayUs = realTimeUs - nowUs;
1005
1006    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
1007    // post 2 display refreshes before rendering is due
1008    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);
1009
1010    mDrainVideoQueuePending = true;
1011}
1012
1013void NuPlayer::Renderer::onDrainVideoQueue() {
1014    if (mVideoQueue.empty()) {
1015        return;
1016    }
1017
1018    QueueEntry *entry = &*mVideoQueue.begin();
1019
1020    if (entry->mBuffer == NULL) {
1021        // EOS
1022
1023        notifyEOS(false /* audio */, entry->mFinalResult);
1024
1025        mVideoQueue.erase(mVideoQueue.begin());
1026        entry = NULL;
1027
1028        setVideoLateByUs(0);
1029        return;
1030    }
1031
1032    int64_t nowUs = -1;
1033    int64_t realTimeUs;
1034    if (mFlags & FLAG_REAL_TIME) {
1035        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
1036    } else {
1037        int64_t mediaTimeUs;
1038        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
1039
1040        nowUs = ALooper::GetNowUs();
1041        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
1042    }
1043
1044    bool tooLate = false;
1045
1046    if (!mPaused) {
1047        if (nowUs == -1) {
1048            nowUs = ALooper::GetNowUs();
1049        }
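        // A frame is treated as too late to render when it trails its target time by more than 40 ms.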
1050        setVideoLateByUs(nowUs - realTimeUs);
1051        tooLate = (mVideoLateByUs > 40000);
1052
1053        if (tooLate) {
1054            ALOGV("video late by %lld us (%.2f secs)",
1055                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
1056        } else {
1057            int64_t mediaUs = 0;
1058            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
1059            ALOGV("rendering video at media time %.2f secs",
1060                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
1061                    mediaUs) / 1E6);
1062        }
1063    } else {
1064        setVideoLateByUs(0);
1065        if (!mVideoSampleReceived && !mHasAudio) {
1066            // Ensure that the first frame after a flush is not used as an anchor while the
1067            // renderer is paused, because resume can happen at any time after a seek.
1068            Mutex::Autolock autoLock(mLock);
1069            clearAnchorTime_l();
1070        }
1071    }
1072
1073    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
1074    entry->mNotifyConsumed->setInt32("render", !tooLate);
1075    entry->mNotifyConsumed->post();
1076    mVideoQueue.erase(mVideoQueue.begin());
1077    entry = NULL;
1078
1079    mVideoSampleReceived = true;
1080
1081    if (!mPaused) {
1082        if (!mVideoRenderingStarted) {
1083            mVideoRenderingStarted = true;
1084            notifyVideoRenderingStart();
1085        }
1086        Mutex::Autolock autoLock(mLock);
1087        notifyIfMediaRenderingStarted_l();
1088    }
1089}
1090
1091void NuPlayer::Renderer::notifyVideoRenderingStart() {
1092    sp<AMessage> notify = mNotify->dup();
1093    notify->setInt32("what", kWhatVideoRenderingStart);
1094    notify->post();
1095}
1096
1097void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
1098    sp<AMessage> notify = mNotify->dup();
1099    notify->setInt32("what", kWhatEOS);
1100    notify->setInt32("audio", static_cast<int32_t>(audio));
1101    notify->setInt32("finalResult", finalResult);
1102    notify->post(delayUs);
1103}
1104
1105void NuPlayer::Renderer::notifyAudioTearDown() {
1106    (new AMessage(kWhatAudioTearDown, this))->post();
1107}
1108
1109void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
1110    int32_t audio;
1111    CHECK(msg->findInt32("audio", &audio));
1112
1113    if (dropBufferIfStale(audio, msg)) {
1114        return;
1115    }
1116
1117    if (audio) {
1118        mHasAudio = true;
1119    } else {
1120        mHasVideo = true;
1121    }
1122
1123    if (mHasVideo) {
1124        if (mVideoScheduler == NULL) {
1125            mVideoScheduler = new VideoFrameScheduler();
1126            mVideoScheduler->init();
1127        }
1128    }
1129
1130    sp<ABuffer> buffer;
1131    CHECK(msg->findBuffer("buffer", &buffer));
1132
1133    sp<AMessage> notifyConsumed;
1134    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));
1135
1136    QueueEntry entry;
1137    entry.mBuffer = buffer;
1138    entry.mNotifyConsumed = notifyConsumed;
1139    entry.mOffset = 0;
1140    entry.mFinalResult = OK;
1141    entry.mBufferOrdinal = ++mTotalBuffersQueued;
1142
1143    if (audio) {
1144        Mutex::Autolock autoLock(mLock);
1145        mAudioQueue.push_back(entry);
1146        postDrainAudioQueue_l();
1147    } else {
1148        mVideoQueue.push_back(entry);
1149        postDrainVideoQueue();
1150    }
1151
1152    Mutex::Autolock autoLock(mLock);
1153    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
1154        return;
1155    }
1156
1157    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
1158    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;
1159
1160    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
1161        // EOS signalled on either queue.
1162        syncQueuesDone_l();
1163        return;
1164    }
1165
1166    int64_t firstAudioTimeUs;
1167    int64_t firstVideoTimeUs;
1168    CHECK(firstAudioBuffer->meta()
1169            ->findInt64("timeUs", &firstAudioTimeUs));
1170    CHECK(firstVideoBuffer->meta()
1171            ->findInt64("timeUs", &firstVideoTimeUs));
1172
1173    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;
1174
1175    ALOGV("queueDiff = %.2f secs", diff / 1E6);
1176
1177    if (diff > 100000ll) {
1178        // Audio data starts more than 0.1 secs before video.
1179        // Drop some audio.
1180
1181        (*mAudioQueue.begin()).mNotifyConsumed->post();
1182        mAudioQueue.erase(mAudioQueue.begin());
1183        return;
1184    }
1185
1186    syncQueuesDone_l();
1187}
1188
1189void NuPlayer::Renderer::syncQueuesDone_l() {
1190    if (!mSyncQueues) {
1191        return;
1192    }
1193
1194    mSyncQueues = false;
1195
1196    if (!mAudioQueue.empty()) {
1197        postDrainAudioQueue_l();
1198    }
1199
1200    if (!mVideoQueue.empty()) {
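        // postDrainVideoQueue() must be called without mLock held (it acquires mLock itself),
        // so drop the lock around the call.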
1201        mLock.unlock();
1202        postDrainVideoQueue();
1203        mLock.lock();
1204    }
1205}
1206
1207void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
1208    int32_t audio;
1209    CHECK(msg->findInt32("audio", &audio));
1210
1211    if (dropBufferIfStale(audio, msg)) {
1212        return;
1213    }
1214
1215    int32_t finalResult;
1216    CHECK(msg->findInt32("finalResult", &finalResult));
1217
1218    QueueEntry entry;
1219    entry.mOffset = 0;
1220    entry.mFinalResult = finalResult;
1221
1222    if (audio) {
1223        Mutex::Autolock autoLock(mLock);
1224        if (mAudioQueue.empty() && mSyncQueues) {
1225            syncQueuesDone_l();
1226        }
1227        mAudioQueue.push_back(entry);
1228        postDrainAudioQueue_l();
1229    } else {
1230        if (mVideoQueue.empty() && getSyncQueues()) {
1231            Mutex::Autolock autoLock(mLock);
1232            syncQueuesDone_l();
1233        }
1234        mVideoQueue.push_back(entry);
1235        postDrainVideoQueue();
1236    }
1237}
1238
1239void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
1240    int32_t audio, notifyComplete;
1241    CHECK(msg->findInt32("audio", &audio));
1242
1243    {
1244        Mutex::Autolock autoLock(mLock);
1245        if (audio) {
1246            notifyComplete = mNotifyCompleteAudio;
1247            mNotifyCompleteAudio = false;
1248        } else {
1249            notifyComplete = mNotifyCompleteVideo;
1250            mNotifyCompleteVideo = false;
1251        }
1252
1253        // If we're currently syncing the queues, i.e. dropping audio while
1254        // aligning the first audio/video buffer times and only one of the
1255        // two queues has data, we may starve that queue by not requesting
1256        // more buffers from the decoder. If the other source then encounters
1257        // a discontinuity that leads to flushing, we'll never find the
1258        // corresponding discontinuity on the other queue.
1259        // Therefore we'll stop syncing the queues if at least one of them
1260        // is flushed.
1261        syncQueuesDone_l();
1262        clearAnchorTime_l();
1263    }
1264
1265    ALOGV("flushing %s", audio ? "audio" : "video");
1266    if (audio) {
1267        {
1268            Mutex::Autolock autoLock(mLock);
1269            flushQueue(&mAudioQueue);
1270
1271            ++mAudioDrainGeneration;
1272            prepareForMediaRenderingStart_l();
1273
1274            // the frame count will be reset after flush.
1275            clearAudioFirstAnchorTime_l();
1276        }
1277
1278        mDrainAudioQueuePending = false;
1279
1280        if (offloadingAudio()) {
1281            mAudioSink->pause();
1282            mAudioSink->flush();
1283            mAudioSink->start();
1284        } else {
1285            mAudioSink->pause();
1286            mAudioSink->flush();
1287            // Call stop() to signal to the AudioSink to completely fill the
1288            // internal buffer before resuming playback.
1289            mAudioSink->stop();
1290            if (!mPaused) {
1291                mAudioSink->start();
1292            }
1293            mNumFramesWritten = 0;
1294        }
1295    } else {
1296        flushQueue(&mVideoQueue);
1297
1298        mDrainVideoQueuePending = false;
1299
1300        if (mVideoScheduler != NULL) {
1301            mVideoScheduler->restart();
1302        }
1303
1304        Mutex::Autolock autoLock(mLock);
1305        ++mVideoDrainGeneration;
1306        prepareForMediaRenderingStart_l();
1307    }
1308
1309    mVideoSampleReceived = false;
1310
1311    if (notifyComplete) {
1312        notifyFlushComplete(audio);
1313    }
1314}
1315
1316void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
1317    while (!queue->empty()) {
1318        QueueEntry *entry = &*queue->begin();
1319
1320        if (entry->mBuffer != NULL) {
1321            entry->mNotifyConsumed->post();
1322        }
1323
1324        queue->erase(queue->begin());
1325        entry = NULL;
1326    }
1327}
1328
1329void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
1330    sp<AMessage> notify = mNotify->dup();
1331    notify->setInt32("what", kWhatFlushComplete);
1332    notify->setInt32("audio", static_cast<int32_t>(audio));
1333    notify->post();
1334}
1335
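// A queued buffer or EOS is stale if its queueGeneration no longer matches the current one
// (generations are bumped on every flush); stale buffers are returned to the decoder and dropped.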
1336bool NuPlayer::Renderer::dropBufferIfStale(
1337        bool audio, const sp<AMessage> &msg) {
1338    int32_t queueGeneration;
1339    CHECK(msg->findInt32("queueGeneration", &queueGeneration));
1340
1341    if (queueGeneration == getQueueGeneration(audio)) {
1342        return false;
1343    }
1344
1345    sp<AMessage> notifyConsumed;
1346    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
1347        notifyConsumed->post();
1348    }
1349
1350    return true;
1351}
1352
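// Called when the audio sink has changed (see onOpenAudioSink()). For non-offloaded audio,
// re-seed mNumFramesWritten from the sink's frames-written count so playout-duration estimates
// stay consistent with the new sink.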
1353void NuPlayer::Renderer::onAudioSinkChanged() {
1354    if (offloadingAudio()) {
1355        return;
1356    }
1357    CHECK(!mDrainAudioQueuePending);
1358    mNumFramesWritten = 0;
1359    {
1360        Mutex::Autolock autoLock(mLock);
1361        mAnchorNumFramesWritten = -1;
1362    }
1363    uint32_t written;
1364    if (mAudioSink->getFramesWritten(&written) == OK) {
1365        mNumFramesWritten = written;
1366    }
1367}
1368
1369void NuPlayer::Renderer::onDisableOffloadAudio() {
1370    Mutex::Autolock autoLock(mLock);
1371    mFlags &= ~FLAG_OFFLOAD_AUDIO;
1372    ++mAudioDrainGeneration;
1373}
1374
1375void NuPlayer::Renderer::onEnableOffloadAudio() {
1376    Mutex::Autolock autoLock(mLock);
1377    mFlags |= FLAG_OFFLOAD_AUDIO;
1378    ++mAudioDrainGeneration;
1379}
1380
1381void NuPlayer::Renderer::onPause() {
1382    if (mPaused) {
1383        return;
1384    }
1385
1386    {
1387        Mutex::Autolock autoLock(mLock);
1388        ++mAudioDrainGeneration;
1389        ++mVideoDrainGeneration;
1390        prepareForMediaRenderingStart_l();
1391        mPaused = true;
1392        mMediaClock->setPlaybackRate(0.0);
1393    }
1394
1395    mDrainAudioQueuePending = false;
1396    mDrainVideoQueuePending = false;
1397
1398    if (mHasAudio) {
1399        mAudioSink->pause();
1400        startAudioOffloadPauseTimeout();
1401    }
1402
1403    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
1404          mAudioQueue.size(), mVideoQueue.size());
1405}
1406
1407void NuPlayer::Renderer::onResume() {
1408    if (!mPaused) {
1409        return;
1410    }
1411
1412    if (mHasAudio) {
1413        cancelAudioOffloadPauseTimeout();
1414        mAudioSink->start();
1415    }
1416
1417    {
1418        Mutex::Autolock autoLock(mLock);
1419        mPaused = false;
1420
1421        // configure audiosink as we did not do it when pausing
1422        if (mAudioSink != NULL && mAudioSink->ready()) {
1423            mAudioSink->setPlaybackRate(mPlaybackSettings);
1424        }
1425
1426        mMediaClock->setPlaybackRate(mPlaybackRate);
1427
1428        if (!mAudioQueue.empty()) {
1429            postDrainAudioQueue_l();
1430        }
1431    }
1432
1433    if (!mVideoQueue.empty()) {
1434        postDrainVideoQueue();
1435    }
1436}
1437
1438void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
1439    if (mVideoScheduler == NULL) {
1440        mVideoScheduler = new VideoFrameScheduler();
1441    }
1442    mVideoScheduler->init(fps);
1443}
1444
1445int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
1446    Mutex::Autolock autoLock(mLock);
1447    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
1448}
1449
1450int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
1451    Mutex::Autolock autoLock(mLock);
1452    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
1453}
1454
1455bool NuPlayer::Renderer::getSyncQueues() {
1456    Mutex::Autolock autoLock(mLock);
1457    return mSyncQueues;
1458}
1459
1460// TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs()
1461// as it acquires locks and may query the audio driver.
1462//
1463// Some calls could conceivably retrieve extrapolated data instead of
1464// accessing getTimestamp() or getPosition() every time a data buffer with
1465// a media time is received.
1466//
1467// Calculate duration of played samples if played at normal rate (i.e., 1.0).
1468int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
1469    uint32_t numFramesPlayed;
1470    int64_t numFramesPlayedAt;
1471    AudioTimestamp ts;
1472    static const int64_t kStaleTimestamp100ms = 100000;
1473
1474    status_t res = mAudioSink->getTimestamp(ts);
1475    if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
1476        numFramesPlayed = ts.mPosition;
1477        numFramesPlayedAt =
1478            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
1479        const int64_t timestampAge = nowUs - numFramesPlayedAt;
1480        if (timestampAge > kStaleTimestamp100ms) {
1481            // This is an audio FIXME.
1482            // getTimestamp returns a timestamp which may come from audio mixing threads.
1483            // After pausing, the MixerThread may go idle, thus the mTime estimate may
1484            // become stale. Assuming that the MixerThread runs 20ms, with FastMixer at 5ms,
1485            // the max latency should be about 25ms with an average around 12ms (to be verified).
1486            // For safety we use 100ms.
1487            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
1488                    (long long)nowUs, (long long)numFramesPlayedAt);
1489            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
1490        }
1491        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
1492    } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
1493        numFramesPlayed = 0;
1494        numFramesPlayedAt = nowUs;
1495        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
1496        //        numFramesPlayed, (long long)numFramesPlayedAt);
1497    } else {                         // case 3: transitory at new track or audio fast tracks.
1498        res = mAudioSink->getPosition(&numFramesPlayed);
1499        CHECK_EQ(res, (status_t)OK);
1500        numFramesPlayedAt = nowUs;
1501        numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
1502        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
1503    }
1504
1505    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
1506    int64_t durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
1507            + nowUs - numFramesPlayedAt;
1508    if (durationUs < 0) {
1509        // Occurs when the numFramesPlayed position is very small and either:
1510        // (1) In case 1, nowUs was computed before getTimestamp() was called, and
1511        //     numFramesPlayedAt exceeds nowUs by more than the duration of numFramesPlayed.
1512        // (2) In case 3, adding mAudioSink->latency() to the getPosition() time pushed
1513        //     numFramesPlayedAt past nowUs by more than the duration of numFramesPlayed.
1514        //
1515        // Both of these are transitory conditions.
1516        ALOGV("getPlayedOutAudioDurationUs: negative duration %lld set to zero", (long long)durationUs);
1517        durationUs = 0;
1518    }
1519    ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
1520            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
1521    return durationUs;
1522}
1523
1524void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
1525    if (mAudioTornDown) {
1526        return;
1527    }
1528    mAudioTornDown = true;
1529
1530    int64_t currentPositionUs;
1531    if (getCurrentPosition(&currentPositionUs) != OK) {
1532        currentPositionUs = 0;
1533    }
1534
1535    mAudioSink->stop();
1536    mAudioSink->flush();
1537
1538    sp<AMessage> notify = mNotify->dup();
1539    notify->setInt32("what", kWhatAudioTearDown);
1540    notify->setInt64("positionUs", currentPositionUs);
1541    notify->setInt32("reason", reason);
1542    notify->post();
1543}
1544
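// While paused in offload mode, hold a wake lock and arm a timeout; if the pause outlasts
// kOffloadPauseMaxUs, the audio sink is torn down (kDueToTimeout) so the audio DSP can power down.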
1545void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
1546    if (offloadingAudio()) {
1547        mWakeLock->acquire();
1548        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
1549        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
1550        msg->post(kOffloadPauseMaxUs);
1551    }
1552}
1553
1554void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
1555    if (offloadingAudio()) {
1556        mWakeLock->release(true);
1557        ++mAudioOffloadPauseTimeoutGeneration;
1558    }
1559}
1560
1561status_t NuPlayer::Renderer::onOpenAudioSink(
1562        const sp<AMessage> &format,
1563        bool offloadOnly,
1564        bool hasVideo,
1565        uint32_t flags) {
1566    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
1567            offloadOnly, offloadingAudio());
1568    bool audioSinkChanged = false;
1569
1570    int32_t numChannels;
1571    CHECK(format->findInt32("channel-count", &numChannels));
1572
1573    int32_t channelMask;
1574    if (!format->findInt32("channel-mask", &channelMask)) {
1575        // Signal the AudioSink to derive the channel mask from the channel count.
1576        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
1577    }
1578
1579    int32_t sampleRate;
1580    CHECK(format->findInt32("sample-rate", &sampleRate));
1581
1582    if (offloadingAudio()) {
1583        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
1584        AString mime;
1585        CHECK(format->findString("mime", &mime));
1586        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());
1587
1588        if (err != OK) {
1589            ALOGE("Couldn't map mime \"%s\" to a valid "
1590                    "audio_format", mime.c_str());
1591            onDisableOffloadAudio();
1592        } else {
1593            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
1594                    mime.c_str(), audioFormat);
1595
1596            int avgBitRate = -1;
1597            format->findInt32("bit-rate", &avgBitRate);
1598
1599            int32_t aacProfile = -1;
1600            if (audioFormat == AUDIO_FORMAT_AAC
1601                    && format->findInt32("aac-profile", &aacProfile)) {
1602                // Redefine AAC format as per aac profile
1603                mapAACProfileToAudioFormat(
1604                        audioFormat,
1605                        aacProfile);
1606            }
1607
1608            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
1609            offloadInfo.duration_us = -1;
1610            format->findInt64(
1611                    "durationUs", &offloadInfo.duration_us);
1612            offloadInfo.sample_rate = sampleRate;
1613            offloadInfo.channel_mask = channelMask;
1614            offloadInfo.format = audioFormat;
1615            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
1616            offloadInfo.bit_rate = avgBitRate;
1617            offloadInfo.has_video = hasVideo;
1618            offloadInfo.is_streaming = true;
1619
1620            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
1621                ALOGV("openAudioSink: no change in offload mode");
1622                // no change from previous configuration, everything ok.
1623                return OK;
1624            }
1625            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1626
1627            ALOGV("openAudioSink: try to open AudioSink in offload mode");
1628            uint32_t offloadFlags = flags;
1629            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1630            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
1631            audioSinkChanged = true;
1632            mAudioSink->close();
1633
1634            err = mAudioSink->open(
1635                    sampleRate,
1636                    numChannels,
1637                    (audio_channel_mask_t)channelMask,
1638                    audioFormat,
1639                    8 /* bufferCount */,
1640                    &NuPlayer::Renderer::AudioSinkCallback,
1641                    this,
1642                    (audio_output_flags_t)offloadFlags,
1643                    &offloadInfo);
1644
1645            if (err == OK) {
1646                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1647            }
1648
1649            if (err == OK) {
1650                // If the playback is offloaded to h/w, we pass
1651                // the HAL some metadata information.
1652                // We don't want to do this for PCM because it
1653                // will be going through the AudioFlinger mixer
1654                // before reaching the hardware.
1655                // TODO
1656                mCurrentOffloadInfo = offloadInfo;
1657                err = mAudioSink->start();
1658                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
1659            }
1660            if (err != OK) {
1661                // Clean up, fall back to non offload mode.
1662                mAudioSink->close();
1663                onDisableOffloadAudio();
1664                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1665                ALOGV("openAudioSink: offload failed");
1666            }
1667            mUseAudioCallback = true;  // offload mode transfers data through callback
1668        }
1669    }
1670    if (!offloadOnly && !offloadingAudio()) {
1671        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
1672        uint32_t pcmFlags = flags;
1673        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
1674
1675        const PcmInfo info = {
1676                (audio_channel_mask_t)channelMask,
1677                (audio_output_flags_t)pcmFlags,
1678                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
1679                numChannels,
1680                sampleRate
1681        };
1682        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
1683            ALOGV("openAudioSink: no change in pcm mode");
1684            // no change from previous configuration, everything ok.
1685            return OK;
1686        }
1687
1688        audioSinkChanged = true;
1689        mAudioSink->close();
1690        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1691        // Note: It is possible to set up the callback, but not use it to send audio data.
1692        // This requires a fix in AudioSink to explicitly specify the transfer mode.
1693        mUseAudioCallback = getUseAudioCallbackSetting();
1694        status_t err = mAudioSink->open(
1695                    sampleRate,
1696                    numChannels,
1697                    (audio_channel_mask_t)channelMask,
1698                    AUDIO_FORMAT_PCM_16_BIT,
1699                    8 /* bufferCount */,
1700                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
1701                    mUseAudioCallback ? this : NULL,
1702                    (audio_output_flags_t)pcmFlags,
1703                    NULL,
1704                    true /* doNotReconnect */);
1705        if (err == OK) {
1706            err = mAudioSink->setPlaybackRate(mPlaybackSettings);
1707        }
1708        if (err != OK) {
1709            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
1710            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1711            return err;
1712        }
1713        mCurrentPcmInfo = info;
1714        mAudioSink->start();
1715    }
1716    if (audioSinkChanged) {
1717        onAudioSinkChanged();
1718    }
1719    mAudioTornDown = false;
1720    return OK;
1721}
1722
1723void NuPlayer::Renderer::onCloseAudioSink() {
1724    mAudioSink->close();
1725    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
1726    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
1727}
1728
1729}  // namespace android
1730
1731