// NuPlayerRenderer.cpp revision 528c8403ad2ede53054a706a20c00b710fa08166
1/* 2 * Copyright (C) 2010 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17//#define LOG_NDEBUG 0 18#define LOG_TAG "NuPlayerRenderer" 19#include <utils/Log.h> 20 21#include "NuPlayerRenderer.h" 22#include <cutils/properties.h> 23#include <media/stagefright/foundation/ABuffer.h> 24#include <media/stagefright/foundation/ADebug.h> 25#include <media/stagefright/foundation/AMessage.h> 26#include <media/stagefright/foundation/AUtils.h> 27#include <media/stagefright/foundation/AWakeLock.h> 28#include <media/stagefright/MediaClock.h> 29#include <media/stagefright/MediaErrors.h> 30#include <media/stagefright/MetaData.h> 31#include <media/stagefright/Utils.h> 32#include <media/stagefright/VideoFrameScheduler.h> 33 34#include <inttypes.h> 35 36namespace android { 37 38/* 39 * Example of common configuration settings in shell script form 40 41 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager 42 adb shell setprop audio.offload.disable 1 43 44 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager 45 adb shell setprop audio.offload.video 1 46 47 #Use audio callbacks for PCM data 48 adb shell setprop media.stagefright.audio.cbk 1 49 50 #Use deep buffer for PCM data with video (it is generally enabled for audio-only) 51 adb shell setprop media.stagefright.audio.deep 1 52 53 #Set size of buffers for pcm audio sink in msec (example: 1000 msec) 54 adb shell setprop 
media.stagefright.audio.sink 1000 55 56 * These configurations take effect for the next track played (not the current track). 57 */ 58 59static inline bool getUseAudioCallbackSetting() { 60 return property_get_bool("media.stagefright.audio.cbk", false /* default_value */); 61} 62 63static inline int32_t getAudioSinkPcmMsSetting() { 64 return property_get_int32( 65 "media.stagefright.audio.sink", 500 /* default_value */); 66} 67 68// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink 69// is closed to allow the audio DSP to power down. 70static const int64_t kOffloadPauseMaxUs = 10000000ll; 71 72// Maximum allowed delay from AudioSink, 1.5 seconds. 73static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll; 74 75static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000; 76 77// static 78const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = { 79 AUDIO_CHANNEL_NONE, 80 AUDIO_OUTPUT_FLAG_NONE, 81 AUDIO_FORMAT_INVALID, 82 0, // mNumChannels 83 0 // mSampleRate 84}; 85 86// static 87const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll; 88 89NuPlayer::Renderer::Renderer( 90 const sp<MediaPlayerBase::AudioSink> &sink, 91 const sp<AMessage> ¬ify, 92 uint32_t flags) 93 : mAudioSink(sink), 94 mNotify(notify), 95 mFlags(flags), 96 mNumFramesWritten(0), 97 mDrainAudioQueuePending(false), 98 mDrainVideoQueuePending(false), 99 mAudioQueueGeneration(0), 100 mVideoQueueGeneration(0), 101 mAudioDrainGeneration(0), 102 mVideoDrainGeneration(0), 103 mAudioEOSGeneration(0), 104 mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT), 105 mAudioFirstAnchorTimeMediaUs(-1), 106 mAnchorTimeMediaUs(-1), 107 mAnchorNumFramesWritten(-1), 108 mVideoLateByUs(0ll), 109 mHasAudio(false), 110 mHasVideo(false), 111 mNotifyCompleteAudio(false), 112 mNotifyCompleteVideo(false), 113 mSyncQueues(false), 114 mPaused(false), 115 mPauseDrainAudioAllowedUs(0), 116 mVideoSampleReceived(false), 117 
mVideoRenderingStarted(false), 118 mVideoRenderingStartGeneration(0), 119 mAudioRenderingStartGeneration(0), 120 mRenderingDataDelivered(false), 121 mNextAudioClockUpdateTimeUs(-1), 122 mLastAudioMediaTimeUs(-1), 123 mAudioOffloadPauseTimeoutGeneration(0), 124 mAudioTornDown(false), 125 mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER), 126 mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER), 127 mTotalBuffersQueued(0), 128 mLastAudioBufferDrained(0), 129 mUseAudioCallback(false), 130 mWakeLock(new AWakeLock()) { 131 mMediaClock = new MediaClock; 132 mPlaybackRate = mPlaybackSettings.mSpeed; 133 mMediaClock->setPlaybackRate(mPlaybackRate); 134} 135 136NuPlayer::Renderer::~Renderer() { 137 if (offloadingAudio()) { 138 mAudioSink->stop(); 139 mAudioSink->flush(); 140 mAudioSink->close(); 141 } 142} 143 144void NuPlayer::Renderer::queueBuffer( 145 bool audio, 146 const sp<ABuffer> &buffer, 147 const sp<AMessage> ¬ifyConsumed) { 148 sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this); 149 msg->setInt32("queueGeneration", getQueueGeneration(audio)); 150 msg->setInt32("audio", static_cast<int32_t>(audio)); 151 msg->setBuffer("buffer", buffer); 152 msg->setMessage("notifyConsumed", notifyConsumed); 153 msg->post(); 154} 155 156void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) { 157 CHECK_NE(finalResult, (status_t)OK); 158 159 sp<AMessage> msg = new AMessage(kWhatQueueEOS, this); 160 msg->setInt32("queueGeneration", getQueueGeneration(audio)); 161 msg->setInt32("audio", static_cast<int32_t>(audio)); 162 msg->setInt32("finalResult", finalResult); 163 msg->post(); 164} 165 166status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) { 167 sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this); 168 writeToAMessage(msg, rate); 169 sp<AMessage> response; 170 status_t err = msg->postAndAwaitResponse(&response); 171 if (err == OK && response != NULL) { 172 CHECK(response->findInt32("err", &err)); 173 } 174 return err; 175} 176 177status_t 
NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) { 178 if (rate.mSpeed == 0.f) { 179 onPause(); 180 // don't call audiosink's setPlaybackRate if pausing, as pitch does not 181 // have to correspond to the any non-0 speed (e.g old speed). Keep 182 // settings nonetheless, using the old speed, in case audiosink changes. 183 AudioPlaybackRate newRate = rate; 184 newRate.mSpeed = mPlaybackSettings.mSpeed; 185 mPlaybackSettings = newRate; 186 return OK; 187 } 188 189 if (mAudioSink != NULL && mAudioSink->ready()) { 190 status_t err = mAudioSink->setPlaybackRate(rate); 191 if (err != OK) { 192 return err; 193 } 194 } 195 mPlaybackSettings = rate; 196 mPlaybackRate = rate.mSpeed; 197 mMediaClock->setPlaybackRate(mPlaybackRate); 198 return OK; 199} 200 201status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) { 202 sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this); 203 sp<AMessage> response; 204 status_t err = msg->postAndAwaitResponse(&response); 205 if (err == OK && response != NULL) { 206 CHECK(response->findInt32("err", &err)); 207 if (err == OK) { 208 readFromAMessage(response, rate); 209 } 210 } 211 return err; 212} 213 214status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) { 215 if (mAudioSink != NULL && mAudioSink->ready()) { 216 status_t err = mAudioSink->getPlaybackRate(rate); 217 if (err == OK) { 218 if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) { 219 ALOGW("correcting mismatch in internal/external playback rate"); 220 } 221 // get playback settings used by audiosink, as it may be 222 // slightly off due to audiosink not taking small changes. 
223 mPlaybackSettings = *rate; 224 if (mPaused) { 225 rate->mSpeed = 0.f; 226 } 227 } 228 return err; 229 } 230 *rate = mPlaybackSettings; 231 return OK; 232} 233 234status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) { 235 sp<AMessage> msg = new AMessage(kWhatConfigSync, this); 236 writeToAMessage(msg, sync, videoFpsHint); 237 sp<AMessage> response; 238 status_t err = msg->postAndAwaitResponse(&response); 239 if (err == OK && response != NULL) { 240 CHECK(response->findInt32("err", &err)); 241 } 242 return err; 243} 244 245status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) { 246 if (sync.mSource != AVSYNC_SOURCE_DEFAULT) { 247 return BAD_VALUE; 248 } 249 // TODO: support sync sources 250 return INVALID_OPERATION; 251} 252 253status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) { 254 sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this); 255 sp<AMessage> response; 256 status_t err = msg->postAndAwaitResponse(&response); 257 if (err == OK && response != NULL) { 258 CHECK(response->findInt32("err", &err)); 259 if (err == OK) { 260 readFromAMessage(response, sync, videoFps); 261 } 262 } 263 return err; 264} 265 266status_t NuPlayer::Renderer::onGetSyncSettings( 267 AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) { 268 *sync = mSyncSettings; 269 *videoFps = -1.f; 270 return OK; 271} 272 273void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) { 274 { 275 Mutex::Autolock autoLock(mLock); 276 if (audio) { 277 mNotifyCompleteAudio |= notifyComplete; 278 clearAudioFirstAnchorTime_l(); 279 ++mAudioQueueGeneration; 280 ++mAudioDrainGeneration; 281 } else { 282 mNotifyCompleteVideo |= notifyComplete; 283 ++mVideoQueueGeneration; 284 ++mVideoDrainGeneration; 285 } 286 287 clearAnchorTime_l(); 288 mVideoLateByUs = 0; 289 mSyncQueues = false; 290 } 291 292 sp<AMessage> msg = new AMessage(kWhatFlush, this); 293 
msg->setInt32("audio", static_cast<int32_t>(audio)); 294 msg->post(); 295} 296 297void NuPlayer::Renderer::signalTimeDiscontinuity() { 298} 299 300void NuPlayer::Renderer::signalDisableOffloadAudio() { 301 (new AMessage(kWhatDisableOffloadAudio, this))->post(); 302} 303 304void NuPlayer::Renderer::signalEnableOffloadAudio() { 305 (new AMessage(kWhatEnableOffloadAudio, this))->post(); 306} 307 308void NuPlayer::Renderer::pause() { 309 (new AMessage(kWhatPause, this))->post(); 310} 311 312void NuPlayer::Renderer::resume() { 313 (new AMessage(kWhatResume, this))->post(); 314} 315 316void NuPlayer::Renderer::setVideoFrameRate(float fps) { 317 sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this); 318 msg->setFloat("frame-rate", fps); 319 msg->post(); 320} 321 322// Called on any threads without mLock acquired. 323status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) { 324 status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs); 325 if (result == OK) { 326 return result; 327 } 328 329 // MediaClock has not started yet. Try to start it if possible. 330 { 331 Mutex::Autolock autoLock(mLock); 332 if (mAudioFirstAnchorTimeMediaUs == -1) { 333 return result; 334 } 335 336 AudioTimestamp ts; 337 status_t res = mAudioSink->getTimestamp(ts); 338 if (res != OK) { 339 return result; 340 } 341 342 // AudioSink has rendered some frames. 
343 int64_t nowUs = ALooper::GetNowUs(); 344 int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs) 345 + mAudioFirstAnchorTimeMediaUs; 346 mMediaClock->updateAnchor(nowMediaUs, nowUs, -1); 347 } 348 349 return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs); 350} 351 352void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() { 353 mAudioFirstAnchorTimeMediaUs = -1; 354 mMediaClock->setStartingTimeMedia(-1); 355} 356 357void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) { 358 if (mAudioFirstAnchorTimeMediaUs == -1) { 359 mAudioFirstAnchorTimeMediaUs = mediaUs; 360 mMediaClock->setStartingTimeMedia(mediaUs); 361 } 362} 363 364void NuPlayer::Renderer::clearAnchorTime_l() { 365 mMediaClock->clearAnchor(); 366 mAnchorTimeMediaUs = -1; 367 mAnchorNumFramesWritten = -1; 368} 369 370void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) { 371 Mutex::Autolock autoLock(mLock); 372 mVideoLateByUs = lateUs; 373} 374 375int64_t NuPlayer::Renderer::getVideoLateByUs() { 376 Mutex::Autolock autoLock(mLock); 377 return mVideoLateByUs; 378} 379 380status_t NuPlayer::Renderer::openAudioSink( 381 const sp<AMessage> &format, 382 bool offloadOnly, 383 bool hasVideo, 384 uint32_t flags, 385 bool *isOffloaded) { 386 sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this); 387 msg->setMessage("format", format); 388 msg->setInt32("offload-only", offloadOnly); 389 msg->setInt32("has-video", hasVideo); 390 msg->setInt32("flags", flags); 391 392 sp<AMessage> response; 393 msg->postAndAwaitResponse(&response); 394 395 int32_t err; 396 if (!response->findInt32("err", &err)) { 397 err = INVALID_OPERATION; 398 } else if (err == OK && isOffloaded != NULL) { 399 int32_t offload; 400 CHECK(response->findInt32("offload", &offload)); 401 *isOffloaded = (offload != 0); 402 } 403 return err; 404} 405 406void NuPlayer::Renderer::closeAudioSink() { 407 sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this); 408 409 sp<AMessage> response; 410 
msg->postAndAwaitResponse(&response); 411} 412 413void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) { 414 switch (msg->what()) { 415 case kWhatOpenAudioSink: 416 { 417 sp<AMessage> format; 418 CHECK(msg->findMessage("format", &format)); 419 420 int32_t offloadOnly; 421 CHECK(msg->findInt32("offload-only", &offloadOnly)); 422 423 int32_t hasVideo; 424 CHECK(msg->findInt32("has-video", &hasVideo)); 425 426 uint32_t flags; 427 CHECK(msg->findInt32("flags", (int32_t *)&flags)); 428 429 status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags); 430 431 sp<AMessage> response = new AMessage; 432 response->setInt32("err", err); 433 response->setInt32("offload", offloadingAudio()); 434 435 sp<AReplyToken> replyID; 436 CHECK(msg->senderAwaitsResponse(&replyID)); 437 response->postReply(replyID); 438 439 break; 440 } 441 442 case kWhatCloseAudioSink: 443 { 444 sp<AReplyToken> replyID; 445 CHECK(msg->senderAwaitsResponse(&replyID)); 446 447 onCloseAudioSink(); 448 449 sp<AMessage> response = new AMessage; 450 response->postReply(replyID); 451 break; 452 } 453 454 case kWhatStopAudioSink: 455 { 456 mAudioSink->stop(); 457 break; 458 } 459 460 case kWhatDrainAudioQueue: 461 { 462 mDrainAudioQueuePending = false; 463 464 int32_t generation; 465 CHECK(msg->findInt32("drainGeneration", &generation)); 466 if (generation != getDrainGeneration(true /* audio */)) { 467 break; 468 } 469 470 if (onDrainAudioQueue()) { 471 uint32_t numFramesPlayed; 472 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), 473 (status_t)OK); 474 475 uint32_t numFramesPendingPlayout = 476 mNumFramesWritten - numFramesPlayed; 477 478 // This is how long the audio sink will have data to 479 // play back. 480 int64_t delayUs = 481 mAudioSink->msecsPerFrame() 482 * numFramesPendingPlayout * 1000ll; 483 if (mPlaybackRate > 1.0f) { 484 delayUs /= mPlaybackRate; 485 } 486 487 // Let's give it more data after about half that time 488 // has elapsed. 
489 Mutex::Autolock autoLock(mLock); 490 postDrainAudioQueue_l(delayUs / 2); 491 } 492 break; 493 } 494 495 case kWhatDrainVideoQueue: 496 { 497 int32_t generation; 498 CHECK(msg->findInt32("drainGeneration", &generation)); 499 if (generation != getDrainGeneration(false /* audio */)) { 500 break; 501 } 502 503 mDrainVideoQueuePending = false; 504 505 onDrainVideoQueue(); 506 507 postDrainVideoQueue(); 508 break; 509 } 510 511 case kWhatPostDrainVideoQueue: 512 { 513 int32_t generation; 514 CHECK(msg->findInt32("drainGeneration", &generation)); 515 if (generation != getDrainGeneration(false /* audio */)) { 516 break; 517 } 518 519 mDrainVideoQueuePending = false; 520 postDrainVideoQueue(); 521 break; 522 } 523 524 case kWhatQueueBuffer: 525 { 526 onQueueBuffer(msg); 527 break; 528 } 529 530 case kWhatQueueEOS: 531 { 532 onQueueEOS(msg); 533 break; 534 } 535 536 case kWhatEOS: 537 { 538 int32_t generation; 539 CHECK(msg->findInt32("audioEOSGeneration", &generation)); 540 if (generation != mAudioEOSGeneration) { 541 break; 542 } 543 status_t finalResult; 544 CHECK(msg->findInt32("finalResult", &finalResult)); 545 notifyEOS(true /* audio */, finalResult); 546 break; 547 } 548 549 case kWhatConfigPlayback: 550 { 551 sp<AReplyToken> replyID; 552 CHECK(msg->senderAwaitsResponse(&replyID)); 553 AudioPlaybackRate rate; 554 readFromAMessage(msg, &rate); 555 status_t err = onConfigPlayback(rate); 556 sp<AMessage> response = new AMessage; 557 response->setInt32("err", err); 558 response->postReply(replyID); 559 break; 560 } 561 562 case kWhatGetPlaybackSettings: 563 { 564 sp<AReplyToken> replyID; 565 CHECK(msg->senderAwaitsResponse(&replyID)); 566 AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT; 567 status_t err = onGetPlaybackSettings(&rate); 568 sp<AMessage> response = new AMessage; 569 if (err == OK) { 570 writeToAMessage(response, rate); 571 } 572 response->setInt32("err", err); 573 response->postReply(replyID); 574 break; 575 } 576 577 case kWhatConfigSync: 578 { 579 
sp<AReplyToken> replyID; 580 CHECK(msg->senderAwaitsResponse(&replyID)); 581 AVSyncSettings sync; 582 float videoFpsHint; 583 readFromAMessage(msg, &sync, &videoFpsHint); 584 status_t err = onConfigSync(sync, videoFpsHint); 585 sp<AMessage> response = new AMessage; 586 response->setInt32("err", err); 587 response->postReply(replyID); 588 break; 589 } 590 591 case kWhatGetSyncSettings: 592 { 593 sp<AReplyToken> replyID; 594 CHECK(msg->senderAwaitsResponse(&replyID)); 595 596 ALOGV("kWhatGetSyncSettings"); 597 AVSyncSettings sync; 598 float videoFps = -1.f; 599 status_t err = onGetSyncSettings(&sync, &videoFps); 600 sp<AMessage> response = new AMessage; 601 if (err == OK) { 602 writeToAMessage(response, sync, videoFps); 603 } 604 response->setInt32("err", err); 605 response->postReply(replyID); 606 break; 607 } 608 609 case kWhatFlush: 610 { 611 onFlush(msg); 612 break; 613 } 614 615 case kWhatDisableOffloadAudio: 616 { 617 onDisableOffloadAudio(); 618 break; 619 } 620 621 case kWhatEnableOffloadAudio: 622 { 623 onEnableOffloadAudio(); 624 break; 625 } 626 627 case kWhatPause: 628 { 629 onPause(); 630 break; 631 } 632 633 case kWhatResume: 634 { 635 onResume(); 636 break; 637 } 638 639 case kWhatSetVideoFrameRate: 640 { 641 float fps; 642 CHECK(msg->findFloat("frame-rate", &fps)); 643 onSetVideoFrameRate(fps); 644 break; 645 } 646 647 case kWhatAudioTearDown: 648 { 649 onAudioTearDown(kDueToError); 650 break; 651 } 652 653 case kWhatAudioOffloadPauseTimeout: 654 { 655 int32_t generation; 656 CHECK(msg->findInt32("drainGeneration", &generation)); 657 if (generation != mAudioOffloadPauseTimeoutGeneration) { 658 break; 659 } 660 ALOGV("Audio Offload tear down due to pause timeout."); 661 onAudioTearDown(kDueToTimeout); 662 mWakeLock->release(); 663 break; 664 } 665 666 default: 667 TRESPASS(); 668 break; 669 } 670} 671 672void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) { 673 if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) { 674 return; 
675 } 676 677 if (mAudioQueue.empty()) { 678 return; 679 } 680 681 // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data. 682 if (mPaused) { 683 const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs(); 684 if (diffUs > delayUs) { 685 delayUs = diffUs; 686 } 687 } 688 689 mDrainAudioQueuePending = true; 690 sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this); 691 msg->setInt32("drainGeneration", mAudioDrainGeneration); 692 msg->post(delayUs); 693} 694 695void NuPlayer::Renderer::prepareForMediaRenderingStart_l() { 696 mAudioRenderingStartGeneration = mAudioDrainGeneration; 697 mVideoRenderingStartGeneration = mVideoDrainGeneration; 698 mRenderingDataDelivered = false; 699} 700 701void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() { 702 if (mVideoRenderingStartGeneration == mVideoDrainGeneration && 703 mAudioRenderingStartGeneration == mAudioDrainGeneration) { 704 mRenderingDataDelivered = true; 705 if (mPaused) { 706 return; 707 } 708 mVideoRenderingStartGeneration = -1; 709 mAudioRenderingStartGeneration = -1; 710 711 sp<AMessage> notify = mNotify->dup(); 712 notify->setInt32("what", kWhatMediaRenderingStart); 713 notify->post(); 714 } 715} 716 717// static 718size_t NuPlayer::Renderer::AudioSinkCallback( 719 MediaPlayerBase::AudioSink * /* audioSink */, 720 void *buffer, 721 size_t size, 722 void *cookie, 723 MediaPlayerBase::AudioSink::cb_event_t event) { 724 NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie; 725 726 switch (event) { 727 case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER: 728 { 729 return me->fillAudioBuffer(buffer, size); 730 break; 731 } 732 733 case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END: 734 { 735 ALOGV("AudioSink::CB_EVENT_STREAM_END"); 736 me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM); 737 break; 738 } 739 740 case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN: 741 { 742 ALOGV("AudioSink::CB_EVENT_TEAR_DOWN"); 743 me->notifyAudioTearDown(); 744 break; 745 
} 746 } 747 748 return 0; 749} 750 751size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) { 752 Mutex::Autolock autoLock(mLock); 753 754 if (!mUseAudioCallback) { 755 return 0; 756 } 757 758 bool hasEOS = false; 759 760 size_t sizeCopied = 0; 761 bool firstEntry = true; 762 QueueEntry *entry; // will be valid after while loop if hasEOS is set. 763 while (sizeCopied < size && !mAudioQueue.empty()) { 764 entry = &*mAudioQueue.begin(); 765 766 if (entry->mBuffer == NULL) { // EOS 767 hasEOS = true; 768 mAudioQueue.erase(mAudioQueue.begin()); 769 break; 770 } 771 772 if (firstEntry && entry->mOffset == 0) { 773 firstEntry = false; 774 int64_t mediaTimeUs; 775 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 776 ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6); 777 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs); 778 } 779 780 size_t copy = entry->mBuffer->size() - entry->mOffset; 781 size_t sizeRemaining = size - sizeCopied; 782 if (copy > sizeRemaining) { 783 copy = sizeRemaining; 784 } 785 786 memcpy((char *)buffer + sizeCopied, 787 entry->mBuffer->data() + entry->mOffset, 788 copy); 789 790 entry->mOffset += copy; 791 if (entry->mOffset == entry->mBuffer->size()) { 792 entry->mNotifyConsumed->post(); 793 mAudioQueue.erase(mAudioQueue.begin()); 794 entry = NULL; 795 } 796 sizeCopied += copy; 797 798 notifyIfMediaRenderingStarted_l(); 799 } 800 801 if (mAudioFirstAnchorTimeMediaUs >= 0) { 802 int64_t nowUs = ALooper::GetNowUs(); 803 int64_t nowMediaUs = 804 mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs); 805 // we don't know how much data we are queueing for offloaded tracks. 806 mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX); 807 } 808 809 // for non-offloaded audio, we need to compute the frames written because 810 // there is no EVENT_STREAM_END notification. The frames written gives 811 // an estimate on the pending played out duration. 
812 if (!offloadingAudio()) { 813 mNumFramesWritten += sizeCopied / mAudioSink->frameSize(); 814 } 815 816 if (hasEOS) { 817 (new AMessage(kWhatStopAudioSink, this))->post(); 818 // As there is currently no EVENT_STREAM_END callback notification for 819 // non-offloaded audio tracks, we need to post the EOS ourselves. 820 if (!offloadingAudio()) { 821 int64_t postEOSDelayUs = 0; 822 if (mAudioSink->needsTrailingPadding()) { 823 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); 824 } 825 ALOGV("fillAudioBuffer: notifyEOS " 826 "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld", 827 mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs); 828 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); 829 } 830 } 831 return sizeCopied; 832} 833 834void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() { 835 List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it; 836 bool foundEOS = false; 837 while (it != mAudioQueue.end()) { 838 int32_t eos; 839 QueueEntry *entry = &*it++; 840 if (entry->mBuffer == NULL 841 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) { 842 itEOS = it; 843 foundEOS = true; 844 } 845 } 846 847 if (foundEOS) { 848 // post all replies before EOS and drop the samples 849 for (it = mAudioQueue.begin(); it != itEOS; it++) { 850 if (it->mBuffer == NULL) { 851 // delay doesn't matter as we don't even have an AudioTrack 852 notifyEOS(true /* audio */, it->mFinalResult); 853 } else { 854 it->mNotifyConsumed->post(); 855 } 856 } 857 mAudioQueue.erase(mAudioQueue.begin(), itEOS); 858 } 859} 860 861bool NuPlayer::Renderer::onDrainAudioQueue() { 862 // do not drain audio during teardown as queued buffers may be invalid. 863 if (mAudioTornDown) { 864 return false; 865 } 866 // TODO: This call to getPosition checks if AudioTrack has been created 867 // in AudioSink before draining audio. If AudioTrack doesn't exist, then 868 // CHECKs on getPosition will fail. 
869 // We still need to figure out why AudioTrack is not created when 870 // this function is called. One possible reason could be leftover 871 // audio. Another possible place is to check whether decoder 872 // has received INFO_FORMAT_CHANGED as the first buffer since 873 // AudioSink is opened there, and possible interactions with flush 874 // immediately after start. Investigate error message 875 // "vorbis_dsp_synthesis returned -135", along with RTSP. 876 uint32_t numFramesPlayed; 877 if (mAudioSink->getPosition(&numFramesPlayed) != OK) { 878 // When getPosition fails, renderer will not reschedule the draining 879 // unless new samples are queued. 880 // If we have pending EOS (or "eos" marker for discontinuities), we need 881 // to post these now as NuPlayerDecoder might be waiting for it. 882 drainAudioQueueUntilLastEOS(); 883 884 ALOGW("onDrainAudioQueue(): audio sink is not ready"); 885 return false; 886 } 887 888#if 0 889 ssize_t numFramesAvailableToWrite = 890 mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed); 891 892 if (numFramesAvailableToWrite == mAudioSink->frameCount()) { 893 ALOGI("audio sink underrun"); 894 } else { 895 ALOGV("audio queue has %d frames left to play", 896 mAudioSink->frameCount() - numFramesAvailableToWrite); 897 } 898#endif 899 900 uint32_t prevFramesWritten = mNumFramesWritten; 901 while (!mAudioQueue.empty()) { 902 QueueEntry *entry = &*mAudioQueue.begin(); 903 904 mLastAudioBufferDrained = entry->mBufferOrdinal; 905 906 if (entry->mBuffer == NULL) { 907 // EOS 908 int64_t postEOSDelayUs = 0; 909 if (mAudioSink->needsTrailingPadding()) { 910 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); 911 } 912 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); 913 mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten); 914 915 mAudioQueue.erase(mAudioQueue.begin()); 916 entry = NULL; 917 if (mAudioSink->needsTrailingPadding()) { 918 // If we're not in gapless 
playback (i.e. through setNextPlayer), we 919 // need to stop the track here, because that will play out the last 920 // little bit at the end of the file. Otherwise short files won't play. 921 mAudioSink->stop(); 922 mNumFramesWritten = 0; 923 } 924 return false; 925 } 926 927 // ignore 0-sized buffer which could be EOS marker with no data 928 if (entry->mOffset == 0 && entry->mBuffer->size() > 0) { 929 int64_t mediaTimeUs; 930 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 931 ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs", 932 mediaTimeUs / 1E6); 933 onNewAudioMediaTime(mediaTimeUs); 934 } 935 936 size_t copy = entry->mBuffer->size() - entry->mOffset; 937 938 ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset, 939 copy, false /* blocking */); 940 if (written < 0) { 941 // An error in AudioSink write. Perhaps the AudioSink was not properly opened. 942 if (written == WOULD_BLOCK) { 943 ALOGV("AudioSink write would block when writing %zu bytes", copy); 944 } else { 945 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy); 946 // This can only happen when AudioSink was opened with doNotReconnect flag set to 947 // true, in which case the NuPlayer will handle the reconnect. 
948 notifyAudioTearDown(); 949 } 950 break; 951 } 952 953 entry->mOffset += written; 954 if (entry->mOffset == entry->mBuffer->size()) { 955 entry->mNotifyConsumed->post(); 956 mAudioQueue.erase(mAudioQueue.begin()); 957 958 entry = NULL; 959 } 960 961 size_t copiedFrames = written / mAudioSink->frameSize(); 962 mNumFramesWritten += copiedFrames; 963 964 { 965 Mutex::Autolock autoLock(mLock); 966 int64_t maxTimeMedia; 967 maxTimeMedia = 968 mAnchorTimeMediaUs + 969 (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL) 970 * 1000LL * mAudioSink->msecsPerFrame()); 971 mMediaClock->updateMaxTimeMedia(maxTimeMedia); 972 973 notifyIfMediaRenderingStarted_l(); 974 } 975 976 if (written != (ssize_t)copy) { 977 // A short count was received from AudioSink::write() 978 // 979 // AudioSink write is called in non-blocking mode. 980 // It may return with a short count when: 981 // 982 // 1) Size to be copied is not a multiple of the frame size. We consider this fatal. 983 // 2) The data to be copied exceeds the available buffer in AudioSink. 984 // 3) An error occurs and data has been partially copied to the buffer in AudioSink. 985 // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded. 986 987 // (Case 1) 988 // Must be a multiple of the frame size. If it is not a multiple of a frame size, it 989 // needs to fail, as we should not carry over fractional frames between calls. 990 CHECK_EQ(copy % mAudioSink->frameSize(), 0); 991 992 // (Case 2, 3, 4) 993 // Return early to the caller. 994 // Beware of calling immediately again as this may busy-loop if you are not careful. 995 ALOGV("AudioSink write short frame count %zd < %zu", written, copy); 996 break; 997 } 998 } 999 1000 // calculate whether we need to reschedule another write. 
1001 bool reschedule = !mAudioQueue.empty() 1002 && (!mPaused 1003 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers 1004 //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u", 1005 // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten); 1006 return reschedule; 1007} 1008 1009int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) { 1010 int32_t sampleRate = offloadingAudio() ? 1011 mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate; 1012 if (sampleRate == 0) { 1013 ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload"); 1014 return 0; 1015 } 1016 // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours. 1017 return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate); 1018} 1019 1020// Calculate duration of pending samples if played at normal rate (i.e., 1.0). 1021int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) { 1022 int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten); 1023 return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs); 1024} 1025 1026int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) { 1027 int64_t realUs; 1028 if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) { 1029 // If failed to get current position, e.g. due to audio clock is 1030 // not ready, then just play out video immediately without delay. 
1031 return nowUs; 1032 } 1033 return realUs; 1034} 1035 1036void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) { 1037 Mutex::Autolock autoLock(mLock); 1038 // TRICKY: vorbis decoder generates multiple frames with the same 1039 // timestamp, so only update on the first frame with a given timestamp 1040 if (mediaTimeUs == mAnchorTimeMediaUs) { 1041 return; 1042 } 1043 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs); 1044 1045 // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start 1046 if (mNextAudioClockUpdateTimeUs == -1) { 1047 AudioTimestamp ts; 1048 if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) { 1049 mNextAudioClockUpdateTimeUs = 0; // start our clock updates 1050 } 1051 } 1052 int64_t nowUs = ALooper::GetNowUs(); 1053 if (mNextAudioClockUpdateTimeUs >= 0) { 1054 if (nowUs >= mNextAudioClockUpdateTimeUs) { 1055 int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs); 1056 mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs); 1057 mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs; 1058 } 1059 } else { 1060 int64_t unused; 1061 if ((mMediaClock->getMediaTime(nowUs, &unused) != OK) 1062 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten) 1063 > kMaxAllowedAudioSinkDelayUs)) { 1064 // Enough data has been sent to AudioSink, but AudioSink has not rendered 1065 // any data yet. Something is wrong with AudioSink, e.g., the device is not 1066 // connected to audio out. 1067 // Switch to system clock. This essentially creates a virtual AudioSink with 1068 // initial latenty of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten). 1069 // This virtual AudioSink renders audio data starting from the very first sample 1070 // and it's paced by system clock. 1071 ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? 
Switching to system clock."); 1072 mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs); 1073 } 1074 } 1075 mAnchorNumFramesWritten = mNumFramesWritten; 1076 mAnchorTimeMediaUs = mediaTimeUs; 1077} 1078 1079// Called without mLock acquired. 1080void NuPlayer::Renderer::postDrainVideoQueue() { 1081 if (mDrainVideoQueuePending 1082 || getSyncQueues() 1083 || (mPaused && mVideoSampleReceived)) { 1084 return; 1085 } 1086 1087 if (mVideoQueue.empty()) { 1088 return; 1089 } 1090 1091 QueueEntry &entry = *mVideoQueue.begin(); 1092 1093 sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this); 1094 msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */)); 1095 1096 if (entry.mBuffer == NULL) { 1097 // EOS doesn't carry a timestamp. 1098 msg->post(); 1099 mDrainVideoQueuePending = true; 1100 return; 1101 } 1102 1103 bool needRepostDrainVideoQueue = false; 1104 int64_t delayUs; 1105 int64_t nowUs = ALooper::GetNowUs(); 1106 int64_t realTimeUs; 1107 if (mFlags & FLAG_REAL_TIME) { 1108 int64_t mediaTimeUs; 1109 CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1110 realTimeUs = mediaTimeUs; 1111 } else { 1112 int64_t mediaTimeUs; 1113 CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1114 1115 { 1116 Mutex::Autolock autoLock(mLock); 1117 if (mAnchorTimeMediaUs < 0) { 1118 mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs); 1119 mAnchorTimeMediaUs = mediaTimeUs; 1120 realTimeUs = nowUs; 1121 } else if (!mVideoSampleReceived) { 1122 // Always render the first video frame. 
1123 realTimeUs = nowUs; 1124 } else if (mAudioFirstAnchorTimeMediaUs < 0 1125 || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) { 1126 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs); 1127 } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) { 1128 needRepostDrainVideoQueue = true; 1129 realTimeUs = nowUs; 1130 } else { 1131 realTimeUs = nowUs; 1132 } 1133 } 1134 if (!mHasAudio) { 1135 // smooth out videos >= 10fps 1136 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); 1137 } 1138 1139 // Heuristics to handle situation when media time changed without a 1140 // discontinuity. If we have not drained an audio buffer that was 1141 // received after this buffer, repost in 10 msec. Otherwise repost 1142 // in 500 msec. 1143 delayUs = realTimeUs - nowUs; 1144 int64_t postDelayUs = -1; 1145 if (delayUs > 500000) { 1146 postDelayUs = 500000; 1147 if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) { 1148 postDelayUs = 10000; 1149 } 1150 } else if (needRepostDrainVideoQueue) { 1151 // CHECK(mPlaybackRate > 0); 1152 // CHECK(mAudioFirstAnchorTimeMediaUs >= 0); 1153 // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0); 1154 postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs; 1155 postDelayUs /= mPlaybackRate; 1156 } 1157 1158 if (postDelayUs >= 0) { 1159 msg->setWhat(kWhatPostDrainVideoQueue); 1160 msg->post(postDelayUs); 1161 mVideoScheduler->restart(); 1162 ALOGI("possible video time jump of %dms or uninitialized media clock, retrying in %dms", 1163 (int)(delayUs / 1000), (int)(postDelayUs / 1000)); 1164 mDrainVideoQueuePending = true; 1165 return; 1166 } 1167 } 1168 1169 realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000; 1170 int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000); 1171 1172 delayUs = realTimeUs - nowUs; 1173 1174 ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs); 1175 // post 2 display refreshes before rendering is due 1176 msg->post(delayUs > 
twoVsyncsUs ? delayUs - twoVsyncsUs : 0); 1177 1178 mDrainVideoQueuePending = true; 1179} 1180 1181void NuPlayer::Renderer::onDrainVideoQueue() { 1182 if (mVideoQueue.empty()) { 1183 return; 1184 } 1185 1186 QueueEntry *entry = &*mVideoQueue.begin(); 1187 1188 if (entry->mBuffer == NULL) { 1189 // EOS 1190 1191 notifyEOS(false /* audio */, entry->mFinalResult); 1192 1193 mVideoQueue.erase(mVideoQueue.begin()); 1194 entry = NULL; 1195 1196 setVideoLateByUs(0); 1197 return; 1198 } 1199 1200 int64_t nowUs = ALooper::GetNowUs(); 1201 int64_t realTimeUs; 1202 int64_t mediaTimeUs = -1; 1203 if (mFlags & FLAG_REAL_TIME) { 1204 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs)); 1205 } else { 1206 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1207 1208 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs); 1209 } 1210 1211 bool tooLate = false; 1212 1213 if (!mPaused) { 1214 setVideoLateByUs(nowUs - realTimeUs); 1215 tooLate = (mVideoLateByUs > 40000); 1216 1217 if (tooLate) { 1218 ALOGV("video late by %lld us (%.2f secs)", 1219 (long long)mVideoLateByUs, mVideoLateByUs / 1E6); 1220 } else { 1221 int64_t mediaUs = 0; 1222 mMediaClock->getMediaTime(realTimeUs, &mediaUs); 1223 ALOGV("rendering video at media time %.2f secs", 1224 (mFlags & FLAG_REAL_TIME ? realTimeUs : 1225 mediaUs) / 1E6); 1226 1227 if (!(mFlags & FLAG_REAL_TIME) 1228 && mLastAudioMediaTimeUs != -1 1229 && mediaTimeUs > mLastAudioMediaTimeUs) { 1230 // If audio ends before video, video continues to drive media clock. 1231 // Also smooth out videos >= 10fps. 1232 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); 1233 } 1234 } 1235 } else { 1236 setVideoLateByUs(0); 1237 if (!mVideoSampleReceived && !mHasAudio) { 1238 // This will ensure that the first frame after a flush won't be used as anchor 1239 // when renderer is in paused state, because resume can happen any time after seek. 
1240 Mutex::Autolock autoLock(mLock); 1241 clearAnchorTime_l(); 1242 } 1243 } 1244 1245 // Always render the first video frame while keeping stats on A/V sync. 1246 if (!mVideoSampleReceived) { 1247 realTimeUs = nowUs; 1248 tooLate = false; 1249 } 1250 1251 entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll); 1252 entry->mNotifyConsumed->setInt32("render", !tooLate); 1253 entry->mNotifyConsumed->post(); 1254 mVideoQueue.erase(mVideoQueue.begin()); 1255 entry = NULL; 1256 1257 mVideoSampleReceived = true; 1258 1259 if (!mPaused) { 1260 if (!mVideoRenderingStarted) { 1261 mVideoRenderingStarted = true; 1262 notifyVideoRenderingStart(); 1263 } 1264 Mutex::Autolock autoLock(mLock); 1265 notifyIfMediaRenderingStarted_l(); 1266 } 1267} 1268 1269void NuPlayer::Renderer::notifyVideoRenderingStart() { 1270 sp<AMessage> notify = mNotify->dup(); 1271 notify->setInt32("what", kWhatVideoRenderingStart); 1272 notify->post(); 1273} 1274 1275void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) { 1276 if (audio && delayUs > 0) { 1277 sp<AMessage> msg = new AMessage(kWhatEOS, this); 1278 msg->setInt32("audioEOSGeneration", mAudioEOSGeneration); 1279 msg->setInt32("finalResult", finalResult); 1280 msg->post(delayUs); 1281 return; 1282 } 1283 sp<AMessage> notify = mNotify->dup(); 1284 notify->setInt32("what", kWhatEOS); 1285 notify->setInt32("audio", static_cast<int32_t>(audio)); 1286 notify->setInt32("finalResult", finalResult); 1287 notify->post(delayUs); 1288} 1289 1290void NuPlayer::Renderer::notifyAudioTearDown() { 1291 (new AMessage(kWhatAudioTearDown, this))->post(); 1292} 1293 1294void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) { 1295 int32_t audio; 1296 CHECK(msg->findInt32("audio", &audio)); 1297 1298 if (dropBufferIfStale(audio, msg)) { 1299 return; 1300 } 1301 1302 if (audio) { 1303 mHasAudio = true; 1304 } else { 1305 mHasVideo = true; 1306 } 1307 1308 if (mHasVideo) { 1309 if (mVideoScheduler == NULL) { 
1310 mVideoScheduler = new VideoFrameScheduler(); 1311 mVideoScheduler->init(); 1312 } 1313 } 1314 1315 sp<ABuffer> buffer; 1316 CHECK(msg->findBuffer("buffer", &buffer)); 1317 1318 sp<AMessage> notifyConsumed; 1319 CHECK(msg->findMessage("notifyConsumed", ¬ifyConsumed)); 1320 1321 QueueEntry entry; 1322 entry.mBuffer = buffer; 1323 entry.mNotifyConsumed = notifyConsumed; 1324 entry.mOffset = 0; 1325 entry.mFinalResult = OK; 1326 entry.mBufferOrdinal = ++mTotalBuffersQueued; 1327 1328 if (audio) { 1329 Mutex::Autolock autoLock(mLock); 1330 mAudioQueue.push_back(entry); 1331 postDrainAudioQueue_l(); 1332 } else { 1333 mVideoQueue.push_back(entry); 1334 postDrainVideoQueue(); 1335 } 1336 1337 Mutex::Autolock autoLock(mLock); 1338 if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) { 1339 return; 1340 } 1341 1342 sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer; 1343 sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer; 1344 1345 if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) { 1346 // EOS signalled on either queue. 1347 syncQueuesDone_l(); 1348 return; 1349 } 1350 1351 int64_t firstAudioTimeUs; 1352 int64_t firstVideoTimeUs; 1353 CHECK(firstAudioBuffer->meta() 1354 ->findInt64("timeUs", &firstAudioTimeUs)); 1355 CHECK(firstVideoBuffer->meta() 1356 ->findInt64("timeUs", &firstVideoTimeUs)); 1357 1358 int64_t diff = firstVideoTimeUs - firstAudioTimeUs; 1359 1360 ALOGV("queueDiff = %.2f secs", diff / 1E6); 1361 1362 if (diff > 100000ll) { 1363 // Audio data starts More than 0.1 secs before video. 1364 // Drop some audio. 
1365 1366 (*mAudioQueue.begin()).mNotifyConsumed->post(); 1367 mAudioQueue.erase(mAudioQueue.begin()); 1368 return; 1369 } 1370 1371 syncQueuesDone_l(); 1372} 1373 1374void NuPlayer::Renderer::syncQueuesDone_l() { 1375 if (!mSyncQueues) { 1376 return; 1377 } 1378 1379 mSyncQueues = false; 1380 1381 if (!mAudioQueue.empty()) { 1382 postDrainAudioQueue_l(); 1383 } 1384 1385 if (!mVideoQueue.empty()) { 1386 mLock.unlock(); 1387 postDrainVideoQueue(); 1388 mLock.lock(); 1389 } 1390} 1391 1392void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) { 1393 int32_t audio; 1394 CHECK(msg->findInt32("audio", &audio)); 1395 1396 if (dropBufferIfStale(audio, msg)) { 1397 return; 1398 } 1399 1400 int32_t finalResult; 1401 CHECK(msg->findInt32("finalResult", &finalResult)); 1402 1403 QueueEntry entry; 1404 entry.mOffset = 0; 1405 entry.mFinalResult = finalResult; 1406 1407 if (audio) { 1408 Mutex::Autolock autoLock(mLock); 1409 if (mAudioQueue.empty() && mSyncQueues) { 1410 syncQueuesDone_l(); 1411 } 1412 mAudioQueue.push_back(entry); 1413 postDrainAudioQueue_l(); 1414 } else { 1415 if (mVideoQueue.empty() && getSyncQueues()) { 1416 Mutex::Autolock autoLock(mLock); 1417 syncQueuesDone_l(); 1418 } 1419 mVideoQueue.push_back(entry); 1420 postDrainVideoQueue(); 1421 } 1422} 1423 1424void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) { 1425 int32_t audio, notifyComplete; 1426 CHECK(msg->findInt32("audio", &audio)); 1427 1428 { 1429 Mutex::Autolock autoLock(mLock); 1430 if (audio) { 1431 notifyComplete = mNotifyCompleteAudio; 1432 mNotifyCompleteAudio = false; 1433 mLastAudioMediaTimeUs = -1; 1434 } else { 1435 notifyComplete = mNotifyCompleteVideo; 1436 mNotifyCompleteVideo = false; 1437 } 1438 1439 // If we're currently syncing the queues, i.e. dropping audio while 1440 // aligning the first audio/video buffer times and only one of the 1441 // two queues has data, we may starve that queue by not requesting 1442 // more buffers from the decoder. 
If the other source then encounters 1443 // a discontinuity that leads to flushing, we'll never find the 1444 // corresponding discontinuity on the other queue. 1445 // Therefore we'll stop syncing the queues if at least one of them 1446 // is flushed. 1447 syncQueuesDone_l(); 1448 clearAnchorTime_l(); 1449 } 1450 1451 ALOGV("flushing %s", audio ? "audio" : "video"); 1452 if (audio) { 1453 { 1454 Mutex::Autolock autoLock(mLock); 1455 flushQueue(&mAudioQueue); 1456 1457 ++mAudioDrainGeneration; 1458 ++mAudioEOSGeneration; 1459 prepareForMediaRenderingStart_l(); 1460 1461 // the frame count will be reset after flush. 1462 clearAudioFirstAnchorTime_l(); 1463 } 1464 1465 mDrainAudioQueuePending = false; 1466 1467 if (offloadingAudio()) { 1468 mAudioSink->pause(); 1469 mAudioSink->flush(); 1470 if (!mPaused) { 1471 mAudioSink->start(); 1472 } 1473 } else { 1474 mAudioSink->pause(); 1475 mAudioSink->flush(); 1476 // Call stop() to signal to the AudioSink to completely fill the 1477 // internal buffer before resuming playback. 1478 // FIXME: this is ignored after flush(). 1479 mAudioSink->stop(); 1480 if (mPaused) { 1481 // Race condition: if renderer is paused and audio sink is stopped, 1482 // we need to make sure that the audio track buffer fully drains 1483 // before delivering data. 1484 // FIXME: remove this if we can detect if stop() is complete. 
1485 const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms) 1486 mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs; 1487 } else { 1488 mAudioSink->start(); 1489 } 1490 mNumFramesWritten = 0; 1491 } 1492 mNextAudioClockUpdateTimeUs = -1; 1493 } else { 1494 flushQueue(&mVideoQueue); 1495 1496 mDrainVideoQueuePending = false; 1497 1498 if (mVideoScheduler != NULL) { 1499 mVideoScheduler->restart(); 1500 } 1501 1502 Mutex::Autolock autoLock(mLock); 1503 ++mVideoDrainGeneration; 1504 prepareForMediaRenderingStart_l(); 1505 } 1506 1507 mVideoSampleReceived = false; 1508 1509 if (notifyComplete) { 1510 notifyFlushComplete(audio); 1511 } 1512} 1513 1514void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) { 1515 while (!queue->empty()) { 1516 QueueEntry *entry = &*queue->begin(); 1517 1518 if (entry->mBuffer != NULL) { 1519 entry->mNotifyConsumed->post(); 1520 } 1521 1522 queue->erase(queue->begin()); 1523 entry = NULL; 1524 } 1525} 1526 1527void NuPlayer::Renderer::notifyFlushComplete(bool audio) { 1528 sp<AMessage> notify = mNotify->dup(); 1529 notify->setInt32("what", kWhatFlushComplete); 1530 notify->setInt32("audio", static_cast<int32_t>(audio)); 1531 notify->post(); 1532} 1533 1534bool NuPlayer::Renderer::dropBufferIfStale( 1535 bool audio, const sp<AMessage> &msg) { 1536 int32_t queueGeneration; 1537 CHECK(msg->findInt32("queueGeneration", &queueGeneration)); 1538 1539 if (queueGeneration == getQueueGeneration(audio)) { 1540 return false; 1541 } 1542 1543 sp<AMessage> notifyConsumed; 1544 if (msg->findMessage("notifyConsumed", ¬ifyConsumed)) { 1545 notifyConsumed->post(); 1546 } 1547 1548 return true; 1549} 1550 1551void NuPlayer::Renderer::onAudioSinkChanged() { 1552 if (offloadingAudio()) { 1553 return; 1554 } 1555 CHECK(!mDrainAudioQueuePending); 1556 mNumFramesWritten = 0; 1557 { 1558 Mutex::Autolock autoLock(mLock); 1559 mAnchorNumFramesWritten = -1; 1560 } 1561 uint32_t written; 1562 if (mAudioSink->getFramesWritten(&written) 
== OK) { 1563 mNumFramesWritten = written; 1564 } 1565} 1566 1567void NuPlayer::Renderer::onDisableOffloadAudio() { 1568 Mutex::Autolock autoLock(mLock); 1569 mFlags &= ~FLAG_OFFLOAD_AUDIO; 1570 ++mAudioDrainGeneration; 1571 if (mAudioRenderingStartGeneration != -1) { 1572 prepareForMediaRenderingStart_l(); 1573 } 1574} 1575 1576void NuPlayer::Renderer::onEnableOffloadAudio() { 1577 Mutex::Autolock autoLock(mLock); 1578 mFlags |= FLAG_OFFLOAD_AUDIO; 1579 ++mAudioDrainGeneration; 1580 if (mAudioRenderingStartGeneration != -1) { 1581 prepareForMediaRenderingStart_l(); 1582 } 1583} 1584 1585void NuPlayer::Renderer::onPause() { 1586 if (mPaused) { 1587 return; 1588 } 1589 1590 { 1591 Mutex::Autolock autoLock(mLock); 1592 // we do not increment audio drain generation so that we fill audio buffer during pause. 1593 ++mVideoDrainGeneration; 1594 prepareForMediaRenderingStart_l(); 1595 mPaused = true; 1596 mMediaClock->setPlaybackRate(0.0); 1597 } 1598 1599 mDrainAudioQueuePending = false; 1600 mDrainVideoQueuePending = false; 1601 1602 // Note: audio data may not have been decoded, and the AudioSink may not be opened. 1603 mAudioSink->pause(); 1604 startAudioOffloadPauseTimeout(); 1605 1606 ALOGV("now paused audio queue has %zu entries, video has %zu entries", 1607 mAudioQueue.size(), mVideoQueue.size()); 1608} 1609 1610void NuPlayer::Renderer::onResume() { 1611 if (!mPaused) { 1612 return; 1613 } 1614 1615 // Note: audio data may not have been decoded, and the AudioSink may not be opened. 1616 cancelAudioOffloadPauseTimeout(); 1617 if (mAudioSink->ready()) { 1618 status_t err = mAudioSink->start(); 1619 if (err != OK) { 1620 ALOGE("cannot start AudioSink err %d", err); 1621 notifyAudioTearDown(); 1622 } 1623 } 1624 1625 { 1626 Mutex::Autolock autoLock(mLock); 1627 mPaused = false; 1628 // rendering started message may have been delayed if we were paused. 
1629 if (mRenderingDataDelivered) { 1630 notifyIfMediaRenderingStarted_l(); 1631 } 1632 // configure audiosink as we did not do it when pausing 1633 if (mAudioSink != NULL && mAudioSink->ready()) { 1634 mAudioSink->setPlaybackRate(mPlaybackSettings); 1635 } 1636 1637 mMediaClock->setPlaybackRate(mPlaybackRate); 1638 1639 if (!mAudioQueue.empty()) { 1640 postDrainAudioQueue_l(); 1641 } 1642 } 1643 1644 if (!mVideoQueue.empty()) { 1645 postDrainVideoQueue(); 1646 } 1647} 1648 1649void NuPlayer::Renderer::onSetVideoFrameRate(float fps) { 1650 if (mVideoScheduler == NULL) { 1651 mVideoScheduler = new VideoFrameScheduler(); 1652 } 1653 mVideoScheduler->init(fps); 1654} 1655 1656int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) { 1657 Mutex::Autolock autoLock(mLock); 1658 return (audio ? mAudioQueueGeneration : mVideoQueueGeneration); 1659} 1660 1661int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) { 1662 Mutex::Autolock autoLock(mLock); 1663 return (audio ? mAudioDrainGeneration : mVideoDrainGeneration); 1664} 1665 1666bool NuPlayer::Renderer::getSyncQueues() { 1667 Mutex::Autolock autoLock(mLock); 1668 return mSyncQueues; 1669} 1670 1671void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) { 1672 if (mAudioTornDown) { 1673 return; 1674 } 1675 mAudioTornDown = true; 1676 1677 int64_t currentPositionUs; 1678 sp<AMessage> notify = mNotify->dup(); 1679 if (getCurrentPosition(¤tPositionUs) == OK) { 1680 notify->setInt64("positionUs", currentPositionUs); 1681 } 1682 1683 mAudioSink->stop(); 1684 mAudioSink->flush(); 1685 1686 notify->setInt32("what", kWhatAudioTearDown); 1687 notify->setInt32("reason", reason); 1688 notify->post(); 1689} 1690 1691void NuPlayer::Renderer::startAudioOffloadPauseTimeout() { 1692 if (offloadingAudio()) { 1693 mWakeLock->acquire(); 1694 sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this); 1695 msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration); 1696 
msg->post(kOffloadPauseMaxUs); 1697 } 1698} 1699 1700void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() { 1701 if (offloadingAudio()) { 1702 mWakeLock->release(true); 1703 ++mAudioOffloadPauseTimeoutGeneration; 1704 } 1705} 1706 1707status_t NuPlayer::Renderer::onOpenAudioSink( 1708 const sp<AMessage> &format, 1709 bool offloadOnly, 1710 bool hasVideo, 1711 uint32_t flags) { 1712 ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)", 1713 offloadOnly, offloadingAudio()); 1714 bool audioSinkChanged = false; 1715 1716 int32_t numChannels; 1717 CHECK(format->findInt32("channel-count", &numChannels)); 1718 1719 int32_t channelMask; 1720 if (!format->findInt32("channel-mask", &channelMask)) { 1721 // signal to the AudioSink to derive the mask from count. 1722 channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER; 1723 } 1724 1725 int32_t sampleRate; 1726 CHECK(format->findInt32("sample-rate", &sampleRate)); 1727 1728 if (offloadingAudio()) { 1729 audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT; 1730 AString mime; 1731 CHECK(format->findString("mime", &mime)); 1732 status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str()); 1733 1734 if (err != OK) { 1735 ALOGE("Couldn't map mime \"%s\" to a valid " 1736 "audio_format", mime.c_str()); 1737 onDisableOffloadAudio(); 1738 } else { 1739 ALOGV("Mime \"%s\" mapped to audio_format 0x%x", 1740 mime.c_str(), audioFormat); 1741 1742 int avgBitRate = -1; 1743 format->findInt32("bit-rate", &avgBitRate); 1744 1745 int32_t aacProfile = -1; 1746 if (audioFormat == AUDIO_FORMAT_AAC 1747 && format->findInt32("aac-profile", &aacProfile)) { 1748 // Redefine AAC format as per aac profile 1749 mapAACProfileToAudioFormat( 1750 audioFormat, 1751 aacProfile); 1752 } 1753 1754 audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER; 1755 offloadInfo.duration_us = -1; 1756 format->findInt64( 1757 "durationUs", &offloadInfo.duration_us); 1758 offloadInfo.sample_rate = sampleRate; 1759 offloadInfo.channel_mask = channelMask; 1760 
offloadInfo.format = audioFormat; 1761 offloadInfo.stream_type = AUDIO_STREAM_MUSIC; 1762 offloadInfo.bit_rate = avgBitRate; 1763 offloadInfo.has_video = hasVideo; 1764 offloadInfo.is_streaming = true; 1765 1766 if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) { 1767 ALOGV("openAudioSink: no change in offload mode"); 1768 // no change from previous configuration, everything ok. 1769 return OK; 1770 } 1771 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1772 1773 ALOGV("openAudioSink: try to open AudioSink in offload mode"); 1774 uint32_t offloadFlags = flags; 1775 offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; 1776 offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER; 1777 audioSinkChanged = true; 1778 mAudioSink->close(); 1779 1780 err = mAudioSink->open( 1781 sampleRate, 1782 numChannels, 1783 (audio_channel_mask_t)channelMask, 1784 audioFormat, 1785 0 /* bufferCount - unused */, 1786 &NuPlayer::Renderer::AudioSinkCallback, 1787 this, 1788 (audio_output_flags_t)offloadFlags, 1789 &offloadInfo); 1790 1791 if (err == OK) { 1792 err = mAudioSink->setPlaybackRate(mPlaybackSettings); 1793 } 1794 1795 if (err == OK) { 1796 // If the playback is offloaded to h/w, we pass 1797 // the HAL some metadata information. 1798 // We don't want to do this for PCM because it 1799 // will be going through the AudioFlinger mixer 1800 // before reaching the hardware. 1801 // TODO 1802 mCurrentOffloadInfo = offloadInfo; 1803 if (!mPaused) { // for preview mode, don't start if paused 1804 err = mAudioSink->start(); 1805 } 1806 ALOGV_IF(err == OK, "openAudioSink: offload succeeded"); 1807 } 1808 if (err != OK) { 1809 // Clean up, fall back to non offload mode. 
1810 mAudioSink->close(); 1811 onDisableOffloadAudio(); 1812 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1813 ALOGV("openAudioSink: offload failed"); 1814 } else { 1815 mUseAudioCallback = true; // offload mode transfers data through callback 1816 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message. 1817 } 1818 } 1819 } 1820 if (!offloadOnly && !offloadingAudio()) { 1821 ALOGV("openAudioSink: open AudioSink in NON-offload mode"); 1822 uint32_t pcmFlags = flags; 1823 pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; 1824 1825 const PcmInfo info = { 1826 (audio_channel_mask_t)channelMask, 1827 (audio_output_flags_t)pcmFlags, 1828 AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat 1829 numChannels, 1830 sampleRate 1831 }; 1832 if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) { 1833 ALOGV("openAudioSink: no change in pcm mode"); 1834 // no change from previous configuration, everything ok. 1835 return OK; 1836 } 1837 1838 audioSinkChanged = true; 1839 mAudioSink->close(); 1840 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1841 // Note: It is possible to set up the callback, but not use it to send audio data. 1842 // This requires a fix in AudioSink to explicitly specify the transfer mode. 1843 mUseAudioCallback = getUseAudioCallbackSetting(); 1844 if (mUseAudioCallback) { 1845 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message. 1846 } 1847 1848 // Compute the desired buffer size. 1849 // For callback mode, the amount of time before wakeup is about half the buffer size. 1850 const uint32_t frameCount = 1851 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000; 1852 1853 // The doNotReconnect means AudioSink will signal back and let NuPlayer to re-construct 1854 // AudioSink. We don't want this when there's video because it will cause a video seek to 1855 // the previous I frame. 
But we do want this when there's only audio because it will give 1856 // NuPlayer a chance to switch from non-offload mode to offload mode. 1857 // So we only set doNotReconnect when there's no video. 1858 const bool doNotReconnect = !hasVideo; 1859 status_t err = mAudioSink->open( 1860 sampleRate, 1861 numChannels, 1862 (audio_channel_mask_t)channelMask, 1863 AUDIO_FORMAT_PCM_16_BIT, 1864 0 /* bufferCount - unused */, 1865 mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL, 1866 mUseAudioCallback ? this : NULL, 1867 (audio_output_flags_t)pcmFlags, 1868 NULL, 1869 doNotReconnect, 1870 frameCount); 1871 if (err == OK) { 1872 err = mAudioSink->setPlaybackRate(mPlaybackSettings); 1873 } 1874 if (err != OK) { 1875 ALOGW("openAudioSink: non offloaded open failed status: %d", err); 1876 mAudioSink->close(); 1877 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1878 return err; 1879 } 1880 mCurrentPcmInfo = info; 1881 if (!mPaused) { // for preview mode, don't start if paused 1882 mAudioSink->start(); 1883 } 1884 } 1885 if (audioSinkChanged) { 1886 onAudioSinkChanged(); 1887 } 1888 mAudioTornDown = false; 1889 return OK; 1890} 1891 1892void NuPlayer::Renderer::onCloseAudioSink() { 1893 mAudioSink->close(); 1894 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1895 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1896} 1897 1898} // namespace android 1899 1900