NuPlayerRenderer.cpp revision 7e34bf5af26f8752d4786d3098740cdf51e2438f
1/* 2 * Copyright (C) 2010 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17//#define LOG_NDEBUG 0 18#define LOG_TAG "NuPlayerRenderer" 19#include <utils/Log.h> 20 21#include "NuPlayerRenderer.h" 22#include <algorithm> 23#include <cutils/properties.h> 24#include <media/stagefright/foundation/ADebug.h> 25#include <media/stagefright/foundation/AMessage.h> 26#include <media/stagefright/foundation/AUtils.h> 27#include <media/stagefright/foundation/AWakeLock.h> 28#include <media/stagefright/MediaClock.h> 29#include <media/stagefright/MediaErrors.h> 30#include <media/stagefright/MetaData.h> 31#include <media/stagefright/Utils.h> 32#include <media/stagefright/VideoFrameScheduler.h> 33#include <media/MediaCodecBuffer.h> 34 35#include <inttypes.h> 36 37namespace android { 38 39/* 40 * Example of common configuration settings in shell script form 41 42 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager 43 adb shell setprop audio.offload.disable 1 44 45 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager 46 adb shell setprop audio.offload.video 1 47 48 #Use audio callbacks for PCM data 49 adb shell setprop media.stagefright.audio.cbk 1 50 51 #Use deep buffer for PCM data with video (it is generally enabled for audio-only) 52 adb shell setprop media.stagefright.audio.deep 1 53 54 #Set size of buffers for pcm audio sink in msec (example: 1000 msec) 55 adb shell 
setprop media.stagefright.audio.sink 1000

 * These configurations take effect for the next track played (not the current track).
 */

// True if PCM audio should be delivered via the AudioSink callback
// (pull model) instead of the renderer actively writing (push model).
static inline bool getUseAudioCallbackSetting() {
    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
}

// Requested PCM audio sink buffer size in milliseconds (see shell examples above).
static inline int32_t getAudioSinkPcmMsSetting() {
    return property_get_int32(
            "media.stagefright.audio.sink", 500 /* default_value */);
}

// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000ll;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;

static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;

// static
// Sentinel PCM configuration meaning "no PCM sink currently open".
const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;

NuPlayer::Renderer::Renderer(
        const sp<MediaPlayerBase::AudioSink> &sink,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0ll),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(false),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new AWakeLock()) {
    mMediaClock = new MediaClock;
    mPlaybackRate = mPlaybackSettings.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
}

NuPlayer::Renderer::~Renderer() {
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }

    // Try to avoid racing condition in case callback is still on.
    Mutex::Autolock autoLock(mLock);
    mUseAudioCallback = false;
    flushQueue(&mAudioQueue);
    flushQueue(&mVideoQueue);
    mWakeLock.clear();
    mMediaClock.clear();
    mVideoScheduler.clear();
    mNotify.clear();
    mAudioSink.clear();
}

// Posts a decoded buffer to the renderer's looper thread. The current queue
// generation is captured so stale buffers posted before a flush are dropped.
void NuPlayer::Renderer::queueBuffer(
        bool audio,
        const sp<MediaCodecBuffer> &buffer,
        const sp<AMessage> &notifyConsumed) {
    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setObject("buffer", buffer);
    msg->setMessage("notifyConsumed", notifyConsumed);
    msg->post();
}

// Posts an end-of-stream marker for the audio or video queue. finalResult must
// be an error code (ERROR_END_OF_STREAM for a normal EOS), never OK.
void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
    CHECK_NE(finalResult, (status_t)OK);

    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setInt32("finalResult", finalResult);
    msg->post();
}

// Synchronously applies playback rate/pitch settings on the renderer thread.
status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
    writeToAMessage(msg, rate);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed == 0.f) {
        onPause();
        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
        // have to correspond to the any non-0 speed (e.g old speed). Keep
        // settings nonetheless, using the old speed, in case audiosink changes.
        AudioPlaybackRate newRate = rate;
        newRate.mSpeed = mPlaybackSettings.mSpeed;
        mPlaybackSettings = newRate;
        return OK;
    }

    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            return err;
        }
    }
    mPlaybackSettings = rate;
    mPlaybackRate = rate.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
    return OK;
}

// Synchronously reads back the effective playback settings from the renderer thread.
status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, rate);
        }
    }
    return err;
}

status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate");
            }
            // get playback settings used by audiosink, as it may be
            // slightly off due to audiosink not taking small changes.
            mPlaybackSettings = *rate;
            if (mPaused) {
                // Report 0 speed while paused, regardless of the stored rate.
                rate->mSpeed = 0.f;
            }
        }
        return err;
    }
    *rate = mPlaybackSettings;
    return OK;
}

// Synchronously applies A/V sync settings on the renderer thread.
status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
    writeToAMessage(msg, sync, videoFpsHint);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
        return BAD_VALUE;
    }
    // TODO: support sync sources
    return INVALID_OPERATION;
}

// Synchronously reads back the current A/V sync settings from the renderer thread.
status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, sync, videoFps);
        }
    }
    return err;
}

status_t NuPlayer::Renderer::onGetSyncSettings(
        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
    *sync = mSyncSettings;
    *videoFps = -1.f;  // fps is not tracked; -1 means "unknown"
    return OK;
}

// Flushes the audio or video queue. Generation counters are bumped under mLock
// first so in-flight queue/drain messages become stale, then the actual flush
// work is posted to the renderer thread.
void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        clearAnchorTime_l();
        mVideoLateByUs = 0;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}

void NuPlayer::Renderer::signalTimeDiscontinuity() {
    // Intentionally a no-op.
}

void NuPlayer::Renderer::signalDisableOffloadAudio() {
    (new AMessage(kWhatDisableOffloadAudio, this))->post();
}

void NuPlayer::Renderer::signalEnableOffloadAudio() {
    (new AMessage(kWhatEnableOffloadAudio, this))->post();
}

void NuPlayer::Renderer::pause() {
    (new AMessage(kWhatPause, this))->post();
}

void NuPlayer::Renderer::resume() {
    (new AMessage(kWhatResume, this))->post();
}

void NuPlayer::Renderer::setVideoFrameRate(float fps) {
    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
    msg->setFloat("frame-rate", fps);
    msg->post();
}

// Called on any threads without mLock acquired.
status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}

// Resets the audio anchor; -1 means "not yet established". Caller holds mLock.
void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}

// Records the media time of the first audio sample if not already set.
// Caller holds mLock.
void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
    if (mAudioFirstAnchorTimeMediaUs == -1) {
        mAudioFirstAnchorTimeMediaUs = mediaUs;
        mMediaClock->setStartingTimeMedia(mediaUs);
    }
}

// Clears the media-clock anchor bookkeeping. Caller holds mLock.
void NuPlayer::Renderer::clearAnchorTime_l() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}

void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}

int64_t NuPlayer::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}

// Synchronously opens the AudioSink on the renderer thread. On success,
// *isOffloaded (if non-null) reports whether offload mode was selected.
status_t NuPlayer::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);

    sp<AMessage> response;
    msg->postAndAwaitResponse(&response);

    int32_t err;
    if (!response->findInt32("err", &err)) {
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}

// Synchronously closes the AudioSink on the renderer thread.
void NuPlayer::Renderer::closeAudioSink() {
    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);

    sp<AMessage> response;
    msg->postAndAwaitResponse(&response);
}

void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags);

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                // Stale message from before a flush; ignore.
                break;
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                uint32_t numFramesPendingPlayout =
                    mNumFramesWritten - numFramesPlayed;

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackRate > 1.0f) {
                    delayUs /= mPlaybackRate;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatEOS:
        {
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}

// Schedules a kWhatDrainAudioQueue message after delayUs. No-op when a drain
// is already pending, queues are being synced, or the callback model is in
// use. Caller holds mLock.
void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}

// Arms the rendering-started detection by snapshotting the current drain
// generations. Caller holds mLock.
void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}

// Fires kWhatMediaRenderingStart once both streams have delivered data in the
// current drain generations. Caller holds mLock.
void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        // Mark as fired so it only notifies once per prepare.
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}

// static
// AudioSink callback trampoline; cookie is the Renderer instance.
size_t NuPlayer::Renderer::AudioSinkCallback(
        MediaPlayerBase::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayerBase::AudioSink::cb_event_t event) {
    NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;

    switch (event) {
        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
            break;
        }

        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOSCallback();
            break;
        }

        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown(kDueToError);
            break;
        }
    }

    return 0;
}

void NuPlayer::Renderer::notifyEOSCallback() {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return;
    }

    notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
}

// Copies queued audio data into the AudioSink's callback buffer. Returns the
// number of bytes copied. Runs on the AudioSink callback thread.
size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return 0;
    }

    bool hasEOS = false;

    size_t sizeCopied = 0;
    bool firstEntry = true;
    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
    while (sizeCopied < size && !mAudioQueue.empty()) {
        entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) { // EOS
            hasEOS = true;
            mAudioQueue.erase(mAudioQueue.begin());
            break;
        }

        if (firstEntry && entry->mOffset == 0) {
            firstEntry = false;
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;
        size_t sizeRemaining = size - sizeCopied;
        if (copy > sizeRemaining) {
            copy = sizeRemaining;
        }

        memcpy((char *)buffer + sizeCopied,
               entry->mBuffer->data() + entry->mOffset,
               copy);

        entry->mOffset += copy;
        if (entry->mOffset == entry->mBuffer->size()) {
            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
        }
        sizeCopied += copy;

        notifyIfMediaRenderingStarted_l();
    }

    if (mAudioFirstAnchorTimeMediaUs >= 0) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs =
            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
        // we don't know how much data we are queueing for offloaded tracks.
        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
    }

    // for non-offloaded audio, we need to compute the frames written because
    // there is no EVENT_STREAM_END notification. The frames written gives
    // an estimate on the pending played out duration.
    if (!offloadingAudio()) {
        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
    }

    if (hasEOS) {
        (new AMessage(kWhatStopAudioSink, this))->post();
        // As there is currently no EVENT_STREAM_END callback notification for
        // non-offloaded audio tracks, we need to post the EOS ourselves.
        if (!offloadingAudio()) {
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            ALOGV("fillAudioBuffer: notifyEOS "
                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
        }
    }
    return sizeCopied;
}

// Posts consumed-notifications for (and drops) all audio entries up to and
// including the last EOS marker. Used when the sink can't accept data but
// NuPlayerDecoder may be blocked waiting for EOS.
void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        if (entry->mBuffer == NULL
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == NULL) {
                // delay doesn't matter as we don't even have an AudioTrack
                notifyEOS(true /* audio */, it->mFinalResult);
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}

// Writes as much queued audio as the AudioSink will accept (non-blocking).
// Returns true if another drain should be scheduled.
bool NuPlayer::Renderer::onDrainAudioQueue() {
    // do not drain audio during teardown as queued buffers may be invalid.
    if (mAudioTornDown) {
        return false;
    }
    // TODO: This call to getPosition checks if AudioTrack has been created
    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
    // CHECKs on getPosition will fail.
    // We still need to figure out why AudioTrack is not created when
    // this function is called. One possible reason could be leftover
    // audio. Another possible place is to check whether decoder
    // has received INFO_FORMAT_CHANGED as the first buffer since
    // AudioSink is opened there, and possible interactions with flush
    // immediately after start. Investigate error message
    // "vorbis_dsp_synthesis returned -135", along with RTSP.
    uint32_t numFramesPlayed;
    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
        // When getPosition fails, renderer will not reschedule the draining
        // unless new samples are queued.
        // If we have pending EOS (or "eos" marker for discontinuities), we need
        // to post these now as NuPlayerDecoder might be waiting for it.
        drainAudioQueueUntilLastEOS();

        ALOGW("onDrainAudioQueue(): audio sink is not ready");
        return false;
    }

#if 0
    ssize_t numFramesAvailableToWrite =
        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);

    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
        ALOGI("audio sink underrun");
    } else {
        ALOGV("audio queue has %d frames left to play",
             mAudioSink->frameCount() - numFramesAvailableToWrite);
    }
#endif

    uint32_t prevFramesWritten = mNumFramesWritten;
    while (!mAudioQueue.empty()) {
        QueueEntry *entry = &*mAudioQueue.begin();

        mLastAudioBufferDrained = entry->mBufferOrdinal;

        if (entry->mBuffer == NULL) {
            // EOS
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);

            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
            if (mAudioSink->needsTrailingPadding()) {
                // If we're not in gapless playback (i.e. through setNextPlayer), we
                // need to stop the track here, because that will play out the last
                // little bit at the end of the file. Otherwise short files won't play.
                mAudioSink->stop();
                mNumFramesWritten = 0;
            }
            return false;
        }

        // ignore 0-sized buffer which could be EOS marker with no data
        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
                    mediaTimeUs / 1E6);
            onNewAudioMediaTime(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;

        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
                                            copy, false /* blocking */);
        if (written < 0) {
            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
            if (written == WOULD_BLOCK) {
                ALOGV("AudioSink write would block when writing %zu bytes", copy);
            } else {
                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
                // This can only happen when AudioSink was opened with doNotReconnect flag set to
                // true, in which case the NuPlayer will handle the reconnect.
                notifyAudioTearDown(kDueToError);
            }
            break;
        }

        entry->mOffset += written;
        size_t remainder = entry->mBuffer->size() - entry->mOffset;
        if ((ssize_t)remainder < mAudioSink->frameSize()) {
            if (remainder > 0) {
                ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
                        remainder);
                entry->mOffset += remainder;
                copy -= remainder;
            }

            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());

            entry = NULL;
        }

        size_t copiedFrames = written / mAudioSink->frameSize();
        mNumFramesWritten += copiedFrames;

        {
            Mutex::Autolock autoLock(mLock);
            int64_t maxTimeMedia;
            maxTimeMedia =
                mAnchorTimeMediaUs +
                        (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
                                * 1000LL * mAudioSink->msecsPerFrame());
            mMediaClock->updateMaxTimeMedia(maxTimeMedia);

            notifyIfMediaRenderingStarted_l();
        }

        if (written != (ssize_t)copy) {
            // A short count was received from AudioSink::write()
            //
            // AudioSink write is called in non-blocking mode.
            // It may return with a short count when:
            //
            // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
            //    discarded.
            // 2) The data to be copied exceeds the available buffer in AudioSink.
            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.

            // (Case 1)
            // Must be a multiple of the frame size. If it is not a multiple of a frame size, it
            // needs to fail, as we should not carry over fractional frames between calls.
            CHECK_EQ(copy % mAudioSink->frameSize(), 0);

            // (Case 2, 3, 4)
            // Return early to the caller.
            // Beware of calling immediately again as this may busy-loop if you are not careful.
            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
            break;
        }
    }

    // calculate whether we need to reschedule another write.
    bool reschedule = !mAudioQueue.empty()
            && (!mPaused
                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
    return reschedule;
}

// Converts a frame count to a duration in microseconds at the current track's
// sample rate (offload or PCM). Returns 0 if the sample rate is unknown.
int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
    int32_t sampleRate = offloadingAudio() ?
            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
    if (sampleRate == 0) {
        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
        return 0;
    }
    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
}

// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
    if (mUseVirtualAudioSink) {
        // NOTE(review): this local shadows the nowUs parameter — the virtual
        // sink path re-reads the clock itself; confirm this is intentional.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t mediaUs;
        if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
            return 0ll;
        } else {
            return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
        }
    }
    return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs);
}

// Maps a media timestamp to the real (system) time it should be rendered at.
int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
    int64_t realUs;
    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
        // If failed to get current position, e.g. due to audio clock is
        // not ready, then just play out video immediately without delay.
        return nowUs;
    }
    return realUs;
}

// Updates the media-clock anchor from a newly drained audio timestamp, falling
// back to a system-clock-paced "virtual" sink if the AudioSink never renders.
void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);

    // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start
    if (mNextAudioClockUpdateTimeUs == -1) {
        AudioTimestamp ts;
        if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) {
            mNextAudioClockUpdateTimeUs = 0; // start our clock updates
        }
    }
    int64_t nowUs = ALooper::GetNowUs();
    if (mNextAudioClockUpdateTimeUs >= 0) {
        if (nowUs >= mNextAudioClockUpdateTimeUs) {
            int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
            mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = false;
            mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs;
        }
    } else {
        int64_t unused;
        if ((mMediaClock->getMediaTime(nowUs, &unused) != OK)
                && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten)
                        > kMaxAllowedAudioSinkDelayUs)) {
            // Enough data has been sent to AudioSink, but AudioSink has not rendered
            // any data yet. Something is wrong with AudioSink, e.g., the device is not
            // connected to audio out.
            // Switch to system clock. This essentially creates a virtual AudioSink with
            // initial latenty of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten).
            // This virtual AudioSink renders audio data starting from the very first sample
            // and it's paced by system clock.
            ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock.");
            mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs);
            mUseVirtualAudioSink = true;
        }
    }
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}

// Called without mLock acquired.
void NuPlayer::Renderer::postDrainVideoQueue() {
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    bool needRepostDrainVideoQueue = false;
    int64_t delayUs;
    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    if (mFlags & FLAG_REAL_TIME) {
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
        realTimeUs = mediaTimeUs;
    } else {
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        {
            Mutex::Autolock autoLock(mLock);
            if (mAnchorTimeMediaUs < 0) {
                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
                mAnchorTimeMediaUs = mediaTimeUs;
                realTimeUs = nowUs;
            } else if (!mVideoSampleReceived) {
                // Always render the first video frame.
                realTimeUs = nowUs;
            } else if (mAudioFirstAnchorTimeMediaUs < 0
                || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) {
                // Clock is usable (or video-only): schedule at the mapped real
                // time; getRealTimeUs() falls back to nowUs on clock failure.
                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
            } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) {
                // Audio anchor exists but clock lookup failed: repost later
                // (see the heuristics below) rather than render blindly.
                needRepostDrainVideoQueue = true;
                realTimeUs = nowUs;
            } else {
                realTimeUs = nowUs;
            }
        }
        if (!mHasAudio) {
            // smooth out videos >= 10fps
            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
        }

        // Heuristics to handle situation when media time changed without a
        // discontinuity. If we have not drained an audio buffer that was
        // received after this buffer, repost in 10 msec. Otherwise repost
        // in 500 msec.
        delayUs = realTimeUs - nowUs;
        int64_t postDelayUs = -1;
        if (delayUs > 500000) {
            postDelayUs = 500000;
            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
                postDelayUs = 10000;
            }
        } else if (needRepostDrainVideoQueue) {
            // CHECK(mPlaybackRate > 0);
            // CHECK(mAudioFirstAnchorTimeMediaUs >= 0);
            // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0);
            postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs;
            postDelayUs /= mPlaybackRate;
        }

        if (postDelayUs >= 0) {
            // Not ready to schedule a real drain; repost and restart the
            // frame scheduler so stale vsync history isn't reused.
            msg->setWhat(kWhatPostDrainVideoQueue);
            msg->post(postDelayUs);
            mVideoScheduler->restart();
            ALOGI("possible video time jump of %dms (%lld : %lld) or uninitialized media clock,"
                    " retrying in %dms",
                    (int)(delayUs / 1000), (long long)mediaTimeUs,
                    (long long)mAudioFirstAnchorTimeMediaUs, (int)(postDelayUs / 1000));
            mDrainVideoQueuePending = true;
            return;
        }
    }

    // Snap the render time to the vsync grid.
    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

    delayUs = realTimeUs - nowUs;

    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
    // post 2 display refreshes before rendering is due
    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

    mDrainVideoQueuePending = true;
}

// Drains one entry from the video queue: signals EOS, or decides whether the
// frame is rendered or dropped (too late) and posts the consumed notification
// with the target render timestamp.
void NuPlayer::Renderer::onDrainVideoQueue() {
    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry *entry = &*mVideoQueue.begin();

    if (entry->mBuffer == NULL) {
        // EOS

        notifyEOS(false /* audio */, entry->mFinalResult);

        mVideoQueue.erase(mVideoQueue.begin());
        entry = NULL;

        setVideoLateByUs(0);
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    int64_t mediaTimeUs = -1;
    if (mFlags & FLAG_REAL_TIME) {
        // In real-time mode "timeUs" already is the render time.
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
    } else {
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
    }

    bool tooLate = false;

    if (!mPaused) {
        setVideoLateByUs(nowUs - realTimeUs);
        // 40ms lateness threshold before a frame is dropped.
        tooLate = (mVideoLateByUs > 40000);

        if (tooLate) {
            ALOGV("video late by %lld us (%.2f secs)",
                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
        } else {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
            ALOGV("rendering video at media time %.2f secs",
                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
                    mediaUs) / 1E6);

            if (!(mFlags & FLAG_REAL_TIME)
                    && mLastAudioMediaTimeUs != -1
                    && mediaTimeUs > mLastAudioMediaTimeUs) {
                // If audio ends before video, video continues to drive media clock.
                // Also smooth out videos >= 10fps.
                mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
            }
        }
    } else {
        setVideoLateByUs(0);
        if (!mVideoSampleReceived && !mHasAudio) {
            // This will ensure that the first frame after a flush won't be used as anchor
            // when renderer is in paused state, because resume can happen any time after seek.
            Mutex::Autolock autoLock(mLock);
            clearAnchorTime_l();
        }
    }

    // Always render the first video frame while keeping stats on A/V sync.
    if (!mVideoSampleReceived) {
        realTimeUs = nowUs;
        tooLate = false;
    }

    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
    entry->mNotifyConsumed->setInt32("render", !tooLate);
    entry->mNotifyConsumed->post();
    mVideoQueue.erase(mVideoQueue.begin());
    entry = NULL;

    mVideoSampleReceived = true;

    if (!mPaused) {
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyVideoRenderingStart();
        }
        Mutex::Autolock autoLock(mLock);
        notifyIfMediaRenderingStarted_l();
    }
}

// Tells the upper layer (via mNotify) that the first video frame has been
// sent for rendering.
void NuPlayer::Renderer::notifyVideoRenderingStart() {
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatVideoRenderingStart);
    notify->post();
}

// Reports end-of-stream for one stream. A delayed audio EOS is routed back
// through our own looper (tagged with the EOS generation so stale reposts can
// be discarded); otherwise the EOS is delivered to mNotify directly.
void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
    if (audio && delayUs > 0) {
        sp<AMessage> msg = new AMessage(kWhatEOS, this);
        msg->setInt32("audioEOSGeneration", mAudioEOSGeneration);
        msg->setInt32("finalResult", finalResult);
        msg->post(delayUs);
        return;
    }
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatEOS);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->setInt32("finalResult", finalResult);
    notify->post(delayUs);
}

// Asks the player to tear down and re-create the audio path; 'reason'
// distinguishes errors from e.g. forced non-offload fallback.
void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) {
    sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this);
    msg->setInt32("reason", reason);
    msg->post();
}

// Accepts a decoded buffer from either decoder, queues it, kicks the matching
// drain path, and — while queue syncing is active — drops leading audio so the
// first audio and video timestamps line up.
void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (mHasVideo) {
        if (mVideoScheduler == NULL) {
            // Lazily created on the first video buffer.
            mVideoScheduler = new VideoFrameScheduler();
            mVideoScheduler->init();
        }
    }

    sp<RefBase> obj;
    CHECK(msg->findObject("buffer", &obj));
    sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get());

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        // NOTE: postDrainVideoQueue() must be called without mLock held
        // (it takes the lock internally), hence the separate scope above.
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    Mutex::Autolock autoLock(mLock);
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000ll) {
        // Audio data starts More than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    syncQueuesDone_l();
}

// Ends A/V queue syncing (caller must hold mLock — "_l" suffix) and kicks
// both drain paths so any already-queued buffers start flowing.
void NuPlayer::Renderer::syncQueuesDone_l() {
    if (!mSyncQueues) {
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        // postDrainVideoQueue() must run without mLock held; drop and retake it.
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}

// Queues an EOS marker (entry with NULL buffer) on the audio or video queue
// and schedules draining. Ends queue syncing if the EOS arrives on an empty
// queue, since there is nothing left to align.
void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}

// Flushes one stream: drops all queued buffers, bumps the drain generation so
// in-flight drain messages become stale, resets the clock anchor, and for
// audio also pauses/flushes/restarts the AudioSink.
void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
            mLastAudioMediaTimeUs = -1;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
        clearAnchorTime_l();
    }

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            ++mAudioDrainGeneration;
            ++mAudioEOSGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
        mNextAudioClockUpdateTimeUs = -1;
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}

// Empties a queue, returning every buffer to its decoder via the
// notifyConsumed message (EOS markers have no buffer and no notification).
void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
    while (!queue->empty()) {
        QueueEntry *entry = &*queue->begin();

        if (entry->mBuffer != NULL) {
            entry->mNotifyConsumed->post();
        }

        queue->erase(queue->begin());
        entry = NULL;
    }
}

// Signals flush completion for one stream to the upper layer.
void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatFlushComplete);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->post();
}

// Returns true (and returns the buffer to its decoder) if the message was
// queued under an older queue generation, i.e. before the last flush.
bool NuPlayer::Renderer::dropBufferIfStale(
        bool audio, const sp<AMessage> &msg) {
    int32_t queueGeneration;
    CHECK(msg->findInt32("queueGeneration", &queueGeneration));

    if (queueGeneration == getQueueGeneration(audio)) {
        return false;
    }

    sp<AMessage> notifyConsumed;
    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
        notifyConsumed->post();
    }

    return true;
}

// Re-reads the written-frame count from a freshly (re)opened non-offloaded
// AudioSink and invalidates the frame-count anchor.
void NuPlayer::Renderer::onAudioSinkChanged() {
    if (offloadingAudio()) {
        return;
    }
    CHECK(!mDrainAudioQueuePending);
    mNumFramesWritten = 0;
    {
        Mutex::Autolock autoLock(mLock);
        mAnchorNumFramesWritten = -1;
    }
    uint32_t written;
    if (mAudioSink->getFramesWritten(&written) == OK) {
        mNumFramesWritten = written;
    }
}

// Leaves offload mode; bumping the drain generation discards any pending
// audio drain messages posted under the old mode.
void NuPlayer::Renderer::onDisableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags &= ~FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}

// Enters offload mode; mirror image of onDisableOffloadAudio().
void NuPlayer::Renderer::onEnableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags |= FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}

// Pauses playback: stops the media clock, cancels pending video drains, and
// pauses the sink. Audio draining is deliberately allowed to continue so the
// sink buffer fills while paused.
void NuPlayer::Renderer::onPause() {
    if (mPaused) {
        return;
    }

    {
        Mutex::Autolock autoLock(mLock);
        // we do not increment audio drain generation so that we fill audio buffer during pause.
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
        mPaused = true;
        mMediaClock->setPlaybackRate(0.0);
    }

    mDrainAudioQueuePending = false;
    mDrainVideoQueuePending = false;

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    mAudioSink->pause();
    startAudioOffloadPauseTimeout();

    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
          mAudioQueue.size(), mVideoQueue.size());
}

// Resumes playback: restarts the sink, restores the playback rate on the
// media clock, and kicks both drain paths.
void NuPlayer::Renderer::onResume() {
    if (!mPaused) {
        return;
    }

    // Note: audio data may not have been decoded, and the AudioSink may not be opened.
    cancelAudioOffloadPauseTimeout();
    if (mAudioSink->ready()) {
        status_t err = mAudioSink->start();
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            notifyAudioTearDown(kDueToError);
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;
        // rendering started message may have been delayed if we were paused.
        if (mRenderingDataDelivered) {
            notifyIfMediaRenderingStarted_l();
        }
        // configure audiosink as we did not do it when pausing
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        mMediaClock->setPlaybackRate(mPlaybackRate);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}

// Seeds (or re-seeds) the video frame scheduler with the container's frame rate.
void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
    if (mVideoScheduler == NULL) {
        mVideoScheduler = new VideoFrameScheduler();
    }
    mVideoScheduler->init(fps);
}

// Thread-safe accessor for the current queue generation of one stream.
int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
    Mutex::Autolock autoLock(mLock);
    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
}

// Thread-safe accessor for the current drain generation of one stream.
int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
    Mutex::Autolock autoLock(mLock);
    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
}

// Thread-safe accessor for the queue-syncing flag.
bool NuPlayer::Renderer::getSyncQueues() {
    Mutex::Autolock autoLock(mLock);
    return mSyncQueues;
}

// Stops and flushes the sink, then notifies the player that the audio path
// must be rebuilt, including the current position so playback can resume there.
void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
    if (mAudioTornDown) {
        return;
    }
    mAudioTornDown = true;

    int64_t currentPositionUs;
    sp<AMessage> notify = mNotify->dup();
    if (getCurrentPosition(&currentPositionUs) == OK) {
        notify->setInt64("positionUs", currentPositionUs);
    }

    mAudioSink->stop();
    mAudioSink->flush();

    notify->setInt32("what", kWhatAudioTearDown);
    notify->setInt32("reason", reason);
    notify->post();
}

// In offload mode, holds a wake lock across the pause and arms a timeout
// (generation-tagged) after which the offloaded sink is torn down to save power.
void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
    if (offloadingAudio()) {
        mWakeLock->acquire();
        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
        msg->post(kOffloadPauseMaxUs);
    }
}

void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
    // We may have called startAudioOffloadPauseTimeout() without
    // the AudioSink open and with offloadingAudio enabled.
    //
    // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless
    // we always release the wakelock and increment the pause timeout generation.
    //
    // Note: The acquired wakelock prevents the device from suspending
    // immediately after offload pause (in case a resume happens shortly thereafter).
    mWakeLock->release(true);
    ++mAudioOffloadPauseTimeoutGeneration;
}

// Opens (or re-opens) the AudioSink for the given format. Tries compressed
// offload first when offloading is enabled, falling back to PCM on failure
// (unless offloadOnly). Re-opening is skipped when the requested
// configuration matches the current one. Returns OK or the sink open error.
status_t NuPlayer::Renderer::onOpenAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags) {
    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
            offloadOnly, offloadingAudio());
    bool audioSinkChanged = false;

    int32_t numChannels;
    CHECK(format->findInt32("channel-count", &numChannels));

    int32_t channelMask;
    if (!format->findInt32("channel-mask", &channelMask)) {
        // signal to the AudioSink to derive the mask from count.
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    }

    int32_t sampleRate;
    CHECK(format->findInt32("sample-rate", &sampleRate));

    if (offloadingAudio()) {
        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
        AString mime;
        CHECK(format->findString("mime", &mime));
        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());

        if (err != OK) {
            ALOGE("Couldn't map mime \"%s\" to a valid "
                    "audio_format", mime.c_str());
            onDisableOffloadAudio();
        } else {
            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
                    mime.c_str(), audioFormat);

            int avgBitRate = -1;
            format->findInt32("bitrate", &avgBitRate);

            int32_t aacProfile = -1;
            if (audioFormat == AUDIO_FORMAT_AAC
                    && format->findInt32("aac-profile", &aacProfile)) {
                // Redefine AAC format as per aac profile
                mapAACProfileToAudioFormat(
                        audioFormat,
                        aacProfile);
            }

            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.duration_us = -1;
            format->findInt64(
                    "durationUs", &offloadInfo.duration_us);
            offloadInfo.sample_rate = sampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = hasVideo;
            offloadInfo.is_streaming = true;

            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
                ALOGV("openAudioSink: no change in offload mode");
                // no change from previous configuration, everything ok.
                return OK;
            }
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;

            ALOGV("openAudioSink: try to open AudioSink in offload mode");
            uint32_t offloadFlags = flags;
            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
            audioSinkChanged = true;
            mAudioSink->close();

            err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    audioFormat,
                    0 /* bufferCount - unused */,
                    &NuPlayer::Renderer::AudioSinkCallback,
                    this,
                    (audio_output_flags_t)offloadFlags,
                    &offloadInfo);

            if (err == OK) {
                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
            }

            if (err == OK) {
                // If the playback is offloaded to h/w, we pass
                // the HAL some metadata information.
                // We don't want to do this for PCM because it
                // will be going through the AudioFlinger mixer
                // before reaching the hardware.
                // TODO
                mCurrentOffloadInfo = offloadInfo;
                if (!mPaused) { // for preview mode, don't start if paused
                    err = mAudioSink->start();
                }
                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
            }
            if (err != OK) {
                // Clean up, fall back to non offload mode.
                mAudioSink->close();
                onDisableOffloadAudio();
                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
                ALOGV("openAudioSink: offload failed");
                if (offloadOnly) {
                    notifyAudioTearDown(kForceNonOffload);
                }
            } else {
                mUseAudioCallback = true;  // offload mode transfers data through callback
                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
            }
        }
    }
    if (!offloadOnly && !offloadingAudio()) {
        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
        uint32_t pcmFlags = flags;
        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

        const PcmInfo info = {
                (audio_channel_mask_t)channelMask,
                (audio_output_flags_t)pcmFlags,
                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
                numChannels,
                sampleRate
        };
        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
            ALOGV("openAudioSink: no change in pcm mode");
            // no change from previous configuration, everything ok.
            return OK;
        }

        audioSinkChanged = true;
        mAudioSink->close();
        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
        // Note: It is possible to set up the callback, but not use it to send audio data.
        // This requires a fix in AudioSink to explicitly specify the transfer mode.
        mUseAudioCallback = getUseAudioCallbackSetting();
        if (mUseAudioCallback) {
            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
        }

        // Compute the desired buffer size.
        // For callback mode, the amount of time before wakeup is about half the buffer size.
        const uint32_t frameCount =
                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;

        // The doNotReconnect means AudioSink will signal back and let NuPlayer to re-construct
        // AudioSink. We don't want this when there's video because it will cause a video seek to
        // the previous I frame. But we do want this when there's only audio because it will give
        // NuPlayer a chance to switch from non-offload mode to offload mode.
        // So we only set doNotReconnect when there's no video.
        const bool doNotReconnect = !hasVideo;

        // We should always be able to set our playback settings if the sink is closed.
        LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK,
                "onOpenAudioSink: can't set playback rate on closed sink");
        status_t err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    AUDIO_FORMAT_PCM_16_BIT,
                    0 /* bufferCount - unused */,
                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
                    mUseAudioCallback ? this : NULL,
                    (audio_output_flags_t)pcmFlags,
                    NULL,
                    doNotReconnect,
                    frameCount);
        if (err != OK) {
            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
            mAudioSink->close();
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
            return err;
        }
        mCurrentPcmInfo = info;
        if (!mPaused) { // for preview mode, don't start if paused
            mAudioSink->start();
        }
    }
    if (audioSinkChanged) {
        onAudioSinkChanged();
    }
    mAudioTornDown = false;
    return OK;
}

// Closes the sink and invalidates both cached configurations so the next
// onOpenAudioSink() performs a real re-open.
void NuPlayer::Renderer::onCloseAudioSink() {
    mAudioSink->close();
    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
}

}  // namespace android