NuPlayerRenderer.cpp revision 4c74fde2ef5b582196b296a8263cd39143e7abca
1/* 2 * Copyright (C) 2010 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17//#define LOG_NDEBUG 0 18#define LOG_TAG "NuPlayerRenderer" 19#include <utils/Log.h> 20 21#include "NuPlayerRenderer.h" 22#include <algorithm> 23#include <cutils/properties.h> 24#include <media/stagefright/foundation/ABuffer.h> 25#include <media/stagefright/foundation/ADebug.h> 26#include <media/stagefright/foundation/AMessage.h> 27#include <media/stagefright/foundation/AUtils.h> 28#include <media/stagefright/foundation/AWakeLock.h> 29#include <media/stagefright/MediaClock.h> 30#include <media/stagefright/MediaErrors.h> 31#include <media/stagefright/MetaData.h> 32#include <media/stagefright/Utils.h> 33#include <media/stagefright/VideoFrameScheduler.h> 34 35#include <inttypes.h> 36 37namespace android { 38 39/* 40 * Example of common configuration settings in shell script form 41 42 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager 43 adb shell setprop audio.offload.disable 1 44 45 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager 46 adb shell setprop audio.offload.video 1 47 48 #Use audio callbacks for PCM data 49 adb shell setprop media.stagefright.audio.cbk 1 50 51 #Use deep buffer for PCM data with video (it is generally enabled for audio-only) 52 adb shell setprop media.stagefright.audio.deep 1 53 54 #Set size of buffers for pcm audio sink in msec (example: 1000 msec) 55 adb shell setprop media.stagefright.audio.sink 1000 56 57 * These configurations take effect for the next track played (not the current track). 58 */ 59 60static inline bool getUseAudioCallbackSetting() { 61 return property_get_bool("media.stagefright.audio.cbk", false /* default_value */); 62} 63 64static inline int32_t getAudioSinkPcmMsSetting() { 65 return property_get_int32( 66 "media.stagefright.audio.sink", 500 /* default_value */); 67} 68 69// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink 70// is closed to allow the audio DSP to power down. 71static const int64_t kOffloadPauseMaxUs = 10000000ll; 72 73// Maximum allowed delay from AudioSink, 1.5 seconds. 74static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll; 75 76static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000; 77 78// static 79const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = { 80 AUDIO_CHANNEL_NONE, 81 AUDIO_OUTPUT_FLAG_NONE, 82 AUDIO_FORMAT_INVALID, 83 0, // mNumChannels 84 0 // mSampleRate 85}; 86 87// static 88const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll; 89 90NuPlayer::Renderer::Renderer( 91 const sp<MediaPlayerBase::AudioSink> &sink, 92 const sp<AMessage> ¬ify, 93 uint32_t flags) 94 : mAudioSink(sink), 95 mUseVirtualAudioSink(false), 96 mNotify(notify), 97 mFlags(flags), 98 mNumFramesWritten(0), 99 mDrainAudioQueuePending(false), 100 mDrainVideoQueuePending(false), 101 mAudioQueueGeneration(0), 102 mVideoQueueGeneration(0), 103 mAudioDrainGeneration(0), 104 mVideoDrainGeneration(0), 105 mAudioEOSGeneration(0), 106 mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT), 107 mAudioFirstAnchorTimeMediaUs(-1), 108 mAnchorTimeMediaUs(-1), 109 mAnchorNumFramesWritten(-1), 110 mVideoLateByUs(0ll), 111 mHasAudio(false), 112 mHasVideo(false), 113 mNotifyCompleteAudio(false), 114 mNotifyCompleteVideo(false), 115 mSyncQueues(false), 116 mPaused(false), 117 mPauseDrainAudioAllowedUs(0), 118 mVideoSampleReceived(false), 119 mVideoRenderingStarted(false), 120 mVideoRenderingStartGeneration(0), 121 mAudioRenderingStartGeneration(0), 122 mRenderingDataDelivered(false), 123 mNextAudioClockUpdateTimeUs(-1), 124 mLastAudioMediaTimeUs(-1), 125 mAudioOffloadPauseTimeoutGeneration(0), 126 mAudioTornDown(false), 127 mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER), 128 mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER), 129 mTotalBuffersQueued(0), 130 mLastAudioBufferDrained(0), 131 mUseAudioCallback(false), 132 mWakeLock(new AWakeLock()) { 133 mMediaClock = new MediaClock; 134 mPlaybackRate = mPlaybackSettings.mSpeed; 135 mMediaClock->setPlaybackRate(mPlaybackRate); 136} 137 138NuPlayer::Renderer::~Renderer() { 139 if (offloadingAudio()) { 140 mAudioSink->stop(); 141 mAudioSink->flush(); 142 mAudioSink->close(); 143 } 144 145 // Try to avoid racing condition in case callback is still on. 146 Mutex::Autolock autoLock(mLock); 147 mUseAudioCallback = false; 148 flushQueue(&mAudioQueue); 149 flushQueue(&mVideoQueue); 150 mWakeLock.clear(); 151 mMediaClock.clear(); 152 mVideoScheduler.clear(); 153 mNotify.clear(); 154 mAudioSink.clear(); 155} 156 157void NuPlayer::Renderer::queueBuffer( 158 bool audio, 159 const sp<ABuffer> &buffer, 160 const sp<AMessage> ¬ifyConsumed) { 161 sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this); 162 msg->setInt32("queueGeneration", getQueueGeneration(audio)); 163 msg->setInt32("audio", static_cast<int32_t>(audio)); 164 msg->setBuffer("buffer", buffer); 165 msg->setMessage("notifyConsumed", notifyConsumed); 166 msg->post(); 167} 168 169void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) { 170 CHECK_NE(finalResult, (status_t)OK); 171 172 sp<AMessage> msg = new AMessage(kWhatQueueEOS, this); 173 msg->setInt32("queueGeneration", getQueueGeneration(audio)); 174 msg->setInt32("audio", static_cast<int32_t>(audio)); 175 msg->setInt32("finalResult", finalResult); 176 msg->post(); 177} 178 179status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) { 180 sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this); 181 writeToAMessage(msg, rate); 182 sp<AMessage> response; 183 status_t err = msg->postAndAwaitResponse(&response); 184 if (err == OK && response != NULL) { 185 CHECK(response->findInt32("err", &err)); 186 } 187 return err; 188} 189 190status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) { 191 if (rate.mSpeed == 0.f) { 192 onPause(); 193 // don't call audiosink's setPlaybackRate if pausing, as pitch does not 194 // have to correspond to the any non-0 speed (e.g old speed). Keep 195 // settings nonetheless, using the old speed, in case audiosink changes. 196 AudioPlaybackRate newRate = rate; 197 newRate.mSpeed = mPlaybackSettings.mSpeed; 198 mPlaybackSettings = newRate; 199 return OK; 200 } 201 202 if (mAudioSink != NULL && mAudioSink->ready()) { 203 status_t err = mAudioSink->setPlaybackRate(rate); 204 if (err != OK) { 205 return err; 206 } 207 } 208 mPlaybackSettings = rate; 209 mPlaybackRate = rate.mSpeed; 210 mMediaClock->setPlaybackRate(mPlaybackRate); 211 return OK; 212} 213 214status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) { 215 sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this); 216 sp<AMessage> response; 217 status_t err = msg->postAndAwaitResponse(&response); 218 if (err == OK && response != NULL) { 219 CHECK(response->findInt32("err", &err)); 220 if (err == OK) { 221 readFromAMessage(response, rate); 222 } 223 } 224 return err; 225} 226 227status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) { 228 if (mAudioSink != NULL && mAudioSink->ready()) { 229 status_t err = mAudioSink->getPlaybackRate(rate); 230 if (err == OK) { 231 if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) { 232 ALOGW("correcting mismatch in internal/external playback rate"); 233 } 234 // get playback settings used by audiosink, as it may be 235 // slightly off due to audiosink not taking small changes. 236 mPlaybackSettings = *rate; 237 if (mPaused) { 238 rate->mSpeed = 0.f; 239 } 240 } 241 return err; 242 } 243 *rate = mPlaybackSettings; 244 return OK; 245} 246 247status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) { 248 sp<AMessage> msg = new AMessage(kWhatConfigSync, this); 249 writeToAMessage(msg, sync, videoFpsHint); 250 sp<AMessage> response; 251 status_t err = msg->postAndAwaitResponse(&response); 252 if (err == OK && response != NULL) { 253 CHECK(response->findInt32("err", &err)); 254 } 255 return err; 256} 257 258status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) { 259 if (sync.mSource != AVSYNC_SOURCE_DEFAULT) { 260 return BAD_VALUE; 261 } 262 // TODO: support sync sources 263 return INVALID_OPERATION; 264} 265 266status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) { 267 sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this); 268 sp<AMessage> response; 269 status_t err = msg->postAndAwaitResponse(&response); 270 if (err == OK && response != NULL) { 271 CHECK(response->findInt32("err", &err)); 272 if (err == OK) { 273 readFromAMessage(response, sync, videoFps); 274 } 275 } 276 return err; 277} 278 279status_t NuPlayer::Renderer::onGetSyncSettings( 280 AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) { 281 *sync = mSyncSettings; 282 *videoFps = -1.f; 283 return OK; 284} 285 286void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) { 287 { 288 Mutex::Autolock autoLock(mLock); 289 if (audio) { 290 mNotifyCompleteAudio |= notifyComplete; 291 clearAudioFirstAnchorTime_l(); 292 ++mAudioQueueGeneration; 293 ++mAudioDrainGeneration; 294 } else { 295 mNotifyCompleteVideo |= notifyComplete; 296 ++mVideoQueueGeneration; 297 ++mVideoDrainGeneration; 298 } 299 300 clearAnchorTime_l(); 301 mVideoLateByUs = 0; 302 mSyncQueues = false; 303 } 304 305 sp<AMessage> msg = new AMessage(kWhatFlush, this); 306 msg->setInt32("audio", static_cast<int32_t>(audio)); 307 msg->post(); 308} 309 310void NuPlayer::Renderer::signalTimeDiscontinuity() { 311} 312 313void NuPlayer::Renderer::signalDisableOffloadAudio() { 314 (new AMessage(kWhatDisableOffloadAudio, this))->post(); 315} 316 317void NuPlayer::Renderer::signalEnableOffloadAudio() { 318 (new AMessage(kWhatEnableOffloadAudio, this))->post(); 319} 320 321void NuPlayer::Renderer::pause() { 322 (new AMessage(kWhatPause, this))->post(); 323} 324 325void NuPlayer::Renderer::resume() { 326 (new AMessage(kWhatResume, this))->post(); 327} 328 329void NuPlayer::Renderer::setVideoFrameRate(float fps) { 330 sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this); 331 msg->setFloat("frame-rate", fps); 332 msg->post(); 333} 334 335// Called on any threads without mLock acquired. 336status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) { 337 status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs); 338 if (result == OK) { 339 return result; 340 } 341 342 // MediaClock has not started yet. Try to start it if possible. 343 { 344 Mutex::Autolock autoLock(mLock); 345 if (mAudioFirstAnchorTimeMediaUs == -1) { 346 return result; 347 } 348 349 AudioTimestamp ts; 350 status_t res = mAudioSink->getTimestamp(ts); 351 if (res != OK) { 352 return result; 353 } 354 355 // AudioSink has rendered some frames. 356 int64_t nowUs = ALooper::GetNowUs(); 357 int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs) 358 + mAudioFirstAnchorTimeMediaUs; 359 mMediaClock->updateAnchor(nowMediaUs, nowUs, -1); 360 } 361 362 return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs); 363} 364 365void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() { 366 mAudioFirstAnchorTimeMediaUs = -1; 367 mMediaClock->setStartingTimeMedia(-1); 368} 369 370void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) { 371 if (mAudioFirstAnchorTimeMediaUs == -1) { 372 mAudioFirstAnchorTimeMediaUs = mediaUs; 373 mMediaClock->setStartingTimeMedia(mediaUs); 374 } 375} 376 377void NuPlayer::Renderer::clearAnchorTime_l() { 378 mMediaClock->clearAnchor(); 379 mAnchorTimeMediaUs = -1; 380 mAnchorNumFramesWritten = -1; 381} 382 383void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) { 384 Mutex::Autolock autoLock(mLock); 385 mVideoLateByUs = lateUs; 386} 387 388int64_t NuPlayer::Renderer::getVideoLateByUs() { 389 Mutex::Autolock autoLock(mLock); 390 return mVideoLateByUs; 391} 392 393status_t NuPlayer::Renderer::openAudioSink( 394 const sp<AMessage> &format, 395 bool offloadOnly, 396 bool hasVideo, 397 uint32_t flags, 398 bool *isOffloaded) { 399 sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this); 400 msg->setMessage("format", format); 401 msg->setInt32("offload-only", offloadOnly); 402 msg->setInt32("has-video", hasVideo); 403 msg->setInt32("flags", flags); 404 405 sp<AMessage> response; 406 msg->postAndAwaitResponse(&response); 407 408 int32_t err; 409 if (!response->findInt32("err", &err)) { 410 err = INVALID_OPERATION; 411 } else if (err == OK && isOffloaded != NULL) { 412 int32_t offload; 413 CHECK(response->findInt32("offload", &offload)); 414 *isOffloaded = (offload != 0); 415 } 416 return err; 417} 418 419void NuPlayer::Renderer::closeAudioSink() { 420 sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this); 421 422 sp<AMessage> response; 423 msg->postAndAwaitResponse(&response); 424} 425 426void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) { 427 switch (msg->what()) { 428 case kWhatOpenAudioSink: 429 { 430 sp<AMessage> format; 431 CHECK(msg->findMessage("format", &format)); 432 433 int32_t offloadOnly; 434 CHECK(msg->findInt32("offload-only", &offloadOnly)); 435 436 int32_t hasVideo; 437 CHECK(msg->findInt32("has-video", &hasVideo)); 438 439 uint32_t flags; 440 CHECK(msg->findInt32("flags", (int32_t *)&flags)); 441 442 status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags); 443 444 sp<AMessage> response = new AMessage; 445 response->setInt32("err", err); 446 response->setInt32("offload", offloadingAudio()); 447 448 sp<AReplyToken> replyID; 449 CHECK(msg->senderAwaitsResponse(&replyID)); 450 response->postReply(replyID); 451 452 break; 453 } 454 455 case kWhatCloseAudioSink: 456 { 457 sp<AReplyToken> replyID; 458 CHECK(msg->senderAwaitsResponse(&replyID)); 459 460 onCloseAudioSink(); 461 462 sp<AMessage> response = new AMessage; 463 response->postReply(replyID); 464 break; 465 } 466 467 case kWhatStopAudioSink: 468 { 469 mAudioSink->stop(); 470 break; 471 } 472 473 case kWhatDrainAudioQueue: 474 { 475 mDrainAudioQueuePending = false; 476 477 int32_t generation; 478 CHECK(msg->findInt32("drainGeneration", &generation)); 479 if (generation != getDrainGeneration(true /* audio */)) { 480 break; 481 } 482 483 if (onDrainAudioQueue()) { 484 uint32_t numFramesPlayed; 485 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), 486 (status_t)OK); 487 488 uint32_t numFramesPendingPlayout = 489 mNumFramesWritten - numFramesPlayed; 490 491 // This is how long the audio sink will have data to 492 // play back. 493 int64_t delayUs = 494 mAudioSink->msecsPerFrame() 495 * numFramesPendingPlayout * 1000ll; 496 if (mPlaybackRate > 1.0f) { 497 delayUs /= mPlaybackRate; 498 } 499 500 // Let's give it more data after about half that time 501 // has elapsed. 502 delayUs /= 2; 503 // check the buffer size to estimate maximum delay permitted. 504 const int64_t maxDrainDelayUs = std::max( 505 mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */); 506 ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld", 507 (long long)delayUs, (long long)maxDrainDelayUs); 508 Mutex::Autolock autoLock(mLock); 509 postDrainAudioQueue_l(delayUs); 510 } 511 break; 512 } 513 514 case kWhatDrainVideoQueue: 515 { 516 int32_t generation; 517 CHECK(msg->findInt32("drainGeneration", &generation)); 518 if (generation != getDrainGeneration(false /* audio */)) { 519 break; 520 } 521 522 mDrainVideoQueuePending = false; 523 524 onDrainVideoQueue(); 525 526 postDrainVideoQueue(); 527 break; 528 } 529 530 case kWhatPostDrainVideoQueue: 531 { 532 int32_t generation; 533 CHECK(msg->findInt32("drainGeneration", &generation)); 534 if (generation != getDrainGeneration(false /* audio */)) { 535 break; 536 } 537 538 mDrainVideoQueuePending = false; 539 postDrainVideoQueue(); 540 break; 541 } 542 543 case kWhatQueueBuffer: 544 { 545 onQueueBuffer(msg); 546 break; 547 } 548 549 case kWhatQueueEOS: 550 { 551 onQueueEOS(msg); 552 break; 553 } 554 555 case kWhatEOS: 556 { 557 int32_t generation; 558 CHECK(msg->findInt32("audioEOSGeneration", &generation)); 559 if (generation != mAudioEOSGeneration) { 560 break; 561 } 562 status_t finalResult; 563 CHECK(msg->findInt32("finalResult", &finalResult)); 564 notifyEOS(true /* audio */, finalResult); 565 break; 566 } 567 568 case kWhatConfigPlayback: 569 { 570 sp<AReplyToken> replyID; 571 CHECK(msg->senderAwaitsResponse(&replyID)); 572 AudioPlaybackRate rate; 573 readFromAMessage(msg, &rate); 574 status_t err = onConfigPlayback(rate); 575 sp<AMessage> response = new AMessage; 576 response->setInt32("err", err); 577 response->postReply(replyID); 578 break; 579 } 580 581 case kWhatGetPlaybackSettings: 582 { 583 sp<AReplyToken> replyID; 584 CHECK(msg->senderAwaitsResponse(&replyID)); 585 AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT; 586 status_t err = onGetPlaybackSettings(&rate); 587 sp<AMessage> response = new AMessage; 588 if (err == OK) { 589 writeToAMessage(response, rate); 590 } 591 response->setInt32("err", err); 592 response->postReply(replyID); 593 break; 594 } 595 596 case kWhatConfigSync: 597 { 598 sp<AReplyToken> replyID; 599 CHECK(msg->senderAwaitsResponse(&replyID)); 600 AVSyncSettings sync; 601 float videoFpsHint; 602 readFromAMessage(msg, &sync, &videoFpsHint); 603 status_t err = onConfigSync(sync, videoFpsHint); 604 sp<AMessage> response = new AMessage; 605 response->setInt32("err", err); 606 response->postReply(replyID); 607 break; 608 } 609 610 case kWhatGetSyncSettings: 611 { 612 sp<AReplyToken> replyID; 613 CHECK(msg->senderAwaitsResponse(&replyID)); 614 615 ALOGV("kWhatGetSyncSettings"); 616 AVSyncSettings sync; 617 float videoFps = -1.f; 618 status_t err = onGetSyncSettings(&sync, &videoFps); 619 sp<AMessage> response = new AMessage; 620 if (err == OK) { 621 writeToAMessage(response, sync, videoFps); 622 } 623 response->setInt32("err", err); 624 response->postReply(replyID); 625 break; 626 } 627 628 case kWhatFlush: 629 { 630 onFlush(msg); 631 break; 632 } 633 634 case kWhatDisableOffloadAudio: 635 { 636 onDisableOffloadAudio(); 637 break; 638 } 639 640 case kWhatEnableOffloadAudio: 641 { 642 onEnableOffloadAudio(); 643 break; 644 } 645 646 case kWhatPause: 647 { 648 onPause(); 649 break; 650 } 651 652 case kWhatResume: 653 { 654 onResume(); 655 break; 656 } 657 658 case kWhatSetVideoFrameRate: 659 { 660 float fps; 661 CHECK(msg->findFloat("frame-rate", &fps)); 662 onSetVideoFrameRate(fps); 663 break; 664 } 665 666 case kWhatAudioTearDown: 667 { 668 int32_t reason; 669 CHECK(msg->findInt32("reason", &reason)); 670 671 onAudioTearDown((AudioTearDownReason)reason); 672 break; 673 } 674 675 case kWhatAudioOffloadPauseTimeout: 676 { 677 int32_t generation; 678 CHECK(msg->findInt32("drainGeneration", &generation)); 679 if (generation != mAudioOffloadPauseTimeoutGeneration) { 680 break; 681 } 682 ALOGV("Audio Offload tear down due to pause timeout."); 683 onAudioTearDown(kDueToTimeout); 684 mWakeLock->release(); 685 break; 686 } 687 688 default: 689 TRESPASS(); 690 break; 691 } 692} 693 694void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) { 695 if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) { 696 return; 697 } 698 699 if (mAudioQueue.empty()) { 700 return; 701 } 702 703 // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data. 704 if (mPaused) { 705 const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs(); 706 if (diffUs > delayUs) { 707 delayUs = diffUs; 708 } 709 } 710 711 mDrainAudioQueuePending = true; 712 sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this); 713 msg->setInt32("drainGeneration", mAudioDrainGeneration); 714 msg->post(delayUs); 715} 716 717void NuPlayer::Renderer::prepareForMediaRenderingStart_l() { 718 mAudioRenderingStartGeneration = mAudioDrainGeneration; 719 mVideoRenderingStartGeneration = mVideoDrainGeneration; 720 mRenderingDataDelivered = false; 721} 722 723void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() { 724 if (mVideoRenderingStartGeneration == mVideoDrainGeneration && 725 mAudioRenderingStartGeneration == mAudioDrainGeneration) { 726 mRenderingDataDelivered = true; 727 if (mPaused) { 728 return; 729 } 730 mVideoRenderingStartGeneration = -1; 731 mAudioRenderingStartGeneration = -1; 732 733 sp<AMessage> notify = mNotify->dup(); 734 notify->setInt32("what", kWhatMediaRenderingStart); 735 notify->post(); 736 } 737} 738 739// static 740size_t NuPlayer::Renderer::AudioSinkCallback( 741 MediaPlayerBase::AudioSink * /* audioSink */, 742 void *buffer, 743 size_t size, 744 void *cookie, 745 MediaPlayerBase::AudioSink::cb_event_t event) { 746 NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie; 747 748 switch (event) { 749 case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER: 750 { 751 return me->fillAudioBuffer(buffer, size); 752 break; 753 } 754 755 case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END: 756 { 757 ALOGV("AudioSink::CB_EVENT_STREAM_END"); 758 me->notifyEOSCallback(); 759 break; 760 } 761 762 case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN: 763 { 764 ALOGV("AudioSink::CB_EVENT_TEAR_DOWN"); 765 me->notifyAudioTearDown(kDueToError); 766 break; 767 } 768 } 769 770 return 0; 771} 772 773void NuPlayer::Renderer::notifyEOSCallback() { 774 Mutex::Autolock autoLock(mLock); 775 776 if (!mUseAudioCallback) { 777 return; 778 } 779 780 notifyEOS(true /* audio */, ERROR_END_OF_STREAM); 781} 782 783size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) { 784 Mutex::Autolock autoLock(mLock); 785 786 if (!mUseAudioCallback) { 787 return 0; 788 } 789 790 bool hasEOS = false; 791 792 size_t sizeCopied = 0; 793 bool firstEntry = true; 794 QueueEntry *entry; // will be valid after while loop if hasEOS is set. 795 while (sizeCopied < size && !mAudioQueue.empty()) { 796 entry = &*mAudioQueue.begin(); 797 798 if (entry->mBuffer == NULL) { // EOS 799 hasEOS = true; 800 mAudioQueue.erase(mAudioQueue.begin()); 801 break; 802 } 803 804 if (firstEntry && entry->mOffset == 0) { 805 firstEntry = false; 806 int64_t mediaTimeUs; 807 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 808 ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6); 809 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs); 810 } 811 812 size_t copy = entry->mBuffer->size() - entry->mOffset; 813 size_t sizeRemaining = size - sizeCopied; 814 if (copy > sizeRemaining) { 815 copy = sizeRemaining; 816 } 817 818 memcpy((char *)buffer + sizeCopied, 819 entry->mBuffer->data() + entry->mOffset, 820 copy); 821 822 entry->mOffset += copy; 823 if (entry->mOffset == entry->mBuffer->size()) { 824 entry->mNotifyConsumed->post(); 825 mAudioQueue.erase(mAudioQueue.begin()); 826 entry = NULL; 827 } 828 sizeCopied += copy; 829 830 notifyIfMediaRenderingStarted_l(); 831 } 832 833 if (mAudioFirstAnchorTimeMediaUs >= 0) { 834 int64_t nowUs = ALooper::GetNowUs(); 835 int64_t nowMediaUs = 836 mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs); 837 // we don't know how much data we are queueing for offloaded tracks. 838 mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX); 839 } 840 841 // for non-offloaded audio, we need to compute the frames written because 842 // there is no EVENT_STREAM_END notification. The frames written gives 843 // an estimate on the pending played out duration. 844 if (!offloadingAudio()) { 845 mNumFramesWritten += sizeCopied / mAudioSink->frameSize(); 846 } 847 848 if (hasEOS) { 849 (new AMessage(kWhatStopAudioSink, this))->post(); 850 // As there is currently no EVENT_STREAM_END callback notification for 851 // non-offloaded audio tracks, we need to post the EOS ourselves. 852 if (!offloadingAudio()) { 853 int64_t postEOSDelayUs = 0; 854 if (mAudioSink->needsTrailingPadding()) { 855 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); 856 } 857 ALOGV("fillAudioBuffer: notifyEOS " 858 "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld", 859 mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs); 860 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); 861 } 862 } 863 return sizeCopied; 864} 865 866void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() { 867 List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it; 868 bool foundEOS = false; 869 while (it != mAudioQueue.end()) { 870 int32_t eos; 871 QueueEntry *entry = &*it++; 872 if (entry->mBuffer == NULL 873 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) { 874 itEOS = it; 875 foundEOS = true; 876 } 877 } 878 879 if (foundEOS) { 880 // post all replies before EOS and drop the samples 881 for (it = mAudioQueue.begin(); it != itEOS; it++) { 882 if (it->mBuffer == NULL) { 883 // delay doesn't matter as we don't even have an AudioTrack 884 notifyEOS(true /* audio */, it->mFinalResult); 885 } else { 886 it->mNotifyConsumed->post(); 887 } 888 } 889 mAudioQueue.erase(mAudioQueue.begin(), itEOS); 890 } 891} 892 893bool NuPlayer::Renderer::onDrainAudioQueue() { 894 // do not drain audio during teardown as queued buffers may be invalid. 895 if (mAudioTornDown) { 896 return false; 897 } 898 // TODO: This call to getPosition checks if AudioTrack has been created 899 // in AudioSink before draining audio. If AudioTrack doesn't exist, then 900 // CHECKs on getPosition will fail. 901 // We still need to figure out why AudioTrack is not created when 902 // this function is called. One possible reason could be leftover 903 // audio. Another possible place is to check whether decoder 904 // has received INFO_FORMAT_CHANGED as the first buffer since 905 // AudioSink is opened there, and possible interactions with flush 906 // immediately after start. Investigate error message 907 // "vorbis_dsp_synthesis returned -135", along with RTSP. 908 uint32_t numFramesPlayed; 909 if (mAudioSink->getPosition(&numFramesPlayed) != OK) { 910 // When getPosition fails, renderer will not reschedule the draining 911 // unless new samples are queued. 912 // If we have pending EOS (or "eos" marker for discontinuities), we need 913 // to post these now as NuPlayerDecoder might be waiting for it. 914 drainAudioQueueUntilLastEOS(); 915 916 ALOGW("onDrainAudioQueue(): audio sink is not ready"); 917 return false; 918 } 919 920#if 0 921 ssize_t numFramesAvailableToWrite = 922 mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed); 923 924 if (numFramesAvailableToWrite == mAudioSink->frameCount()) { 925 ALOGI("audio sink underrun"); 926 } else { 927 ALOGV("audio queue has %d frames left to play", 928 mAudioSink->frameCount() - numFramesAvailableToWrite); 929 } 930#endif 931 932 uint32_t prevFramesWritten = mNumFramesWritten; 933 while (!mAudioQueue.empty()) { 934 QueueEntry *entry = &*mAudioQueue.begin(); 935 936 mLastAudioBufferDrained = entry->mBufferOrdinal; 937 938 if (entry->mBuffer == NULL) { 939 // EOS 940 int64_t postEOSDelayUs = 0; 941 if (mAudioSink->needsTrailingPadding()) { 942 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); 943 } 944 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); 945 mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten); 946 947 mAudioQueue.erase(mAudioQueue.begin()); 948 entry = NULL; 949 if (mAudioSink->needsTrailingPadding()) { 950 // If we're not in gapless playback (i.e. through setNextPlayer), we 951 // need to stop the track here, because that will play out the last 952 // little bit at the end of the file. Otherwise short files won't play. 953 mAudioSink->stop(); 954 mNumFramesWritten = 0; 955 } 956 return false; 957 } 958 959 // ignore 0-sized buffer which could be EOS marker with no data 960 if (entry->mOffset == 0 && entry->mBuffer->size() > 0) { 961 int64_t mediaTimeUs; 962 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 963 ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs", 964 mediaTimeUs / 1E6); 965 onNewAudioMediaTime(mediaTimeUs); 966 } 967 968 size_t copy = entry->mBuffer->size() - entry->mOffset; 969 970 ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset, 971 copy, false /* blocking */); 972 if (written < 0) { 973 // An error in AudioSink write. Perhaps the AudioSink was not properly opened. 974 if (written == WOULD_BLOCK) { 975 ALOGV("AudioSink write would block when writing %zu bytes", copy); 976 } else { 977 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy); 978 // This can only happen when AudioSink was opened with doNotReconnect flag set to 979 // true, in which case the NuPlayer will handle the reconnect. 980 notifyAudioTearDown(kDueToError); 981 } 982 break; 983 } 984 985 entry->mOffset += written; 986 size_t remainder = entry->mBuffer->size() - entry->mOffset; 987 if ((ssize_t)remainder < mAudioSink->frameSize()) { 988 if (remainder > 0) { 989 ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.", 990 remainder); 991 entry->mOffset += remainder; 992 copy -= remainder; 993 } 994 995 entry->mNotifyConsumed->post(); 996 mAudioQueue.erase(mAudioQueue.begin()); 997 998 entry = NULL; 999 } 1000 1001 size_t copiedFrames = written / mAudioSink->frameSize(); 1002 mNumFramesWritten += copiedFrames; 1003 1004 { 1005 Mutex::Autolock autoLock(mLock); 1006 int64_t maxTimeMedia; 1007 maxTimeMedia = 1008 mAnchorTimeMediaUs + 1009 (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL) 1010 * 1000LL * mAudioSink->msecsPerFrame()); 1011 mMediaClock->updateMaxTimeMedia(maxTimeMedia); 1012 1013 notifyIfMediaRenderingStarted_l(); 1014 } 1015 1016 if (written != (ssize_t)copy) { 1017 // A short count was received from AudioSink::write() 1018 // 1019 // AudioSink write is called in non-blocking mode. 1020 // It may return with a short count when: 1021 // 1022 // 1) Size to be copied is not a multiple of the frame size. Fractional frames are 1023 // discarded. 1024 // 2) The data to be copied exceeds the available buffer in AudioSink. 1025 // 3) An error occurs and data has been partially copied to the buffer in AudioSink. 1026 // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded. 1027 1028 // (Case 1) 1029 // Must be a multiple of the frame size. If it is not a multiple of a frame size, it 1030 // needs to fail, as we should not carry over fractional frames between calls. 1031 CHECK_EQ(copy % mAudioSink->frameSize(), 0); 1032 1033 // (Case 2, 3, 4) 1034 // Return early to the caller. 1035 // Beware of calling immediately again as this may busy-loop if you are not careful. 1036 ALOGV("AudioSink write short frame count %zd < %zu", written, copy); 1037 break; 1038 } 1039 } 1040 1041 // calculate whether we need to reschedule another write. 1042 bool reschedule = !mAudioQueue.empty() 1043 && (!mPaused 1044 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers 1045 //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u", 1046 // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten); 1047 return reschedule; 1048} 1049 1050int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) { 1051 int32_t sampleRate = offloadingAudio() ? 1052 mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate; 1053 if (sampleRate == 0) { 1054 ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload"); 1055 return 0; 1056 } 1057 // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours. 1058 return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate); 1059} 1060 1061// Calculate duration of pending samples if played at normal rate (i.e., 1.0). 1062int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) { 1063 int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten); 1064 if (mUseVirtualAudioSink) { 1065 int64_t nowUs = ALooper::GetNowUs(); 1066 int64_t mediaUs; 1067 if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) { 1068 return 0ll; 1069 } else { 1070 return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs); 1071 } 1072 } 1073 return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs); 1074} 1075 1076int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) { 1077 int64_t realUs; 1078 if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) { 1079 // If failed to get current position, e.g. due to audio clock is 1080 // not ready, then just play out video immediately without delay. 1081 return nowUs; 1082 } 1083 return realUs; 1084} 1085 1086void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) { 1087 Mutex::Autolock autoLock(mLock); 1088 // TRICKY: vorbis decoder generates multiple frames with the same 1089 // timestamp, so only update on the first frame with a given timestamp 1090 if (mediaTimeUs == mAnchorTimeMediaUs) { 1091 return; 1092 } 1093 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs); 1094 1095 // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start 1096 if (mNextAudioClockUpdateTimeUs == -1) { 1097 AudioTimestamp ts; 1098 if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) { 1099 mNextAudioClockUpdateTimeUs = 0; // start our clock updates 1100 } 1101 } 1102 int64_t nowUs = ALooper::GetNowUs(); 1103 if (mNextAudioClockUpdateTimeUs >= 0) { 1104 if (nowUs >= mNextAudioClockUpdateTimeUs) { 1105 int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs); 1106 mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs); 1107 mUseVirtualAudioSink = false; 1108 mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs; 1109 } 1110 } else { 1111 int64_t unused; 1112 if ((mMediaClock->getMediaTime(nowUs, &unused) != OK) 1113 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten) 1114 > kMaxAllowedAudioSinkDelayUs)) { 1115 // Enough data has been sent to AudioSink, but AudioSink has not rendered 1116 // any data yet. Something is wrong with AudioSink, e.g., the device is not 1117 // connected to audio out. 1118 // Switch to system clock. This essentially creates a virtual AudioSink with 1119 // initial latenty of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten). 1120 // This virtual AudioSink renders audio data starting from the very first sample 1121 // and it's paced by system clock. 1122 ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? Switching to system clock."); 1123 mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs); 1124 mUseVirtualAudioSink = true; 1125 } 1126 } 1127 mAnchorNumFramesWritten = mNumFramesWritten; 1128 mAnchorTimeMediaUs = mediaTimeUs; 1129} 1130 1131// Called without mLock acquired. 1132void NuPlayer::Renderer::postDrainVideoQueue() { 1133 if (mDrainVideoQueuePending 1134 || getSyncQueues() 1135 || (mPaused && mVideoSampleReceived)) { 1136 return; 1137 } 1138 1139 if (mVideoQueue.empty()) { 1140 return; 1141 } 1142 1143 QueueEntry &entry = *mVideoQueue.begin(); 1144 1145 sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this); 1146 msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */)); 1147 1148 if (entry.mBuffer == NULL) { 1149 // EOS doesn't carry a timestamp. 1150 msg->post(); 1151 mDrainVideoQueuePending = true; 1152 return; 1153 } 1154 1155 bool needRepostDrainVideoQueue = false; 1156 int64_t delayUs; 1157 int64_t nowUs = ALooper::GetNowUs(); 1158 int64_t realTimeUs; 1159 if (mFlags & FLAG_REAL_TIME) { 1160 int64_t mediaTimeUs; 1161 CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1162 realTimeUs = mediaTimeUs; 1163 } else { 1164 int64_t mediaTimeUs; 1165 CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1166 1167 { 1168 Mutex::Autolock autoLock(mLock); 1169 if (mAnchorTimeMediaUs < 0) { 1170 mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs); 1171 mAnchorTimeMediaUs = mediaTimeUs; 1172 realTimeUs = nowUs; 1173 } else if (!mVideoSampleReceived) { 1174 // Always render the first video frame. 1175 realTimeUs = nowUs; 1176 } else if (mAudioFirstAnchorTimeMediaUs < 0 1177 || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) { 1178 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs); 1179 } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) { 1180 needRepostDrainVideoQueue = true; 1181 realTimeUs = nowUs; 1182 } else { 1183 realTimeUs = nowUs; 1184 } 1185 } 1186 if (!mHasAudio) { 1187 // smooth out videos >= 10fps 1188 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); 1189 } 1190 1191 // Heuristics to handle situation when media time changed without a 1192 // discontinuity. If we have not drained an audio buffer that was 1193 // received after this buffer, repost in 10 msec. Otherwise repost 1194 // in 500 msec. 1195 delayUs = realTimeUs - nowUs; 1196 int64_t postDelayUs = -1; 1197 if (delayUs > 500000) { 1198 postDelayUs = 500000; 1199 if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) { 1200 postDelayUs = 10000; 1201 } 1202 } else if (needRepostDrainVideoQueue) { 1203 // CHECK(mPlaybackRate > 0); 1204 // CHECK(mAudioFirstAnchorTimeMediaUs >= 0); 1205 // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0); 1206 postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs; 1207 postDelayUs /= mPlaybackRate; 1208 } 1209 1210 if (postDelayUs >= 0) { 1211 msg->setWhat(kWhatPostDrainVideoQueue); 1212 msg->post(postDelayUs); 1213 mVideoScheduler->restart(); 1214 ALOGI("possible video time jump of %dms or uninitialized media clock, retrying in %dms", 1215 (int)(delayUs / 1000), (int)(postDelayUs / 1000)); 1216 mDrainVideoQueuePending = true; 1217 return; 1218 } 1219 } 1220 1221 realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000; 1222 int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000); 1223 1224 delayUs = realTimeUs - nowUs; 1225 1226 ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs); 1227 // post 2 display refreshes before rendering is due 1228 msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0); 1229 1230 mDrainVideoQueuePending = true; 1231} 1232 1233void NuPlayer::Renderer::onDrainVideoQueue() { 1234 if (mVideoQueue.empty()) { 1235 return; 1236 } 1237 1238 QueueEntry *entry = &*mVideoQueue.begin(); 1239 1240 if (entry->mBuffer == NULL) { 1241 // EOS 1242 1243 notifyEOS(false /* audio */, entry->mFinalResult); 1244 1245 mVideoQueue.erase(mVideoQueue.begin()); 1246 entry = NULL; 1247 1248 setVideoLateByUs(0); 1249 return; 1250 } 1251 1252 int64_t nowUs = ALooper::GetNowUs(); 1253 int64_t realTimeUs; 1254 int64_t mediaTimeUs = -1; 1255 if (mFlags & FLAG_REAL_TIME) { 1256 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs)); 1257 } else { 1258 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1259 1260 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs); 1261 } 1262 1263 bool tooLate = false; 1264 1265 if (!mPaused) { 1266 setVideoLateByUs(nowUs - realTimeUs); 1267 tooLate = (mVideoLateByUs > 40000); 1268 1269 if (tooLate) { 1270 ALOGV("video late by %lld us (%.2f secs)", 1271 (long long)mVideoLateByUs, mVideoLateByUs / 1E6); 1272 } else { 1273 int64_t mediaUs = 0; 1274 mMediaClock->getMediaTime(realTimeUs, &mediaUs); 1275 ALOGV("rendering video at media time %.2f secs", 1276 (mFlags & FLAG_REAL_TIME ? realTimeUs : 1277 mediaUs) / 1E6); 1278 1279 if (!(mFlags & FLAG_REAL_TIME) 1280 && mLastAudioMediaTimeUs != -1 1281 && mediaTimeUs > mLastAudioMediaTimeUs) { 1282 // If audio ends before video, video continues to drive media clock. 1283 // Also smooth out videos >= 10fps. 1284 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); 1285 } 1286 } 1287 } else { 1288 setVideoLateByUs(0); 1289 if (!mVideoSampleReceived && !mHasAudio) { 1290 // This will ensure that the first frame after a flush won't be used as anchor 1291 // when renderer is in paused state, because resume can happen any time after seek. 1292 Mutex::Autolock autoLock(mLock); 1293 clearAnchorTime_l(); 1294 } 1295 } 1296 1297 // Always render the first video frame while keeping stats on A/V sync. 1298 if (!mVideoSampleReceived) { 1299 realTimeUs = nowUs; 1300 tooLate = false; 1301 } 1302 1303 entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll); 1304 entry->mNotifyConsumed->setInt32("render", !tooLate); 1305 entry->mNotifyConsumed->post(); 1306 mVideoQueue.erase(mVideoQueue.begin()); 1307 entry = NULL; 1308 1309 mVideoSampleReceived = true; 1310 1311 if (!mPaused) { 1312 if (!mVideoRenderingStarted) { 1313 mVideoRenderingStarted = true; 1314 notifyVideoRenderingStart(); 1315 } 1316 Mutex::Autolock autoLock(mLock); 1317 notifyIfMediaRenderingStarted_l(); 1318 } 1319} 1320 1321void NuPlayer::Renderer::notifyVideoRenderingStart() { 1322 sp<AMessage> notify = mNotify->dup(); 1323 notify->setInt32("what", kWhatVideoRenderingStart); 1324 notify->post(); 1325} 1326 1327void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) { 1328 if (audio && delayUs > 0) { 1329 sp<AMessage> msg = new AMessage(kWhatEOS, this); 1330 msg->setInt32("audioEOSGeneration", mAudioEOSGeneration); 1331 msg->setInt32("finalResult", finalResult); 1332 msg->post(delayUs); 1333 return; 1334 } 1335 sp<AMessage> notify = mNotify->dup(); 1336 notify->setInt32("what", kWhatEOS); 1337 notify->setInt32("audio", static_cast<int32_t>(audio)); 1338 notify->setInt32("finalResult", finalResult); 1339 notify->post(delayUs); 1340} 1341 1342void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) { 1343 sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this); 1344 msg->setInt32("reason", reason); 1345 msg->post(); 1346} 1347 1348void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) { 1349 int32_t audio; 1350 CHECK(msg->findInt32("audio", &audio)); 1351 1352 if (dropBufferIfStale(audio, msg)) { 1353 return; 1354 } 1355 1356 if (audio) { 1357 mHasAudio = true; 1358 } else { 1359 mHasVideo = true; 1360 } 1361 1362 if (mHasVideo) { 1363 if (mVideoScheduler == NULL) { 1364 mVideoScheduler = new VideoFrameScheduler(); 1365 mVideoScheduler->init(); 1366 } 1367 } 1368 1369 sp<ABuffer> buffer; 1370 CHECK(msg->findBuffer("buffer", &buffer)); 1371 1372 sp<AMessage> notifyConsumed; 1373 CHECK(msg->findMessage("notifyConsumed", ¬ifyConsumed)); 1374 1375 QueueEntry entry; 1376 entry.mBuffer = buffer; 1377 entry.mNotifyConsumed = notifyConsumed; 1378 entry.mOffset = 0; 1379 entry.mFinalResult = OK; 1380 entry.mBufferOrdinal = ++mTotalBuffersQueued; 1381 1382 if (audio) { 1383 Mutex::Autolock autoLock(mLock); 1384 mAudioQueue.push_back(entry); 1385 postDrainAudioQueue_l(); 1386 } else { 1387 mVideoQueue.push_back(entry); 1388 postDrainVideoQueue(); 1389 } 1390 1391 Mutex::Autolock autoLock(mLock); 1392 if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) { 1393 return; 1394 } 1395 1396 sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer; 1397 sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer; 1398 1399 if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) { 1400 // EOS signalled on either queue. 1401 syncQueuesDone_l(); 1402 return; 1403 } 1404 1405 int64_t firstAudioTimeUs; 1406 int64_t firstVideoTimeUs; 1407 CHECK(firstAudioBuffer->meta() 1408 ->findInt64("timeUs", &firstAudioTimeUs)); 1409 CHECK(firstVideoBuffer->meta() 1410 ->findInt64("timeUs", &firstVideoTimeUs)); 1411 1412 int64_t diff = firstVideoTimeUs - firstAudioTimeUs; 1413 1414 ALOGV("queueDiff = %.2f secs", diff / 1E6); 1415 1416 if (diff > 100000ll) { 1417 // Audio data starts More than 0.1 secs before video. 1418 // Drop some audio. 1419 1420 (*mAudioQueue.begin()).mNotifyConsumed->post(); 1421 mAudioQueue.erase(mAudioQueue.begin()); 1422 return; 1423 } 1424 1425 syncQueuesDone_l(); 1426} 1427 1428void NuPlayer::Renderer::syncQueuesDone_l() { 1429 if (!mSyncQueues) { 1430 return; 1431 } 1432 1433 mSyncQueues = false; 1434 1435 if (!mAudioQueue.empty()) { 1436 postDrainAudioQueue_l(); 1437 } 1438 1439 if (!mVideoQueue.empty()) { 1440 mLock.unlock(); 1441 postDrainVideoQueue(); 1442 mLock.lock(); 1443 } 1444} 1445 1446void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) { 1447 int32_t audio; 1448 CHECK(msg->findInt32("audio", &audio)); 1449 1450 if (dropBufferIfStale(audio, msg)) { 1451 return; 1452 } 1453 1454 int32_t finalResult; 1455 CHECK(msg->findInt32("finalResult", &finalResult)); 1456 1457 QueueEntry entry; 1458 entry.mOffset = 0; 1459 entry.mFinalResult = finalResult; 1460 1461 if (audio) { 1462 Mutex::Autolock autoLock(mLock); 1463 if (mAudioQueue.empty() && mSyncQueues) { 1464 syncQueuesDone_l(); 1465 } 1466 mAudioQueue.push_back(entry); 1467 postDrainAudioQueue_l(); 1468 } else { 1469 if (mVideoQueue.empty() && getSyncQueues()) { 1470 Mutex::Autolock autoLock(mLock); 1471 syncQueuesDone_l(); 1472 } 1473 mVideoQueue.push_back(entry); 1474 postDrainVideoQueue(); 1475 } 1476} 1477 1478void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) { 1479 int32_t audio, notifyComplete; 1480 CHECK(msg->findInt32("audio", &audio)); 1481 1482 { 1483 Mutex::Autolock autoLock(mLock); 1484 if (audio) { 1485 notifyComplete = mNotifyCompleteAudio; 1486 mNotifyCompleteAudio = false; 1487 mLastAudioMediaTimeUs = -1; 1488 } else { 1489 notifyComplete = mNotifyCompleteVideo; 1490 mNotifyCompleteVideo = false; 1491 } 1492 1493 // If we're currently syncing the queues, i.e. dropping audio while 1494 // aligning the first audio/video buffer times and only one of the 1495 // two queues has data, we may starve that queue by not requesting 1496 // more buffers from the decoder. If the other source then encounters 1497 // a discontinuity that leads to flushing, we'll never find the 1498 // corresponding discontinuity on the other queue. 1499 // Therefore we'll stop syncing the queues if at least one of them 1500 // is flushed. 1501 syncQueuesDone_l(); 1502 clearAnchorTime_l(); 1503 } 1504 1505 ALOGV("flushing %s", audio ? "audio" : "video"); 1506 if (audio) { 1507 { 1508 Mutex::Autolock autoLock(mLock); 1509 flushQueue(&mAudioQueue); 1510 1511 ++mAudioDrainGeneration; 1512 ++mAudioEOSGeneration; 1513 prepareForMediaRenderingStart_l(); 1514 1515 // the frame count will be reset after flush. 1516 clearAudioFirstAnchorTime_l(); 1517 } 1518 1519 mDrainAudioQueuePending = false; 1520 1521 if (offloadingAudio()) { 1522 mAudioSink->pause(); 1523 mAudioSink->flush(); 1524 if (!mPaused) { 1525 mAudioSink->start(); 1526 } 1527 } else { 1528 mAudioSink->pause(); 1529 mAudioSink->flush(); 1530 // Call stop() to signal to the AudioSink to completely fill the 1531 // internal buffer before resuming playback. 1532 // FIXME: this is ignored after flush(). 1533 mAudioSink->stop(); 1534 if (mPaused) { 1535 // Race condition: if renderer is paused and audio sink is stopped, 1536 // we need to make sure that the audio track buffer fully drains 1537 // before delivering data. 1538 // FIXME: remove this if we can detect if stop() is complete. 1539 const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms) 1540 mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs; 1541 } else { 1542 mAudioSink->start(); 1543 } 1544 mNumFramesWritten = 0; 1545 } 1546 mNextAudioClockUpdateTimeUs = -1; 1547 } else { 1548 flushQueue(&mVideoQueue); 1549 1550 mDrainVideoQueuePending = false; 1551 1552 if (mVideoScheduler != NULL) { 1553 mVideoScheduler->restart(); 1554 } 1555 1556 Mutex::Autolock autoLock(mLock); 1557 ++mVideoDrainGeneration; 1558 prepareForMediaRenderingStart_l(); 1559 } 1560 1561 mVideoSampleReceived = false; 1562 1563 if (notifyComplete) { 1564 notifyFlushComplete(audio); 1565 } 1566} 1567 1568void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) { 1569 while (!queue->empty()) { 1570 QueueEntry *entry = &*queue->begin(); 1571 1572 if (entry->mBuffer != NULL) { 1573 entry->mNotifyConsumed->post(); 1574 } 1575 1576 queue->erase(queue->begin()); 1577 entry = NULL; 1578 } 1579} 1580 1581void NuPlayer::Renderer::notifyFlushComplete(bool audio) { 1582 sp<AMessage> notify = mNotify->dup(); 1583 notify->setInt32("what", kWhatFlushComplete); 1584 notify->setInt32("audio", static_cast<int32_t>(audio)); 1585 notify->post(); 1586} 1587 1588bool NuPlayer::Renderer::dropBufferIfStale( 1589 bool audio, const sp<AMessage> &msg) { 1590 int32_t queueGeneration; 1591 CHECK(msg->findInt32("queueGeneration", &queueGeneration)); 1592 1593 if (queueGeneration == getQueueGeneration(audio)) { 1594 return false; 1595 } 1596 1597 sp<AMessage> notifyConsumed; 1598 if (msg->findMessage("notifyConsumed", ¬ifyConsumed)) { 1599 notifyConsumed->post(); 1600 } 1601 1602 return true; 1603} 1604 1605void NuPlayer::Renderer::onAudioSinkChanged() { 1606 if (offloadingAudio()) { 1607 return; 1608 } 1609 CHECK(!mDrainAudioQueuePending); 1610 mNumFramesWritten = 0; 1611 { 1612 Mutex::Autolock autoLock(mLock); 1613 mAnchorNumFramesWritten = -1; 1614 } 1615 uint32_t written; 1616 if (mAudioSink->getFramesWritten(&written) == OK) { 1617 mNumFramesWritten = written; 1618 } 1619} 1620 1621void NuPlayer::Renderer::onDisableOffloadAudio() { 1622 Mutex::Autolock autoLock(mLock); 1623 mFlags &= ~FLAG_OFFLOAD_AUDIO; 1624 ++mAudioDrainGeneration; 1625 if (mAudioRenderingStartGeneration != -1) { 1626 prepareForMediaRenderingStart_l(); 1627 } 1628} 1629 1630void NuPlayer::Renderer::onEnableOffloadAudio() { 1631 Mutex::Autolock autoLock(mLock); 1632 mFlags |= FLAG_OFFLOAD_AUDIO; 1633 ++mAudioDrainGeneration; 1634 if (mAudioRenderingStartGeneration != -1) { 1635 prepareForMediaRenderingStart_l(); 1636 } 1637} 1638 1639void NuPlayer::Renderer::onPause() { 1640 if (mPaused) { 1641 return; 1642 } 1643 1644 { 1645 Mutex::Autolock autoLock(mLock); 1646 // we do not increment audio drain generation so that we fill audio buffer during pause. 1647 ++mVideoDrainGeneration; 1648 prepareForMediaRenderingStart_l(); 1649 mPaused = true; 1650 mMediaClock->setPlaybackRate(0.0); 1651 } 1652 1653 mDrainAudioQueuePending = false; 1654 mDrainVideoQueuePending = false; 1655 1656 // Note: audio data may not have been decoded, and the AudioSink may not be opened. 1657 mAudioSink->pause(); 1658 startAudioOffloadPauseTimeout(); 1659 1660 ALOGV("now paused audio queue has %zu entries, video has %zu entries", 1661 mAudioQueue.size(), mVideoQueue.size()); 1662} 1663 1664void NuPlayer::Renderer::onResume() { 1665 if (!mPaused) { 1666 return; 1667 } 1668 1669 // Note: audio data may not have been decoded, and the AudioSink may not be opened. 1670 cancelAudioOffloadPauseTimeout(); 1671 if (mAudioSink->ready()) { 1672 status_t err = mAudioSink->start(); 1673 if (err != OK) { 1674 ALOGE("cannot start AudioSink err %d", err); 1675 notifyAudioTearDown(kDueToError); 1676 } 1677 } 1678 1679 { 1680 Mutex::Autolock autoLock(mLock); 1681 mPaused = false; 1682 // rendering started message may have been delayed if we were paused. 1683 if (mRenderingDataDelivered) { 1684 notifyIfMediaRenderingStarted_l(); 1685 } 1686 // configure audiosink as we did not do it when pausing 1687 if (mAudioSink != NULL && mAudioSink->ready()) { 1688 mAudioSink->setPlaybackRate(mPlaybackSettings); 1689 } 1690 1691 mMediaClock->setPlaybackRate(mPlaybackRate); 1692 1693 if (!mAudioQueue.empty()) { 1694 postDrainAudioQueue_l(); 1695 } 1696 } 1697 1698 if (!mVideoQueue.empty()) { 1699 postDrainVideoQueue(); 1700 } 1701} 1702 1703void NuPlayer::Renderer::onSetVideoFrameRate(float fps) { 1704 if (mVideoScheduler == NULL) { 1705 mVideoScheduler = new VideoFrameScheduler(); 1706 } 1707 mVideoScheduler->init(fps); 1708} 1709 1710int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) { 1711 Mutex::Autolock autoLock(mLock); 1712 return (audio ? mAudioQueueGeneration : mVideoQueueGeneration); 1713} 1714 1715int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) { 1716 Mutex::Autolock autoLock(mLock); 1717 return (audio ? mAudioDrainGeneration : mVideoDrainGeneration); 1718} 1719 1720bool NuPlayer::Renderer::getSyncQueues() { 1721 Mutex::Autolock autoLock(mLock); 1722 return mSyncQueues; 1723} 1724 1725void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) { 1726 if (mAudioTornDown) { 1727 return; 1728 } 1729 mAudioTornDown = true; 1730 1731 int64_t currentPositionUs; 1732 sp<AMessage> notify = mNotify->dup(); 1733 if (getCurrentPosition(¤tPositionUs) == OK) { 1734 notify->setInt64("positionUs", currentPositionUs); 1735 } 1736 1737 mAudioSink->stop(); 1738 mAudioSink->flush(); 1739 1740 notify->setInt32("what", kWhatAudioTearDown); 1741 notify->setInt32("reason", reason); 1742 notify->post(); 1743} 1744 1745void NuPlayer::Renderer::startAudioOffloadPauseTimeout() { 1746 if (offloadingAudio()) { 1747 mWakeLock->acquire(); 1748 sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this); 1749 msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration); 1750 msg->post(kOffloadPauseMaxUs); 1751 } 1752} 1753 1754void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() { 1755 // We may have called startAudioOffloadPauseTimeout() without 1756 // the AudioSink open and with offloadingAudio enabled. 1757 // 1758 // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless 1759 // we always release the wakelock and increment the pause timeout generation. 1760 // 1761 // Note: The acquired wakelock prevents the device from suspending 1762 // immediately after offload pause (in case a resume happens shortly thereafter). 1763 mWakeLock->release(true); 1764 ++mAudioOffloadPauseTimeoutGeneration; 1765} 1766 1767status_t NuPlayer::Renderer::onOpenAudioSink( 1768 const sp<AMessage> &format, 1769 bool offloadOnly, 1770 bool hasVideo, 1771 uint32_t flags) { 1772 ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)", 1773 offloadOnly, offloadingAudio()); 1774 bool audioSinkChanged = false; 1775 1776 int32_t numChannels; 1777 CHECK(format->findInt32("channel-count", &numChannels)); 1778 1779 int32_t channelMask; 1780 if (!format->findInt32("channel-mask", &channelMask)) { 1781 // signal to the AudioSink to derive the mask from count. 1782 channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER; 1783 } 1784 1785 int32_t sampleRate; 1786 CHECK(format->findInt32("sample-rate", &sampleRate)); 1787 1788 if (offloadingAudio()) { 1789 audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT; 1790 AString mime; 1791 CHECK(format->findString("mime", &mime)); 1792 status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str()); 1793 1794 if (err != OK) { 1795 ALOGE("Couldn't map mime \"%s\" to a valid " 1796 "audio_format", mime.c_str()); 1797 onDisableOffloadAudio(); 1798 } else { 1799 ALOGV("Mime \"%s\" mapped to audio_format 0x%x", 1800 mime.c_str(), audioFormat); 1801 1802 int avgBitRate = -1; 1803 format->findInt32("bitrate", &avgBitRate); 1804 1805 int32_t aacProfile = -1; 1806 if (audioFormat == AUDIO_FORMAT_AAC 1807 && format->findInt32("aac-profile", &aacProfile)) { 1808 // Redefine AAC format as per aac profile 1809 mapAACProfileToAudioFormat( 1810 audioFormat, 1811 aacProfile); 1812 } 1813 1814 audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER; 1815 offloadInfo.duration_us = -1; 1816 format->findInt64( 1817 "durationUs", &offloadInfo.duration_us); 1818 offloadInfo.sample_rate = sampleRate; 1819 offloadInfo.channel_mask = channelMask; 1820 offloadInfo.format = audioFormat; 1821 offloadInfo.stream_type = AUDIO_STREAM_MUSIC; 1822 offloadInfo.bit_rate = avgBitRate; 1823 offloadInfo.has_video = hasVideo; 1824 offloadInfo.is_streaming = true; 1825 1826 if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) { 1827 ALOGV("openAudioSink: no change in offload mode"); 1828 // no change from previous configuration, everything ok. 1829 return OK; 1830 } 1831 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1832 1833 ALOGV("openAudioSink: try to open AudioSink in offload mode"); 1834 uint32_t offloadFlags = flags; 1835 offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; 1836 offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER; 1837 audioSinkChanged = true; 1838 mAudioSink->close(); 1839 1840 err = mAudioSink->open( 1841 sampleRate, 1842 numChannels, 1843 (audio_channel_mask_t)channelMask, 1844 audioFormat, 1845 0 /* bufferCount - unused */, 1846 &NuPlayer::Renderer::AudioSinkCallback, 1847 this, 1848 (audio_output_flags_t)offloadFlags, 1849 &offloadInfo); 1850 1851 if (err == OK) { 1852 err = mAudioSink->setPlaybackRate(mPlaybackSettings); 1853 } 1854 1855 if (err == OK) { 1856 // If the playback is offloaded to h/w, we pass 1857 // the HAL some metadata information. 1858 // We don't want to do this for PCM because it 1859 // will be going through the AudioFlinger mixer 1860 // before reaching the hardware. 1861 // TODO 1862 mCurrentOffloadInfo = offloadInfo; 1863 if (!mPaused) { // for preview mode, don't start if paused 1864 err = mAudioSink->start(); 1865 } 1866 ALOGV_IF(err == OK, "openAudioSink: offload succeeded"); 1867 } 1868 if (err != OK) { 1869 // Clean up, fall back to non offload mode. 1870 mAudioSink->close(); 1871 onDisableOffloadAudio(); 1872 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1873 ALOGV("openAudioSink: offload failed"); 1874 if (offloadOnly) { 1875 notifyAudioTearDown(kForceNonOffload); 1876 } 1877 } else { 1878 mUseAudioCallback = true; // offload mode transfers data through callback 1879 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message. 1880 } 1881 } 1882 } 1883 if (!offloadOnly && !offloadingAudio()) { 1884 ALOGV("openAudioSink: open AudioSink in NON-offload mode"); 1885 uint32_t pcmFlags = flags; 1886 pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; 1887 1888 const PcmInfo info = { 1889 (audio_channel_mask_t)channelMask, 1890 (audio_output_flags_t)pcmFlags, 1891 AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat 1892 numChannels, 1893 sampleRate 1894 }; 1895 if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) { 1896 ALOGV("openAudioSink: no change in pcm mode"); 1897 // no change from previous configuration, everything ok. 1898 return OK; 1899 } 1900 1901 audioSinkChanged = true; 1902 mAudioSink->close(); 1903 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1904 // Note: It is possible to set up the callback, but not use it to send audio data. 1905 // This requires a fix in AudioSink to explicitly specify the transfer mode. 1906 mUseAudioCallback = getUseAudioCallbackSetting(); 1907 if (mUseAudioCallback) { 1908 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message. 1909 } 1910 1911 // Compute the desired buffer size. 1912 // For callback mode, the amount of time before wakeup is about half the buffer size. 1913 const uint32_t frameCount = 1914 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000; 1915 1916 // The doNotReconnect means AudioSink will signal back and let NuPlayer to re-construct 1917 // AudioSink. We don't want this when there's video because it will cause a video seek to 1918 // the previous I frame. But we do want this when there's only audio because it will give 1919 // NuPlayer a chance to switch from non-offload mode to offload mode. 1920 // So we only set doNotReconnect when there's no video. 1921 const bool doNotReconnect = !hasVideo; 1922 1923 // We should always be able to set our playback settings if the sink is closed. 1924 LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK, 1925 "onOpenAudioSink: can't set playback rate on closed sink"); 1926 status_t err = mAudioSink->open( 1927 sampleRate, 1928 numChannels, 1929 (audio_channel_mask_t)channelMask, 1930 AUDIO_FORMAT_PCM_16_BIT, 1931 0 /* bufferCount - unused */, 1932 mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL, 1933 mUseAudioCallback ? this : NULL, 1934 (audio_output_flags_t)pcmFlags, 1935 NULL, 1936 doNotReconnect, 1937 frameCount); 1938 if (err != OK) { 1939 ALOGW("openAudioSink: non offloaded open failed status: %d", err); 1940 mAudioSink->close(); 1941 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1942 return err; 1943 } 1944 mCurrentPcmInfo = info; 1945 if (!mPaused) { // for preview mode, don't start if paused 1946 mAudioSink->start(); 1947 } 1948 } 1949 if (audioSinkChanged) { 1950 onAudioSinkChanged(); 1951 } 1952 mAudioTornDown = false; 1953 return OK; 1954} 1955 1956void NuPlayer::Renderer::onCloseAudioSink() { 1957 mAudioSink->close(); 1958 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1959 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1960} 1961 1962} // namespace android 1963 1964