NuPlayerRenderer.cpp revision c387f2b719a1a26c8306f77d79cc9a6f26b36813
/*
 * Copyright (C) 2010 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define LOG_NDEBUG 0
#define LOG_TAG "NuPlayerRenderer"
#include <utils/Log.h>

#include "NuPlayerRenderer.h"
#include <algorithm>
#include <cutils/properties.h>
#include <media/stagefright/foundation/ADebug.h>
#include <media/stagefright/foundation/AMessage.h>
#include <media/stagefright/foundation/AUtils.h>
#include <media/stagefright/foundation/AWakeLock.h>
#include <media/stagefright/MediaClock.h>
#include <media/stagefright/MediaErrors.h>
#include <media/stagefright/MetaData.h>
#include <media/stagefright/Utils.h>
#include <media/stagefright/VideoFrameScheduler.h>
#include <media/MediaCodecBuffer.h>

#include <inttypes.h>

namespace android {

/*
 * Example of common configuration settings in shell script form

   #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager
   adb shell setprop audio.offload.disable 1

   #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager
   adb shell setprop audio.offload.video 1

   #Use audio callbacks for PCM data
   adb shell setprop media.stagefright.audio.cbk 1

   #Use deep buffer for PCM data with video (it is generally enabled for audio-only)
   adb shell setprop media.stagefright.audio.deep 1

   #Set size of buffers for pcm audio sink in msec (example: 1000 msec)
   adb shell setprop media.stagefright.audio.sink 1000

 * These configurations take effect for the next track played (not the current track).
 */

// Returns true if PCM data should be delivered through the AudioSink callback
// rather than pushed from the renderer's looper (system property override).
static inline bool getUseAudioCallbackSetting() {
    return property_get_bool("media.stagefright.audio.cbk", false /* default_value */);
}

// Size of the PCM audio sink buffers in milliseconds (system property override,
// default 500 ms).
static inline int32_t getAudioSinkPcmMsSetting() {
    return property_get_int32(
            "media.stagefright.audio.sink", 500 /* default_value */);
}

// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink
// is closed to allow the audio DSP to power down.
static const int64_t kOffloadPauseMaxUs = 10000000ll;

// Maximum allowed delay from AudioSink, 1.5 seconds.
static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll;

// Minimum spacing between successive audio-driven MediaClock anchor updates.
static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000;

// static
// Sentinel PCM configuration meaning "no PCM sink currently open".
const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = {
        AUDIO_CHANNEL_NONE,
        AUDIO_OUTPUT_FLAG_NONE,
        AUDIO_FORMAT_INVALID,
        0, // mNumChannels
        0 // mSampleRate
};

// static
const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll;

// Constructs a renderer bound to the given AudioSink. |notify| is the message
// used to report events (EOS, errors, rendering start) back to NuPlayer.
// All state is initialized to "nothing queued / clock not anchored".
NuPlayer::Renderer::Renderer(
        const sp<MediaPlayerBase::AudioSink> &sink,
        const sp<AMessage> &notify,
        uint32_t flags)
    : mAudioSink(sink),
      mUseVirtualAudioSink(false),
      mNotify(notify),
      mFlags(flags),
      mNumFramesWritten(0),
      mDrainAudioQueuePending(false),
      mDrainVideoQueuePending(false),
      mAudioQueueGeneration(0),
      mVideoQueueGeneration(0),
      mAudioDrainGeneration(0),
      mVideoDrainGeneration(0),
      mAudioEOSGeneration(0),
      mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT),
      mAudioFirstAnchorTimeMediaUs(-1),
      mAnchorTimeMediaUs(-1),
      mAnchorNumFramesWritten(-1),
      mVideoLateByUs(0ll),
      mHasAudio(false),
      mHasVideo(false),
      mNotifyCompleteAudio(false),
      mNotifyCompleteVideo(false),
      mSyncQueues(false),
      mPaused(false),
      mPauseDrainAudioAllowedUs(0),
      mVideoSampleReceived(false),
      mVideoRenderingStarted(false),
      mVideoRenderingStartGeneration(0),
      mAudioRenderingStartGeneration(0),
      mRenderingDataDelivered(false),
      mNextAudioClockUpdateTimeUs(-1),
      mLastAudioMediaTimeUs(-1),
      mAudioOffloadPauseTimeoutGeneration(0),
      mAudioTornDown(false),
      mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER),
      mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER),
      mTotalBuffersQueued(0),
      mLastAudioBufferDrained(0),
      mUseAudioCallback(false),
      mWakeLock(new AWakeLock()) {
    mMediaClock = new MediaClock;
    mPlaybackRate = mPlaybackSettings.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
}

// Tears down the renderer. For offloaded audio the sink is stopped, flushed
// and closed first; queued buffers are released under mLock to avoid racing
// with a still-active AudioSink callback.
NuPlayer::Renderer::~Renderer() {
    if (offloadingAudio()) {
        mAudioSink->stop();
        mAudioSink->flush();
        mAudioSink->close();
    }

    // Try to avoid racing condition in case callback is still on.
    Mutex::Autolock autoLock(mLock);
    if (mUseAudioCallback) {
        flushQueue(&mAudioQueue);
        flushQueue(&mVideoQueue);
    }
    mWakeLock.clear();
    mMediaClock.clear();
    mVideoScheduler.clear();
    mNotify.clear();
    mAudioSink.clear();
}

// Asynchronously enqueues a decoded buffer for rendering. |notifyConsumed| is
// posted once the buffer has been consumed. The current queue generation is
// stamped on the message so stale buffers from before a flush are dropped.
void NuPlayer::Renderer::queueBuffer(
        bool audio,
        const sp<MediaCodecBuffer> &buffer,
        const sp<AMessage> &notifyConsumed) {
    sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setObject("buffer", buffer);
    msg->setMessage("notifyConsumed", notifyConsumed);
    msg->post();
}

// Asynchronously enqueues an end-of-stream marker for the audio or video
// queue. |finalResult| must not be OK (it is the EOS/error status to report).
void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) {
    CHECK_NE(finalResult, (status_t)OK);

    sp<AMessage> msg = new AMessage(kWhatQueueEOS, this);
    msg->setInt32("queueGeneration", getQueueGeneration(audio));
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->setInt32("finalResult", finalResult);
    msg->post();
}

status_t
// Synchronously applies new playback settings by round-tripping a
// kWhatConfigPlayback message through the renderer's looper.
NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) {
    sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this);
    writeToAMessage(msg, rate);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

// Looper-thread handler for setPlaybackSettings(). A speed of 0 is treated as
// a pause request; otherwise the rate is forwarded to the AudioSink (if open)
// and mirrored into the MediaClock.
status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) {
    if (rate.mSpeed == 0.f) {
        onPause();
        // don't call audiosink's setPlaybackRate if pausing, as pitch does not
        // have to correspond to the any non-0 speed (e.g old speed). Keep
        // settings nonetheless, using the old speed, in case audiosink changes.
        AudioPlaybackRate newRate = rate;
        newRate.mSpeed = mPlaybackSettings.mSpeed;
        mPlaybackSettings = newRate;
        return OK;
    }

    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->setPlaybackRate(rate);
        if (err != OK) {
            return err;
        }
    }
    mPlaybackSettings = rate;
    mPlaybackRate = rate.mSpeed;
    mMediaClock->setPlaybackRate(mPlaybackRate);
    return OK;
}

// Synchronously fetches the current playback settings via the looper.
status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, rate);
        }
    }
    return err;
}

// Looper-thread handler for getPlaybackSettings(). Prefers the AudioSink's
// view of the rate (it may quantize small changes); reports speed 0 if paused.
status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) {
    if (mAudioSink != NULL && mAudioSink->ready()) {
        status_t err = mAudioSink->getPlaybackRate(rate);
        if (err == OK) {
            if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) {
                ALOGW("correcting mismatch in internal/external playback rate");
            }
            // get playback settings used by audiosink, as it may be
            // slightly off due to audiosink not taking small changes.
            mPlaybackSettings = *rate;
            if (mPaused) {
                rate->mSpeed = 0.f;
            }
        }
        return err;
    }
    *rate = mPlaybackSettings;
    return OK;
}

// Synchronously applies new A/V sync settings via the looper.
status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) {
    sp<AMessage> msg = new AMessage(kWhatConfigSync, this);
    writeToAMessage(msg, sync, videoFpsHint);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
    }
    return err;
}

// Looper-thread handler for setSyncSettings(). Only the default sync source
// is accepted; non-default sources are not yet implemented.
status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) {
    if (sync.mSource != AVSYNC_SOURCE_DEFAULT) {
        return BAD_VALUE;
    }
    // TODO: support sync sources
    return INVALID_OPERATION;
}

// Synchronously fetches the current A/V sync settings via the looper.
status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) {
    sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this);
    sp<AMessage> response;
    status_t err = msg->postAndAwaitResponse(&response);
    if (err == OK && response != NULL) {
        CHECK(response->findInt32("err", &err));
        if (err == OK) {
            readFromAMessage(response, sync, videoFps);
        }
    }
    return err;
}

// Looper-thread handler for getSyncSettings(). -1 fps means "unknown".
status_t NuPlayer::Renderer::onGetSyncSettings(
        AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) {
    *sync = mSyncSettings;
    *videoFps = -1.f;
    return OK;
}

// Flushes the audio or video queue. Under mLock the queue and drain
// generations are bumped so in-flight queue/drain messages become stale,
// and the MediaClock anchor is cleared; the actual queue teardown happens
// on the looper via kWhatFlush.
void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) {
    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            mNotifyCompleteAudio |= notifyComplete;
            clearAudioFirstAnchorTime_l();
            ++mAudioQueueGeneration;
            ++mAudioDrainGeneration;
        } else {
            mNotifyCompleteVideo |= notifyComplete;
            ++mVideoQueueGeneration;
            ++mVideoDrainGeneration;
        }

        mMediaClock->clearAnchor();
        mVideoLateByUs = 0;
        mSyncQueues = false;
    }

    sp<AMessage> msg = new AMessage(kWhatFlush, this);
    msg->setInt32("audio", static_cast<int32_t>(audio));
    msg->post();
}

// Intentionally a no-op: time discontinuities are handled via flush().
void NuPlayer::Renderer::signalTimeDiscontinuity() {
}

// Requests (asynchronously) that offloaded audio be switched back to PCM.
void NuPlayer::Renderer::signalDisableOffloadAudio() {
    (new AMessage(kWhatDisableOffloadAudio, this))->post();
}

// Requests (asynchronously) that audio offloading be re-enabled.
void NuPlayer::Renderer::signalEnableOffloadAudio() {
    (new AMessage(kWhatEnableOffloadAudio, this))->post();
}

// Asynchronously pauses rendering.
void NuPlayer::Renderer::pause() {
    (new AMessage(kWhatPause, this))->post();
}

// Asynchronously resumes rendering.
void NuPlayer::Renderer::resume() {
    (new AMessage(kWhatResume, this))->post();
}

// Asynchronously informs the video frame scheduler of the content frame rate.
void NuPlayer::Renderer::setVideoFrameRate(float fps) {
    sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this);
    msg->setFloat("frame-rate", fps);
    msg->post();
}

// Called on any threads without mLock acquired.
// Returns the current media time. If the MediaClock has no anchor yet, tries
// to establish one from the AudioSink's playback position before retrying.
status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) {
    status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
    if (result == OK) {
        return result;
    }

    // MediaClock has not started yet. Try to start it if possible.
    {
        Mutex::Autolock autoLock(mLock);
        if (mAudioFirstAnchorTimeMediaUs == -1) {
            return result;
        }

        AudioTimestamp ts;
        status_t res = mAudioSink->getTimestamp(ts);
        if (res != OK) {
            return result;
        }

        // AudioSink has rendered some frames.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs)
                + mAudioFirstAnchorTimeMediaUs;
        mMediaClock->updateAnchor(nowMediaUs, nowUs, -1);
    }

    return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs);
}

// Resets the recorded media time of the first audio sample. Caller holds mLock.
void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() {
    mAudioFirstAnchorTimeMediaUs = -1;
    mMediaClock->setStartingTimeMedia(-1);
}

// Records the media time of the first audio sample, once. Caller holds mLock.
void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) {
    if (mAudioFirstAnchorTimeMediaUs == -1) {
        mAudioFirstAnchorTimeMediaUs = mediaUs;
        mMediaClock->setStartingTimeMedia(mediaUs);
    }
}

// Called on renderer looper.
// Drops the MediaClock anchor and the renderer's bookkeeping for it.
void NuPlayer::Renderer::clearAnchorTime() {
    mMediaClock->clearAnchor();
    mAnchorTimeMediaUs = -1;
    mAnchorNumFramesWritten = -1;
}

void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) {
    Mutex::Autolock autoLock(mLock);
    mVideoLateByUs = lateUs;
}

int64_t NuPlayer::Renderer::getVideoLateByUs() {
    Mutex::Autolock autoLock(mLock);
    return mVideoLateByUs;
}

// Synchronously opens the AudioSink on the renderer looper. On success,
// *isOffloaded (if non-null) reports whether the sink was opened in
// compressed-offload mode.
status_t NuPlayer::Renderer::openAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool *isOffloaded,
        bool isStreaming) {
    sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this);
    msg->setMessage("format", format);
    msg->setInt32("offload-only", offloadOnly);
    msg->setInt32("has-video", hasVideo);
    msg->setInt32("flags", flags);
    msg->setInt32("isStreaming", isStreaming);

    sp<AMessage> response;
    status_t postStatus = msg->postAndAwaitResponse(&response);

    int32_t err;
    if (postStatus != OK || response.get() == nullptr || !response->findInt32("err", &err)) {
        err = INVALID_OPERATION;
    } else if (err == OK && isOffloaded != NULL) {
        int32_t offload;
        CHECK(response->findInt32("offload", &offload));
        *isOffloaded = (offload != 0);
    }
    return err;
}

// Synchronously closes the AudioSink on the renderer looper.
void NuPlayer::Renderer::closeAudioSink() {
    sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this);

    sp<AMessage> response;
    msg->postAndAwaitResponse(&response);
}

// Asynchronously requests an audio format change (sink re-open). The request
// carries the current audio queue generation so it can be ordered relative to
// buffers already queued; |notify| is posted when the change has been handled.
void NuPlayer::Renderer::changeAudioFormat(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags,
        bool isStreaming,
        const sp<AMessage> &notify) {
    sp<AMessage> meta = new AMessage;
    meta->setMessage("format", format);
    meta->setInt32("offload-only", offloadOnly);
    meta->setInt32("has-video", hasVideo);
    meta->setInt32("flags", flags);
    meta->setInt32("isStreaming", isStreaming);

    sp<AMessage> msg = new AMessage(kWhatChangeAudioFormat, this);
    msg->setInt32("queueGeneration", getQueueGeneration(true /* audio */));
    msg->setMessage("notify", notify);
    msg->setMessage("meta", meta);
    msg->post();
}

// Central looper dispatch. Most public entry points post messages handled
// here; generation counters stamped on the messages are compared against the
// current generations so work queued before a flush/teardown is dropped.
void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) {
    switch (msg->what()) {
        case kWhatOpenAudioSink:
        {
            sp<AMessage> format;
            CHECK(msg->findMessage("format", &format));

            int32_t offloadOnly;
            CHECK(msg->findInt32("offload-only", &offloadOnly));

            int32_t hasVideo;
            CHECK(msg->findInt32("has-video", &hasVideo));

            uint32_t flags;
            CHECK(msg->findInt32("flags", (int32_t *)&flags));

            uint32_t isStreaming;
            CHECK(msg->findInt32("isStreaming", (int32_t *)&isStreaming));

            status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming);

            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->setInt32("offload", offloadingAudio());

            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            response->postReply(replyID);

            break;
        }

        case kWhatCloseAudioSink:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            onCloseAudioSink();

            sp<AMessage> response = new AMessage;
            response->postReply(replyID);
            break;
        }

        case kWhatStopAudioSink:
        {
            mAudioSink->stop();
            break;
        }

        case kWhatChangeAudioFormat:
        {
            int32_t queueGeneration;
            CHECK(msg->findInt32("queueGeneration", &queueGeneration));

            sp<AMessage> notify;
            CHECK(msg->findMessage("notify", &notify));

            if (offloadingAudio()) {
                ALOGW("changeAudioFormat should NOT be called in offload mode");
                notify->setInt32("err", INVALID_OPERATION);
                notify->post();
                break;
            }

            sp<AMessage> meta;
            CHECK(msg->findMessage("meta", &meta));

            // Handle immediately if the request is stale or nothing is queued
            // ahead of it; otherwise enqueue a marker entry so the change is
            // applied in order during draining.
            if (queueGeneration != getQueueGeneration(true /* audio */)
                    || mAudioQueue.empty()) {
                onChangeAudioFormat(meta, notify);
                break;
            }

            QueueEntry entry;
            entry.mNotifyConsumed = notify;
            entry.mMeta = meta;

            Mutex::Autolock autoLock(mLock);
            mAudioQueue.push_back(entry);
            postDrainAudioQueue_l();

            break;
        }

        case kWhatDrainAudioQueue:
        {
            mDrainAudioQueuePending = false;

            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(true /* audio */)) {
                break;
            }

            if (onDrainAudioQueue()) {
                uint32_t numFramesPlayed;
                CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed),
                         (status_t)OK);

                uint32_t numFramesPendingPlayout =
                    mNumFramesWritten - numFramesPlayed;

                // This is how long the audio sink will have data to
                // play back.
                int64_t delayUs =
                    mAudioSink->msecsPerFrame()
                        * numFramesPendingPlayout * 1000ll;
                if (mPlaybackRate > 1.0f) {
                    delayUs /= mPlaybackRate;
                }

                // Let's give it more data after about half that time
                // has elapsed.
                delayUs /= 2;
                // check the buffer size to estimate maximum delay permitted.
                const int64_t maxDrainDelayUs = std::max(
                        mAudioSink->getBufferDurationInUs(), (int64_t)500000 /* half second */);
                ALOGD_IF(delayUs > maxDrainDelayUs, "postDrainAudioQueue long delay: %lld > %lld",
                        (long long)delayUs, (long long)maxDrainDelayUs);
                Mutex::Autolock autoLock(mLock);
                postDrainAudioQueue_l(delayUs);
            }
            break;
        }

        case kWhatDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;

            onDrainVideoQueue();

            postDrainVideoQueue();
            break;
        }

        case kWhatPostDrainVideoQueue:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != getDrainGeneration(false /* audio */)) {
                break;
            }

            mDrainVideoQueuePending = false;
            postDrainVideoQueue();
            break;
        }

        case kWhatQueueBuffer:
        {
            onQueueBuffer(msg);
            break;
        }

        case kWhatQueueEOS:
        {
            onQueueEOS(msg);
            break;
        }

        case kWhatEOS:
        {
            int32_t generation;
            CHECK(msg->findInt32("audioEOSGeneration", &generation));
            if (generation != mAudioEOSGeneration) {
                break;
            }
            status_t finalResult;
            CHECK(msg->findInt32("finalResult", &finalResult));
            notifyEOS(true /* audio */, finalResult);
            break;
        }

        case kWhatConfigPlayback:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate;
            readFromAMessage(msg, &rate);
            status_t err = onConfigPlayback(rate);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetPlaybackSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT;
            status_t err = onGetPlaybackSettings(&rate);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, rate);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatConfigSync:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));
            AVSyncSettings sync;
            float videoFpsHint;
            readFromAMessage(msg, &sync, &videoFpsHint);
            status_t err = onConfigSync(sync, videoFpsHint);
            sp<AMessage> response = new AMessage;
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatGetSyncSettings:
        {
            sp<AReplyToken> replyID;
            CHECK(msg->senderAwaitsResponse(&replyID));

            ALOGV("kWhatGetSyncSettings");
            AVSyncSettings sync;
            float videoFps = -1.f;
            status_t err = onGetSyncSettings(&sync, &videoFps);
            sp<AMessage> response = new AMessage;
            if (err == OK) {
                writeToAMessage(response, sync, videoFps);
            }
            response->setInt32("err", err);
            response->postReply(replyID);
            break;
        }

        case kWhatFlush:
        {
            onFlush(msg);
            break;
        }

        case kWhatDisableOffloadAudio:
        {
            onDisableOffloadAudio();
            break;
        }

        case kWhatEnableOffloadAudio:
        {
            onEnableOffloadAudio();
            break;
        }

        case kWhatPause:
        {
            onPause();
            break;
        }

        case kWhatResume:
        {
            onResume();
            break;
        }

        case kWhatSetVideoFrameRate:
        {
            float fps;
            CHECK(msg->findFloat("frame-rate", &fps));
            onSetVideoFrameRate(fps);
            break;
        }

        case kWhatAudioTearDown:
        {
            int32_t reason;
            CHECK(msg->findInt32("reason", &reason));

            onAudioTearDown((AudioTearDownReason)reason);
            break;
        }

        case kWhatAudioOffloadPauseTimeout:
        {
            int32_t generation;
            CHECK(msg->findInt32("drainGeneration", &generation));
            if (generation != mAudioOffloadPauseTimeoutGeneration) {
                break;
            }
            ALOGV("Audio Offload tear down due to pause timeout.");
            onAudioTearDown(kDueToTimeout);
            mWakeLock->release();
            break;
        }

        default:
            TRESPASS();
            break;
    }
}

// Schedules a kWhatDrainAudioQueue after |delayUs| unless draining is already
// pending, queues are being synced, or callback mode delivers data instead.
// Caller holds mLock.
void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) {
    if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) {
        return;
    }

    if (mAudioQueue.empty()) {
        return;
    }

    // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data.
    if (mPaused) {
        const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs();
        if (diffUs > delayUs) {
            delayUs = diffUs;
        }
    }

    mDrainAudioQueuePending = true;
    sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this);
    msg->setInt32("drainGeneration", mAudioDrainGeneration);
    msg->post(delayUs);
}

// Arms the "rendering started" detection for the current drain generations.
// Caller holds mLock.
void NuPlayer::Renderer::prepareForMediaRenderingStart_l() {
    mAudioRenderingStartGeneration = mAudioDrainGeneration;
    mVideoRenderingStartGeneration = mVideoDrainGeneration;
    mRenderingDataDelivered = false;
}

// Posts kWhatMediaRenderingStart once both streams have delivered data in the
// current drain generations (deferred while paused). Caller holds mLock.
void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() {
    if (mVideoRenderingStartGeneration == mVideoDrainGeneration &&
        mAudioRenderingStartGeneration == mAudioDrainGeneration) {
        mRenderingDataDelivered = true;
        if (mPaused) {
            return;
        }
        mVideoRenderingStartGeneration = -1;
        mAudioRenderingStartGeneration = -1;

        sp<AMessage> notify = mNotify->dup();
        notify->setInt32("what", kWhatMediaRenderingStart);
        notify->post();
    }
}

// static
// AudioSink event trampoline; |cookie| is the Renderer instance. Supplies PCM
// data on FILL_BUFFER and forwards stream-end/tear-down events.
size_t NuPlayer::Renderer::AudioSinkCallback(
        MediaPlayerBase::AudioSink * /* audioSink */,
        void *buffer,
        size_t size,
        void *cookie,
        MediaPlayerBase::AudioSink::cb_event_t event) {
    NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie;

    switch (event) {
        case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER:
        {
            return me->fillAudioBuffer(buffer, size);
            break;
        }

        case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END:
        {
            ALOGV("AudioSink::CB_EVENT_STREAM_END");
            me->notifyEOSCallback();
            break;
        }

        case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN:
        {
            ALOGV("AudioSink::CB_EVENT_TEAR_DOWN");
            me->notifyAudioTearDown(kDueToError);
            break;
        }
    }

    return 0;
}

// Reports audio EOS from the AudioSink callback thread; ignored if callback
// mode has since been turned off.
void NuPlayer::Renderer::notifyEOSCallback() {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return;
    }

    notifyEOS(true /* audio */, ERROR_END_OF_STREAM);
}

// Callback-mode data pump: copies up to |size| bytes of queued PCM into
// |buffer|, posts consumed notifications, updates the MediaClock anchor, and
// handles a queued EOS marker. Runs on the AudioSink callback thread, holding
// mLock throughout. Returns the number of bytes copied.
size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) {
    Mutex::Autolock autoLock(mLock);

    if (!mUseAudioCallback) {
        return 0;
    }

    bool hasEOS = false;

    size_t sizeCopied = 0;
    bool firstEntry = true;
    QueueEntry *entry;  // will be valid after while loop if hasEOS is set.
    while (sizeCopied < size && !mAudioQueue.empty()) {
        entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) { // EOS
            hasEOS = true;
            mAudioQueue.erase(mAudioQueue.begin());
            break;
        }

        if (firstEntry && entry->mOffset == 0) {
            firstEntry = false;
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6);
            setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;
        size_t sizeRemaining = size - sizeCopied;
        if (copy > sizeRemaining) {
            copy = sizeRemaining;
        }

        memcpy((char *)buffer + sizeCopied,
               entry->mBuffer->data() + entry->mOffset,
               copy);

        entry->mOffset += copy;
        if (entry->mOffset == entry->mBuffer->size()) {
            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
        }
        sizeCopied += copy;

        notifyIfMediaRenderingStarted_l();
    }

    if (mAudioFirstAnchorTimeMediaUs >= 0) {
        int64_t nowUs = ALooper::GetNowUs();
        int64_t nowMediaUs =
            mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs);
        // we don't know how much data we are queueing for offloaded tracks.
        mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX);
    }

    // for non-offloaded audio, we need to compute the frames written because
    // there is no EVENT_STREAM_END notification. The frames written gives
    // an estimate on the pending played out duration.
    if (!offloadingAudio()) {
        mNumFramesWritten += sizeCopied / mAudioSink->frameSize();
    }

    if (hasEOS) {
        (new AMessage(kWhatStopAudioSink, this))->post();
        // As there is currently no EVENT_STREAM_END callback notification for
        // non-offloaded audio tracks, we need to post the EOS ourselves.
        if (!offloadingAudio()) {
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            ALOGV("fillAudioBuffer: notifyEOS "
                    "mNumFramesWritten:%u  finalResult:%d  postEOSDelay:%lld",
                    mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs);
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
        }
    }
    return sizeCopied;
}

// Drops all audio queue entries up to (and including) the last EOS/"eos"
// marker, posting each entry's consumed/EOS notification first. Used when the
// sink cannot drain but decoders may be blocked waiting for these replies.
void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() {
    List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it;
    bool foundEOS = false;
    while (it != mAudioQueue.end()) {
        int32_t eos;
        QueueEntry *entry = &*it++;
        if ((entry->mBuffer == nullptr && entry->mNotifyConsumed == nullptr)
                || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) {
            itEOS = it;
            foundEOS = true;
        }
    }

    if (foundEOS) {
        // post all replies before EOS and drop the samples
        for (it = mAudioQueue.begin(); it != itEOS; it++) {
            if (it->mBuffer == nullptr) {
                if (it->mNotifyConsumed == nullptr) {
                    // delay doesn't matter as we don't even have an AudioTrack
                    notifyEOS(true /* audio */, it->mFinalResult);
                } else {
                    // TAG for re-opening audio sink.
                    onChangeAudioFormat(it->mMeta, it->mNotifyConsumed);
                }
            } else {
                it->mNotifyConsumed->post();
            }
        }
        mAudioQueue.erase(mAudioQueue.begin(), itEOS);
    }
}

// Push-mode drain: writes queued PCM into the AudioSink (non-blocking).
// Returns true if another drain should be scheduled.
bool NuPlayer::Renderer::onDrainAudioQueue() {
    // do not drain audio during teardown as queued buffers may be invalid.
    if (mAudioTornDown) {
        return false;
    }
    // TODO: This call to getPosition checks if AudioTrack has been created
    // in AudioSink before draining audio. If AudioTrack doesn't exist, then
    // CHECKs on getPosition will fail.
    // We still need to figure out why AudioTrack is not created when
    // this function is called. One possible reason could be leftover
    // audio. Another possible place is to check whether decoder
    // has received INFO_FORMAT_CHANGED as the first buffer since
    // AudioSink is opened there, and possible interactions with flush
    // immediately after start. Investigate error message
    // "vorbis_dsp_synthesis returned -135", along with RTSP.
    uint32_t numFramesPlayed;
    if (mAudioSink->getPosition(&numFramesPlayed) != OK) {
        // When getPosition fails, renderer will not reschedule the draining
        // unless new samples are queued.
        // If we have pending EOS (or "eos" marker for discontinuities), we need
        // to post these now as NuPlayerDecoder might be waiting for it.
        drainAudioQueueUntilLastEOS();

        ALOGW("onDrainAudioQueue(): audio sink is not ready");
        return false;
    }

#if 0
    ssize_t numFramesAvailableToWrite =
        mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed);

    if (numFramesAvailableToWrite == mAudioSink->frameCount()) {
        ALOGI("audio sink underrun");
    } else {
        ALOGV("audio queue has %d frames left to play",
             mAudioSink->frameCount() - numFramesAvailableToWrite);
    }
#endif

    uint32_t prevFramesWritten = mNumFramesWritten;
    while (!mAudioQueue.empty()) {
        QueueEntry *entry = &*mAudioQueue.begin();

        if (entry->mBuffer == NULL) {
            if (entry->mNotifyConsumed != nullptr) {
                // TAG for re-open audio sink.
                onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed);
                mAudioQueue.erase(mAudioQueue.begin());
                continue;
            }

            // EOS
            int64_t postEOSDelayUs = 0;
            if (mAudioSink->needsTrailingPadding()) {
                postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs());
            }
            notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs);
            mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);

            mAudioQueue.erase(mAudioQueue.begin());
            entry = NULL;
            if (mAudioSink->needsTrailingPadding()) {
                // If we're not in gapless playback (i.e. through setNextPlayer), we
                // need to stop the track here, because that will play out the last
                // little bit at the end of the file. Otherwise short files won't play.
                mAudioSink->stop();
                mNumFramesWritten = 0;
            }
            return false;
        }

        mLastAudioBufferDrained = entry->mBufferOrdinal;

        // ignore 0-sized buffer which could be EOS marker with no data
        if (entry->mOffset == 0 && entry->mBuffer->size() > 0) {
            int64_t mediaTimeUs;
            CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
            ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs",
                    mediaTimeUs / 1E6);
            onNewAudioMediaTime(mediaTimeUs);
        }

        size_t copy = entry->mBuffer->size() - entry->mOffset;

        ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset,
                                            copy, false /* blocking */);
        if (written < 0) {
            // An error in AudioSink write. Perhaps the AudioSink was not properly opened.
            if (written == WOULD_BLOCK) {
                ALOGV("AudioSink write would block when writing %zu bytes", copy);
            } else {
                ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy);
                // This can only happen when AudioSink was opened with doNotReconnect flag set to
                // true, in which case the NuPlayer will handle the reconnect.
                notifyAudioTearDown(kDueToError);
            }
            break;
        }

        entry->mOffset += written;
        size_t remainder = entry->mBuffer->size() - entry->mOffset;
        if ((ssize_t)remainder < mAudioSink->frameSize()) {
            if (remainder > 0) {
                ALOGW("Corrupted audio buffer has fractional frames, discarding %zu bytes.",
                        remainder);
                entry->mOffset += remainder;
                copy -= remainder;
            }

            entry->mNotifyConsumed->post();
            mAudioQueue.erase(mAudioQueue.begin());

            entry = NULL;
        }

        size_t copiedFrames = written / mAudioSink->frameSize();
        mNumFramesWritten += copiedFrames;

        {
            Mutex::Autolock autoLock(mLock);
            int64_t maxTimeMedia;
            maxTimeMedia =
                mAnchorTimeMediaUs +
                        (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
                                * 1000LL * mAudioSink->msecsPerFrame());
            mMediaClock->updateMaxTimeMedia(maxTimeMedia);

            notifyIfMediaRenderingStarted_l();
        }

        if (written != (ssize_t)copy) {
            // A short count was received from AudioSink::write()
            //
            // AudioSink write is called in non-blocking mode.
            // It may return with a short count when:
            //
            // 1) Size to be copied is not a multiple of the frame size. Fractional frames are
            //    discarded.
            // 2) The data to be copied exceeds the available buffer in AudioSink.
            // 3) An error occurs and data has been partially copied to the buffer in AudioSink.
            // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded.

            // (Case 1)
            // Must be a multiple of the frame size. If it is not a multiple of a frame size, it
            // needs to fail, as we should not carry over fractional frames between calls.
            CHECK_EQ(copy % mAudioSink->frameSize(), 0);

            // (Case 2, 3, 4)
            // Return early to the caller.
            // Beware of calling immediately again as this may busy-loop if you are not careful.
            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
            break;
        }
    }

    // calculate whether we need to reschedule another write.
    bool reschedule = !mAudioQueue.empty()
            && (!mPaused
                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
    return reschedule;
}

// Converts a frame count to microseconds at the current track sample rate
// (offload or PCM). Returns 0 (with an error log) if no rate is known.
int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
    int32_t sampleRate = offloadingAudio() ?
            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
    if (sampleRate == 0) {
        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
        return 0;
    }
    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
}

// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
    if (mUseVirtualAudioSink) {
        // Virtual sink: derive played-out time from the system-clock-paced
        // MediaClock instead of the (stuck) AudioSink.
        int64_t nowUs = ALooper::GetNowUs();
        int64_t mediaUs;
        if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) {
            return 0ll;
        } else {
            return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs);
        }
    }
    return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs);
}

// Maps a media timestamp to the real time at which it should render.
int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
    int64_t realUs;
    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
        // If failed to get current position, e.g. due to audio clock is
        // not ready, then just play out video immediately without delay.
1156 return nowUs; 1157 } 1158 return realUs; 1159} 1160 1161void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) { 1162 Mutex::Autolock autoLock(mLock); 1163 // TRICKY: vorbis decoder generates multiple frames with the same 1164 // timestamp, so only update on the first frame with a given timestamp 1165 if (mediaTimeUs == mAnchorTimeMediaUs) { 1166 return; 1167 } 1168 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs); 1169 1170 // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start 1171 if (mNextAudioClockUpdateTimeUs == -1) { 1172 AudioTimestamp ts; 1173 if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) { 1174 mNextAudioClockUpdateTimeUs = 0; // start our clock updates 1175 } 1176 } 1177 int64_t nowUs = ALooper::GetNowUs(); 1178 if (mNextAudioClockUpdateTimeUs >= 0) { 1179 if (nowUs >= mNextAudioClockUpdateTimeUs) { 1180 int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs); 1181 mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs); 1182 mUseVirtualAudioSink = false; 1183 mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs; 1184 } 1185 } else { 1186 int64_t unused; 1187 if ((mMediaClock->getMediaTime(nowUs, &unused) != OK) 1188 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten) 1189 > kMaxAllowedAudioSinkDelayUs)) { 1190 // Enough data has been sent to AudioSink, but AudioSink has not rendered 1191 // any data yet. Something is wrong with AudioSink, e.g., the device is not 1192 // connected to audio out. 1193 // Switch to system clock. This essentially creates a virtual AudioSink with 1194 // initial latenty of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten). 1195 // This virtual AudioSink renders audio data starting from the very first sample 1196 // and it's paced by system clock. 1197 ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? 
Switching to system clock."); 1198 mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs); 1199 mUseVirtualAudioSink = true; 1200 } 1201 } 1202 mAnchorNumFramesWritten = mNumFramesWritten; 1203 mAnchorTimeMediaUs = mediaTimeUs; 1204} 1205 1206// Called without mLock acquired. 1207void NuPlayer::Renderer::postDrainVideoQueue() { 1208 if (mDrainVideoQueuePending 1209 || getSyncQueues() 1210 || (mPaused && mVideoSampleReceived)) { 1211 return; 1212 } 1213 1214 if (mVideoQueue.empty()) { 1215 return; 1216 } 1217 1218 QueueEntry &entry = *mVideoQueue.begin(); 1219 1220 sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this); 1221 msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */)); 1222 1223 if (entry.mBuffer == NULL) { 1224 // EOS doesn't carry a timestamp. 1225 msg->post(); 1226 mDrainVideoQueuePending = true; 1227 return; 1228 } 1229 1230 bool needRepostDrainVideoQueue = false; 1231 int64_t delayUs; 1232 int64_t nowUs = ALooper::GetNowUs(); 1233 int64_t realTimeUs; 1234 if (mFlags & FLAG_REAL_TIME) { 1235 int64_t mediaTimeUs; 1236 CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1237 realTimeUs = mediaTimeUs; 1238 } else { 1239 int64_t mediaTimeUs; 1240 CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1241 1242 { 1243 Mutex::Autolock autoLock(mLock); 1244 if (mAnchorTimeMediaUs < 0) { 1245 mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs); 1246 mAnchorTimeMediaUs = mediaTimeUs; 1247 realTimeUs = nowUs; 1248 } else if (!mVideoSampleReceived) { 1249 // Always render the first video frame. 
1250 realTimeUs = nowUs; 1251 } else if (mAudioFirstAnchorTimeMediaUs < 0 1252 || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) { 1253 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs); 1254 } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) { 1255 needRepostDrainVideoQueue = true; 1256 realTimeUs = nowUs; 1257 } else { 1258 realTimeUs = nowUs; 1259 } 1260 } 1261 if (!mHasAudio) { 1262 // smooth out videos >= 10fps 1263 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); 1264 } 1265 1266 // Heuristics to handle situation when media time changed without a 1267 // discontinuity. If we have not drained an audio buffer that was 1268 // received after this buffer, repost in 10 msec. Otherwise repost 1269 // in 500 msec. 1270 delayUs = realTimeUs - nowUs; 1271 int64_t postDelayUs = -1; 1272 if (delayUs > 500000) { 1273 postDelayUs = 500000; 1274 if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) { 1275 postDelayUs = 10000; 1276 } 1277 } else if (needRepostDrainVideoQueue) { 1278 // CHECK(mPlaybackRate > 0); 1279 // CHECK(mAudioFirstAnchorTimeMediaUs >= 0); 1280 // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0); 1281 postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs; 1282 postDelayUs /= mPlaybackRate; 1283 } 1284 1285 if (postDelayUs >= 0) { 1286 msg->setWhat(kWhatPostDrainVideoQueue); 1287 msg->post(postDelayUs); 1288 mVideoScheduler->restart(); 1289 ALOGI("possible video time jump of %dms (%lld : %lld) or uninitialized media clock," 1290 " retrying in %dms", 1291 (int)(delayUs / 1000), (long long)mediaTimeUs, 1292 (long long)mAudioFirstAnchorTimeMediaUs, (int)(postDelayUs / 1000)); 1293 mDrainVideoQueuePending = true; 1294 return; 1295 } 1296 } 1297 1298 realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000; 1299 int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000); 1300 1301 delayUs = realTimeUs - nowUs; 1302 1303 ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, 
delayUs); 1304 // post 2 display refreshes before rendering is due 1305 msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0); 1306 1307 mDrainVideoQueuePending = true; 1308} 1309 1310void NuPlayer::Renderer::onDrainVideoQueue() { 1311 if (mVideoQueue.empty()) { 1312 return; 1313 } 1314 1315 QueueEntry *entry = &*mVideoQueue.begin(); 1316 1317 if (entry->mBuffer == NULL) { 1318 // EOS 1319 1320 notifyEOS(false /* audio */, entry->mFinalResult); 1321 1322 mVideoQueue.erase(mVideoQueue.begin()); 1323 entry = NULL; 1324 1325 setVideoLateByUs(0); 1326 return; 1327 } 1328 1329 int64_t nowUs = ALooper::GetNowUs(); 1330 int64_t realTimeUs; 1331 int64_t mediaTimeUs = -1; 1332 if (mFlags & FLAG_REAL_TIME) { 1333 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs)); 1334 } else { 1335 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1336 1337 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs); 1338 } 1339 1340 bool tooLate = false; 1341 1342 if (!mPaused) { 1343 setVideoLateByUs(nowUs - realTimeUs); 1344 tooLate = (mVideoLateByUs > 40000); 1345 1346 if (tooLate) { 1347 ALOGV("video late by %lld us (%.2f secs)", 1348 (long long)mVideoLateByUs, mVideoLateByUs / 1E6); 1349 } else { 1350 int64_t mediaUs = 0; 1351 mMediaClock->getMediaTime(realTimeUs, &mediaUs); 1352 ALOGV("rendering video at media time %.2f secs", 1353 (mFlags & FLAG_REAL_TIME ? realTimeUs : 1354 mediaUs) / 1E6); 1355 1356 if (!(mFlags & FLAG_REAL_TIME) 1357 && mLastAudioMediaTimeUs != -1 1358 && mediaTimeUs > mLastAudioMediaTimeUs) { 1359 // If audio ends before video, video continues to drive media clock. 1360 // Also smooth out videos >= 10fps. 1361 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); 1362 } 1363 } 1364 } else { 1365 setVideoLateByUs(0); 1366 if (!mVideoSampleReceived && !mHasAudio) { 1367 // This will ensure that the first frame after a flush won't be used as anchor 1368 // when renderer is in paused state, because resume can happen any time after seek. 
1369 clearAnchorTime(); 1370 } 1371 } 1372 1373 // Always render the first video frame while keeping stats on A/V sync. 1374 if (!mVideoSampleReceived) { 1375 realTimeUs = nowUs; 1376 tooLate = false; 1377 } 1378 1379 entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll); 1380 entry->mNotifyConsumed->setInt32("render", !tooLate); 1381 entry->mNotifyConsumed->post(); 1382 mVideoQueue.erase(mVideoQueue.begin()); 1383 entry = NULL; 1384 1385 mVideoSampleReceived = true; 1386 1387 if (!mPaused) { 1388 if (!mVideoRenderingStarted) { 1389 mVideoRenderingStarted = true; 1390 notifyVideoRenderingStart(); 1391 } 1392 Mutex::Autolock autoLock(mLock); 1393 notifyIfMediaRenderingStarted_l(); 1394 } 1395} 1396 1397void NuPlayer::Renderer::notifyVideoRenderingStart() { 1398 sp<AMessage> notify = mNotify->dup(); 1399 notify->setInt32("what", kWhatVideoRenderingStart); 1400 notify->post(); 1401} 1402 1403void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) { 1404 if (audio && delayUs > 0) { 1405 sp<AMessage> msg = new AMessage(kWhatEOS, this); 1406 msg->setInt32("audioEOSGeneration", mAudioEOSGeneration); 1407 msg->setInt32("finalResult", finalResult); 1408 msg->post(delayUs); 1409 return; 1410 } 1411 sp<AMessage> notify = mNotify->dup(); 1412 notify->setInt32("what", kWhatEOS); 1413 notify->setInt32("audio", static_cast<int32_t>(audio)); 1414 notify->setInt32("finalResult", finalResult); 1415 notify->post(delayUs); 1416} 1417 1418void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) { 1419 sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this); 1420 msg->setInt32("reason", reason); 1421 msg->post(); 1422} 1423 1424void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) { 1425 int32_t audio; 1426 CHECK(msg->findInt32("audio", &audio)); 1427 1428 if (dropBufferIfStale(audio, msg)) { 1429 return; 1430 } 1431 1432 if (audio) { 1433 mHasAudio = true; 1434 } else { 1435 mHasVideo = true; 1436 } 1437 1438 if 
(mHasVideo) { 1439 if (mVideoScheduler == NULL) { 1440 mVideoScheduler = new VideoFrameScheduler(); 1441 mVideoScheduler->init(); 1442 } 1443 } 1444 1445 sp<RefBase> obj; 1446 CHECK(msg->findObject("buffer", &obj)); 1447 sp<MediaCodecBuffer> buffer = static_cast<MediaCodecBuffer *>(obj.get()); 1448 1449 sp<AMessage> notifyConsumed; 1450 CHECK(msg->findMessage("notifyConsumed", ¬ifyConsumed)); 1451 1452 QueueEntry entry; 1453 entry.mBuffer = buffer; 1454 entry.mNotifyConsumed = notifyConsumed; 1455 entry.mOffset = 0; 1456 entry.mFinalResult = OK; 1457 entry.mBufferOrdinal = ++mTotalBuffersQueued; 1458 1459 if (audio) { 1460 Mutex::Autolock autoLock(mLock); 1461 mAudioQueue.push_back(entry); 1462 postDrainAudioQueue_l(); 1463 } else { 1464 mVideoQueue.push_back(entry); 1465 postDrainVideoQueue(); 1466 } 1467 1468 Mutex::Autolock autoLock(mLock); 1469 if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) { 1470 return; 1471 } 1472 1473 sp<MediaCodecBuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer; 1474 sp<MediaCodecBuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer; 1475 1476 if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) { 1477 // EOS signalled on either queue. 1478 syncQueuesDone_l(); 1479 return; 1480 } 1481 1482 int64_t firstAudioTimeUs; 1483 int64_t firstVideoTimeUs; 1484 CHECK(firstAudioBuffer->meta() 1485 ->findInt64("timeUs", &firstAudioTimeUs)); 1486 CHECK(firstVideoBuffer->meta() 1487 ->findInt64("timeUs", &firstVideoTimeUs)); 1488 1489 int64_t diff = firstVideoTimeUs - firstAudioTimeUs; 1490 1491 ALOGV("queueDiff = %.2f secs", diff / 1E6); 1492 1493 if (diff > 100000ll) { 1494 // Audio data starts More than 0.1 secs before video. 1495 // Drop some audio. 
1496 1497 (*mAudioQueue.begin()).mNotifyConsumed->post(); 1498 mAudioQueue.erase(mAudioQueue.begin()); 1499 return; 1500 } 1501 1502 syncQueuesDone_l(); 1503} 1504 1505void NuPlayer::Renderer::syncQueuesDone_l() { 1506 if (!mSyncQueues) { 1507 return; 1508 } 1509 1510 mSyncQueues = false; 1511 1512 if (!mAudioQueue.empty()) { 1513 postDrainAudioQueue_l(); 1514 } 1515 1516 if (!mVideoQueue.empty()) { 1517 mLock.unlock(); 1518 postDrainVideoQueue(); 1519 mLock.lock(); 1520 } 1521} 1522 1523void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) { 1524 int32_t audio; 1525 CHECK(msg->findInt32("audio", &audio)); 1526 1527 if (dropBufferIfStale(audio, msg)) { 1528 return; 1529 } 1530 1531 int32_t finalResult; 1532 CHECK(msg->findInt32("finalResult", &finalResult)); 1533 1534 QueueEntry entry; 1535 entry.mOffset = 0; 1536 entry.mFinalResult = finalResult; 1537 1538 if (audio) { 1539 Mutex::Autolock autoLock(mLock); 1540 if (mAudioQueue.empty() && mSyncQueues) { 1541 syncQueuesDone_l(); 1542 } 1543 mAudioQueue.push_back(entry); 1544 postDrainAudioQueue_l(); 1545 } else { 1546 if (mVideoQueue.empty() && getSyncQueues()) { 1547 Mutex::Autolock autoLock(mLock); 1548 syncQueuesDone_l(); 1549 } 1550 mVideoQueue.push_back(entry); 1551 postDrainVideoQueue(); 1552 } 1553} 1554 1555void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) { 1556 int32_t audio, notifyComplete; 1557 CHECK(msg->findInt32("audio", &audio)); 1558 1559 { 1560 Mutex::Autolock autoLock(mLock); 1561 if (audio) { 1562 notifyComplete = mNotifyCompleteAudio; 1563 mNotifyCompleteAudio = false; 1564 mLastAudioMediaTimeUs = -1; 1565 } else { 1566 notifyComplete = mNotifyCompleteVideo; 1567 mNotifyCompleteVideo = false; 1568 } 1569 1570 // If we're currently syncing the queues, i.e. dropping audio while 1571 // aligning the first audio/video buffer times and only one of the 1572 // two queues has data, we may starve that queue by not requesting 1573 // more buffers from the decoder. 
If the other source then encounters 1574 // a discontinuity that leads to flushing, we'll never find the 1575 // corresponding discontinuity on the other queue. 1576 // Therefore we'll stop syncing the queues if at least one of them 1577 // is flushed. 1578 syncQueuesDone_l(); 1579 } 1580 clearAnchorTime(); 1581 1582 ALOGV("flushing %s", audio ? "audio" : "video"); 1583 if (audio) { 1584 { 1585 Mutex::Autolock autoLock(mLock); 1586 flushQueue(&mAudioQueue); 1587 1588 ++mAudioDrainGeneration; 1589 ++mAudioEOSGeneration; 1590 prepareForMediaRenderingStart_l(); 1591 1592 // the frame count will be reset after flush. 1593 clearAudioFirstAnchorTime_l(); 1594 } 1595 1596 mDrainAudioQueuePending = false; 1597 1598 if (offloadingAudio()) { 1599 mAudioSink->pause(); 1600 mAudioSink->flush(); 1601 if (!mPaused) { 1602 mAudioSink->start(); 1603 } 1604 } else { 1605 mAudioSink->pause(); 1606 mAudioSink->flush(); 1607 // Call stop() to signal to the AudioSink to completely fill the 1608 // internal buffer before resuming playback. 1609 // FIXME: this is ignored after flush(). 1610 mAudioSink->stop(); 1611 if (mPaused) { 1612 // Race condition: if renderer is paused and audio sink is stopped, 1613 // we need to make sure that the audio track buffer fully drains 1614 // before delivering data. 1615 // FIXME: remove this if we can detect if stop() is complete. 
1616 const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms) 1617 mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs; 1618 } else { 1619 mAudioSink->start(); 1620 } 1621 mNumFramesWritten = 0; 1622 } 1623 mNextAudioClockUpdateTimeUs = -1; 1624 } else { 1625 flushQueue(&mVideoQueue); 1626 1627 mDrainVideoQueuePending = false; 1628 1629 if (mVideoScheduler != NULL) { 1630 mVideoScheduler->restart(); 1631 } 1632 1633 Mutex::Autolock autoLock(mLock); 1634 ++mVideoDrainGeneration; 1635 prepareForMediaRenderingStart_l(); 1636 } 1637 1638 mVideoSampleReceived = false; 1639 1640 if (notifyComplete) { 1641 notifyFlushComplete(audio); 1642 } 1643} 1644 1645void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) { 1646 while (!queue->empty()) { 1647 QueueEntry *entry = &*queue->begin(); 1648 1649 if (entry->mBuffer != NULL) { 1650 entry->mNotifyConsumed->post(); 1651 } else if (entry->mNotifyConsumed != nullptr) { 1652 // Is it needed to open audio sink now? 1653 onChangeAudioFormat(entry->mMeta, entry->mNotifyConsumed); 1654 } 1655 1656 queue->erase(queue->begin()); 1657 entry = NULL; 1658 } 1659} 1660 1661void NuPlayer::Renderer::notifyFlushComplete(bool audio) { 1662 sp<AMessage> notify = mNotify->dup(); 1663 notify->setInt32("what", kWhatFlushComplete); 1664 notify->setInt32("audio", static_cast<int32_t>(audio)); 1665 notify->post(); 1666} 1667 1668bool NuPlayer::Renderer::dropBufferIfStale( 1669 bool audio, const sp<AMessage> &msg) { 1670 int32_t queueGeneration; 1671 CHECK(msg->findInt32("queueGeneration", &queueGeneration)); 1672 1673 if (queueGeneration == getQueueGeneration(audio)) { 1674 return false; 1675 } 1676 1677 sp<AMessage> notifyConsumed; 1678 if (msg->findMessage("notifyConsumed", ¬ifyConsumed)) { 1679 notifyConsumed->post(); 1680 } 1681 1682 return true; 1683} 1684 1685void NuPlayer::Renderer::onAudioSinkChanged() { 1686 if (offloadingAudio()) { 1687 return; 1688 } 1689 CHECK(!mDrainAudioQueuePending); 1690 mNumFramesWritten 
= 0; 1691 mAnchorNumFramesWritten = -1; 1692 uint32_t written; 1693 if (mAudioSink->getFramesWritten(&written) == OK) { 1694 mNumFramesWritten = written; 1695 } 1696} 1697 1698void NuPlayer::Renderer::onDisableOffloadAudio() { 1699 Mutex::Autolock autoLock(mLock); 1700 mFlags &= ~FLAG_OFFLOAD_AUDIO; 1701 ++mAudioDrainGeneration; 1702 if (mAudioRenderingStartGeneration != -1) { 1703 prepareForMediaRenderingStart_l(); 1704 } 1705} 1706 1707void NuPlayer::Renderer::onEnableOffloadAudio() { 1708 Mutex::Autolock autoLock(mLock); 1709 mFlags |= FLAG_OFFLOAD_AUDIO; 1710 ++mAudioDrainGeneration; 1711 if (mAudioRenderingStartGeneration != -1) { 1712 prepareForMediaRenderingStart_l(); 1713 } 1714} 1715 1716void NuPlayer::Renderer::onPause() { 1717 if (mPaused) { 1718 return; 1719 } 1720 1721 { 1722 Mutex::Autolock autoLock(mLock); 1723 // we do not increment audio drain generation so that we fill audio buffer during pause. 1724 ++mVideoDrainGeneration; 1725 prepareForMediaRenderingStart_l(); 1726 mPaused = true; 1727 mMediaClock->setPlaybackRate(0.0); 1728 } 1729 1730 mDrainAudioQueuePending = false; 1731 mDrainVideoQueuePending = false; 1732 1733 // Note: audio data may not have been decoded, and the AudioSink may not be opened. 1734 mAudioSink->pause(); 1735 startAudioOffloadPauseTimeout(); 1736 1737 ALOGV("now paused audio queue has %zu entries, video has %zu entries", 1738 mAudioQueue.size(), mVideoQueue.size()); 1739} 1740 1741void NuPlayer::Renderer::onResume() { 1742 if (!mPaused) { 1743 return; 1744 } 1745 1746 // Note: audio data may not have been decoded, and the AudioSink may not be opened. 
1747 cancelAudioOffloadPauseTimeout(); 1748 if (mAudioSink->ready()) { 1749 status_t err = mAudioSink->start(); 1750 if (err != OK) { 1751 ALOGE("cannot start AudioSink err %d", err); 1752 notifyAudioTearDown(kDueToError); 1753 } 1754 } 1755 1756 { 1757 Mutex::Autolock autoLock(mLock); 1758 mPaused = false; 1759 // rendering started message may have been delayed if we were paused. 1760 if (mRenderingDataDelivered) { 1761 notifyIfMediaRenderingStarted_l(); 1762 } 1763 // configure audiosink as we did not do it when pausing 1764 if (mAudioSink != NULL && mAudioSink->ready()) { 1765 mAudioSink->setPlaybackRate(mPlaybackSettings); 1766 } 1767 1768 mMediaClock->setPlaybackRate(mPlaybackRate); 1769 1770 if (!mAudioQueue.empty()) { 1771 postDrainAudioQueue_l(); 1772 } 1773 } 1774 1775 if (!mVideoQueue.empty()) { 1776 postDrainVideoQueue(); 1777 } 1778} 1779 1780void NuPlayer::Renderer::onSetVideoFrameRate(float fps) { 1781 if (mVideoScheduler == NULL) { 1782 mVideoScheduler = new VideoFrameScheduler(); 1783 } 1784 mVideoScheduler->init(fps); 1785} 1786 1787int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) { 1788 Mutex::Autolock autoLock(mLock); 1789 return (audio ? mAudioQueueGeneration : mVideoQueueGeneration); 1790} 1791 1792int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) { 1793 Mutex::Autolock autoLock(mLock); 1794 return (audio ? 
mAudioDrainGeneration : mVideoDrainGeneration); 1795} 1796 1797bool NuPlayer::Renderer::getSyncQueues() { 1798 Mutex::Autolock autoLock(mLock); 1799 return mSyncQueues; 1800} 1801 1802void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) { 1803 if (mAudioTornDown) { 1804 return; 1805 } 1806 mAudioTornDown = true; 1807 1808 int64_t currentPositionUs; 1809 sp<AMessage> notify = mNotify->dup(); 1810 if (getCurrentPosition(¤tPositionUs) == OK) { 1811 notify->setInt64("positionUs", currentPositionUs); 1812 } 1813 1814 mAudioSink->stop(); 1815 mAudioSink->flush(); 1816 1817 notify->setInt32("what", kWhatAudioTearDown); 1818 notify->setInt32("reason", reason); 1819 notify->post(); 1820} 1821 1822void NuPlayer::Renderer::startAudioOffloadPauseTimeout() { 1823 if (offloadingAudio()) { 1824 mWakeLock->acquire(); 1825 sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this); 1826 msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration); 1827 msg->post(kOffloadPauseMaxUs); 1828 } 1829} 1830 1831void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() { 1832 // We may have called startAudioOffloadPauseTimeout() without 1833 // the AudioSink open and with offloadingAudio enabled. 1834 // 1835 // When we cancel, it may be that offloadingAudio is subsequently disabled, so regardless 1836 // we always release the wakelock and increment the pause timeout generation. 1837 // 1838 // Note: The acquired wakelock prevents the device from suspending 1839 // immediately after offload pause (in case a resume happens shortly thereafter). 
1840 mWakeLock->release(true); 1841 ++mAudioOffloadPauseTimeoutGeneration; 1842} 1843 1844status_t NuPlayer::Renderer::onOpenAudioSink( 1845 const sp<AMessage> &format, 1846 bool offloadOnly, 1847 bool hasVideo, 1848 uint32_t flags, 1849 bool isStreaming) { 1850 ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)", 1851 offloadOnly, offloadingAudio()); 1852 bool audioSinkChanged = false; 1853 1854 int32_t numChannels; 1855 CHECK(format->findInt32("channel-count", &numChannels)); 1856 1857 int32_t channelMask; 1858 if (!format->findInt32("channel-mask", &channelMask)) { 1859 // signal to the AudioSink to derive the mask from count. 1860 channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER; 1861 } 1862 1863 int32_t sampleRate; 1864 CHECK(format->findInt32("sample-rate", &sampleRate)); 1865 1866 if (offloadingAudio()) { 1867 audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT; 1868 AString mime; 1869 CHECK(format->findString("mime", &mime)); 1870 status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str()); 1871 1872 if (err != OK) { 1873 ALOGE("Couldn't map mime \"%s\" to a valid " 1874 "audio_format", mime.c_str()); 1875 onDisableOffloadAudio(); 1876 } else { 1877 ALOGV("Mime \"%s\" mapped to audio_format 0x%x", 1878 mime.c_str(), audioFormat); 1879 1880 int avgBitRate = -1; 1881 format->findInt32("bitrate", &avgBitRate); 1882 1883 int32_t aacProfile = -1; 1884 if (audioFormat == AUDIO_FORMAT_AAC 1885 && format->findInt32("aac-profile", &aacProfile)) { 1886 // Redefine AAC format as per aac profile 1887 mapAACProfileToAudioFormat( 1888 audioFormat, 1889 aacProfile); 1890 } 1891 1892 audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER; 1893 offloadInfo.duration_us = -1; 1894 format->findInt64( 1895 "durationUs", &offloadInfo.duration_us); 1896 offloadInfo.sample_rate = sampleRate; 1897 offloadInfo.channel_mask = channelMask; 1898 offloadInfo.format = audioFormat; 1899 offloadInfo.stream_type = AUDIO_STREAM_MUSIC; 1900 offloadInfo.bit_rate = avgBitRate; 1901 
offloadInfo.has_video = hasVideo; 1902 offloadInfo.is_streaming = isStreaming; 1903 1904 if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) { 1905 ALOGV("openAudioSink: no change in offload mode"); 1906 // no change from previous configuration, everything ok. 1907 return OK; 1908 } 1909 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1910 1911 ALOGV("openAudioSink: try to open AudioSink in offload mode"); 1912 uint32_t offloadFlags = flags; 1913 offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; 1914 offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER; 1915 audioSinkChanged = true; 1916 mAudioSink->close(); 1917 1918 err = mAudioSink->open( 1919 sampleRate, 1920 numChannels, 1921 (audio_channel_mask_t)channelMask, 1922 audioFormat, 1923 0 /* bufferCount - unused */, 1924 &NuPlayer::Renderer::AudioSinkCallback, 1925 this, 1926 (audio_output_flags_t)offloadFlags, 1927 &offloadInfo); 1928 1929 if (err == OK) { 1930 err = mAudioSink->setPlaybackRate(mPlaybackSettings); 1931 } 1932 1933 if (err == OK) { 1934 // If the playback is offloaded to h/w, we pass 1935 // the HAL some metadata information. 1936 // We don't want to do this for PCM because it 1937 // will be going through the AudioFlinger mixer 1938 // before reaching the hardware. 1939 // TODO 1940 mCurrentOffloadInfo = offloadInfo; 1941 if (!mPaused) { // for preview mode, don't start if paused 1942 err = mAudioSink->start(); 1943 } 1944 ALOGV_IF(err == OK, "openAudioSink: offload succeeded"); 1945 } 1946 if (err != OK) { 1947 // Clean up, fall back to non offload mode. 1948 mAudioSink->close(); 1949 onDisableOffloadAudio(); 1950 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1951 ALOGV("openAudioSink: offload failed"); 1952 if (offloadOnly) { 1953 notifyAudioTearDown(kForceNonOffload); 1954 } 1955 } else { 1956 mUseAudioCallback = true; // offload mode transfers data through callback 1957 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message. 
1958 } 1959 } 1960 } 1961 if (!offloadOnly && !offloadingAudio()) { 1962 ALOGV("openAudioSink: open AudioSink in NON-offload mode"); 1963 uint32_t pcmFlags = flags; 1964 pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; 1965 1966 const PcmInfo info = { 1967 (audio_channel_mask_t)channelMask, 1968 (audio_output_flags_t)pcmFlags, 1969 AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat 1970 numChannels, 1971 sampleRate 1972 }; 1973 if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) { 1974 ALOGV("openAudioSink: no change in pcm mode"); 1975 // no change from previous configuration, everything ok. 1976 return OK; 1977 } 1978 1979 audioSinkChanged = true; 1980 mAudioSink->close(); 1981 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1982 // Note: It is possible to set up the callback, but not use it to send audio data. 1983 // This requires a fix in AudioSink to explicitly specify the transfer mode. 1984 mUseAudioCallback = getUseAudioCallbackSetting(); 1985 if (mUseAudioCallback) { 1986 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message. 1987 } 1988 1989 // Compute the desired buffer size. 1990 // For callback mode, the amount of time before wakeup is about half the buffer size. 1991 const uint32_t frameCount = 1992 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000; 1993 1994 // The doNotReconnect means AudioSink will signal back and let NuPlayer to re-construct 1995 // AudioSink. We don't want this when there's video because it will cause a video seek to 1996 // the previous I frame. But we do want this when there's only audio because it will give 1997 // NuPlayer a chance to switch from non-offload mode to offload mode. 1998 // So we only set doNotReconnect when there's no video. 1999 const bool doNotReconnect = !hasVideo; 2000 2001 // We should always be able to set our playback settings if the sink is closed. 
2002 LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK, 2003 "onOpenAudioSink: can't set playback rate on closed sink"); 2004 status_t err = mAudioSink->open( 2005 sampleRate, 2006 numChannels, 2007 (audio_channel_mask_t)channelMask, 2008 AUDIO_FORMAT_PCM_16_BIT, 2009 0 /* bufferCount - unused */, 2010 mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL, 2011 mUseAudioCallback ? this : NULL, 2012 (audio_output_flags_t)pcmFlags, 2013 NULL, 2014 doNotReconnect, 2015 frameCount); 2016 if (err != OK) { 2017 ALOGW("openAudioSink: non offloaded open failed status: %d", err); 2018 mAudioSink->close(); 2019 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 2020 return err; 2021 } 2022 mCurrentPcmInfo = info; 2023 if (!mPaused) { // for preview mode, don't start if paused 2024 mAudioSink->start(); 2025 } 2026 } 2027 if (audioSinkChanged) { 2028 onAudioSinkChanged(); 2029 } 2030 mAudioTornDown = false; 2031 return OK; 2032} 2033 2034void NuPlayer::Renderer::onCloseAudioSink() { 2035 mAudioSink->close(); 2036 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 2037 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 2038} 2039 2040void NuPlayer::Renderer::onChangeAudioFormat( 2041 const sp<AMessage> &meta, const sp<AMessage> ¬ify) { 2042 sp<AMessage> format; 2043 CHECK(meta->findMessage("format", &format)); 2044 2045 int32_t offloadOnly; 2046 CHECK(meta->findInt32("offload-only", &offloadOnly)); 2047 2048 int32_t hasVideo; 2049 CHECK(meta->findInt32("has-video", &hasVideo)); 2050 2051 uint32_t flags; 2052 CHECK(meta->findInt32("flags", (int32_t *)&flags)); 2053 2054 uint32_t isStreaming; 2055 CHECK(meta->findInt32("isStreaming", (int32_t *)&isStreaming)); 2056 2057 status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags, isStreaming); 2058 2059 if (err != OK) { 2060 notify->setInt32("err", err); 2061 } 2062 notify->post(); 2063} 2064 2065} // namespace android 2066 2067