// NuPlayerRenderer.cpp — AOSP revision ff874dc957f9ea70d87f4d627bf903e1fc86d58b
1/* 2 * Copyright (C) 2010 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17//#define LOG_NDEBUG 0 18#define LOG_TAG "NuPlayerRenderer" 19#include <utils/Log.h> 20 21#include "NuPlayerRenderer.h" 22#include <cutils/properties.h> 23#include <media/stagefright/foundation/ABuffer.h> 24#include <media/stagefright/foundation/ADebug.h> 25#include <media/stagefright/foundation/AMessage.h> 26#include <media/stagefright/foundation/AUtils.h> 27#include <media/stagefright/foundation/AWakeLock.h> 28#include <media/stagefright/MediaClock.h> 29#include <media/stagefright/MediaErrors.h> 30#include <media/stagefright/MetaData.h> 31#include <media/stagefright/Utils.h> 32#include <media/stagefright/VideoFrameScheduler.h> 33 34#include <inttypes.h> 35 36namespace android { 37 38/* 39 * Example of common configuration settings in shell script form 40 41 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager 42 adb shell setprop audio.offload.disable 1 43 44 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager 45 adb shell setprop audio.offload.video 1 46 47 #Use audio callbacks for PCM data 48 adb shell setprop media.stagefright.audio.cbk 1 49 50 #Use deep buffer for PCM data with video (it is generally enabled for audio-only) 51 adb shell setprop media.stagefright.audio.deep 1 52 53 #Set size of buffers for pcm audio sink in msec (example: 1000 msec) 54 adb shell setprop 
media.stagefright.audio.sink 1000 55 56 * These configurations take effect for the next track played (not the current track). 57 */ 58 59static inline bool getUseAudioCallbackSetting() { 60 return property_get_bool("media.stagefright.audio.cbk", false /* default_value */); 61} 62 63static inline int32_t getAudioSinkPcmMsSetting() { 64 return property_get_int32( 65 "media.stagefright.audio.sink", 500 /* default_value */); 66} 67 68// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink 69// is closed to allow the audio DSP to power down. 70static const int64_t kOffloadPauseMaxUs = 10000000ll; 71 72// Maximum allowed delay from AudioSink, 1.5 seconds. 73static const int64_t kMaxAllowedAudioSinkDelayUs = 1500000ll; 74 75static const int64_t kMinimumAudioClockUpdatePeriodUs = 20 /* msec */ * 1000; 76 77// static 78const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = { 79 AUDIO_CHANNEL_NONE, 80 AUDIO_OUTPUT_FLAG_NONE, 81 AUDIO_FORMAT_INVALID, 82 0, // mNumChannels 83 0 // mSampleRate 84}; 85 86// static 87const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll; 88 89NuPlayer::Renderer::Renderer( 90 const sp<MediaPlayerBase::AudioSink> &sink, 91 const sp<AMessage> ¬ify, 92 uint32_t flags) 93 : mAudioSink(sink), 94 mUseVirtualAudioSink(false), 95 mNotify(notify), 96 mFlags(flags), 97 mNumFramesWritten(0), 98 mDrainAudioQueuePending(false), 99 mDrainVideoQueuePending(false), 100 mAudioQueueGeneration(0), 101 mVideoQueueGeneration(0), 102 mAudioDrainGeneration(0), 103 mVideoDrainGeneration(0), 104 mAudioEOSGeneration(0), 105 mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT), 106 mAudioFirstAnchorTimeMediaUs(-1), 107 mAnchorTimeMediaUs(-1), 108 mAnchorNumFramesWritten(-1), 109 mVideoLateByUs(0ll), 110 mHasAudio(false), 111 mHasVideo(false), 112 mNotifyCompleteAudio(false), 113 mNotifyCompleteVideo(false), 114 mSyncQueues(false), 115 mPaused(false), 116 mPauseDrainAudioAllowedUs(0), 117 
mVideoSampleReceived(false), 118 mVideoRenderingStarted(false), 119 mVideoRenderingStartGeneration(0), 120 mAudioRenderingStartGeneration(0), 121 mRenderingDataDelivered(false), 122 mNextAudioClockUpdateTimeUs(-1), 123 mLastAudioMediaTimeUs(-1), 124 mAudioOffloadPauseTimeoutGeneration(0), 125 mAudioTornDown(false), 126 mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER), 127 mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER), 128 mTotalBuffersQueued(0), 129 mLastAudioBufferDrained(0), 130 mUseAudioCallback(false), 131 mWakeLock(new AWakeLock()) { 132 mMediaClock = new MediaClock; 133 mPlaybackRate = mPlaybackSettings.mSpeed; 134 mMediaClock->setPlaybackRate(mPlaybackRate); 135} 136 137NuPlayer::Renderer::~Renderer() { 138 if (offloadingAudio()) { 139 mAudioSink->stop(); 140 mAudioSink->flush(); 141 mAudioSink->close(); 142 } 143} 144 145void NuPlayer::Renderer::queueBuffer( 146 bool audio, 147 const sp<ABuffer> &buffer, 148 const sp<AMessage> ¬ifyConsumed) { 149 sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this); 150 msg->setInt32("queueGeneration", getQueueGeneration(audio)); 151 msg->setInt32("audio", static_cast<int32_t>(audio)); 152 msg->setBuffer("buffer", buffer); 153 msg->setMessage("notifyConsumed", notifyConsumed); 154 msg->post(); 155} 156 157void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) { 158 CHECK_NE(finalResult, (status_t)OK); 159 160 sp<AMessage> msg = new AMessage(kWhatQueueEOS, this); 161 msg->setInt32("queueGeneration", getQueueGeneration(audio)); 162 msg->setInt32("audio", static_cast<int32_t>(audio)); 163 msg->setInt32("finalResult", finalResult); 164 msg->post(); 165} 166 167status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) { 168 sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this); 169 writeToAMessage(msg, rate); 170 sp<AMessage> response; 171 status_t err = msg->postAndAwaitResponse(&response); 172 if (err == OK && response != NULL) { 173 CHECK(response->findInt32("err", &err)); 174 } 175 return 
err; 176} 177 178status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) { 179 if (rate.mSpeed == 0.f) { 180 onPause(); 181 // don't call audiosink's setPlaybackRate if pausing, as pitch does not 182 // have to correspond to the any non-0 speed (e.g old speed). Keep 183 // settings nonetheless, using the old speed, in case audiosink changes. 184 AudioPlaybackRate newRate = rate; 185 newRate.mSpeed = mPlaybackSettings.mSpeed; 186 mPlaybackSettings = newRate; 187 return OK; 188 } 189 190 if (mAudioSink != NULL && mAudioSink->ready()) { 191 status_t err = mAudioSink->setPlaybackRate(rate); 192 if (err != OK) { 193 return err; 194 } 195 } 196 mPlaybackSettings = rate; 197 mPlaybackRate = rate.mSpeed; 198 mMediaClock->setPlaybackRate(mPlaybackRate); 199 return OK; 200} 201 202status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) { 203 sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this); 204 sp<AMessage> response; 205 status_t err = msg->postAndAwaitResponse(&response); 206 if (err == OK && response != NULL) { 207 CHECK(response->findInt32("err", &err)); 208 if (err == OK) { 209 readFromAMessage(response, rate); 210 } 211 } 212 return err; 213} 214 215status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) { 216 if (mAudioSink != NULL && mAudioSink->ready()) { 217 status_t err = mAudioSink->getPlaybackRate(rate); 218 if (err == OK) { 219 if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) { 220 ALOGW("correcting mismatch in internal/external playback rate"); 221 } 222 // get playback settings used by audiosink, as it may be 223 // slightly off due to audiosink not taking small changes. 
224 mPlaybackSettings = *rate; 225 if (mPaused) { 226 rate->mSpeed = 0.f; 227 } 228 } 229 return err; 230 } 231 *rate = mPlaybackSettings; 232 return OK; 233} 234 235status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) { 236 sp<AMessage> msg = new AMessage(kWhatConfigSync, this); 237 writeToAMessage(msg, sync, videoFpsHint); 238 sp<AMessage> response; 239 status_t err = msg->postAndAwaitResponse(&response); 240 if (err == OK && response != NULL) { 241 CHECK(response->findInt32("err", &err)); 242 } 243 return err; 244} 245 246status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) { 247 if (sync.mSource != AVSYNC_SOURCE_DEFAULT) { 248 return BAD_VALUE; 249 } 250 // TODO: support sync sources 251 return INVALID_OPERATION; 252} 253 254status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) { 255 sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this); 256 sp<AMessage> response; 257 status_t err = msg->postAndAwaitResponse(&response); 258 if (err == OK && response != NULL) { 259 CHECK(response->findInt32("err", &err)); 260 if (err == OK) { 261 readFromAMessage(response, sync, videoFps); 262 } 263 } 264 return err; 265} 266 267status_t NuPlayer::Renderer::onGetSyncSettings( 268 AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) { 269 *sync = mSyncSettings; 270 *videoFps = -1.f; 271 return OK; 272} 273 274void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) { 275 { 276 Mutex::Autolock autoLock(mLock); 277 if (audio) { 278 mNotifyCompleteAudio |= notifyComplete; 279 clearAudioFirstAnchorTime_l(); 280 ++mAudioQueueGeneration; 281 ++mAudioDrainGeneration; 282 } else { 283 mNotifyCompleteVideo |= notifyComplete; 284 ++mVideoQueueGeneration; 285 ++mVideoDrainGeneration; 286 } 287 288 clearAnchorTime_l(); 289 mVideoLateByUs = 0; 290 mSyncQueues = false; 291 } 292 293 sp<AMessage> msg = new AMessage(kWhatFlush, this); 294 
msg->setInt32("audio", static_cast<int32_t>(audio)); 295 msg->post(); 296} 297 298void NuPlayer::Renderer::signalTimeDiscontinuity() { 299} 300 301void NuPlayer::Renderer::signalDisableOffloadAudio() { 302 (new AMessage(kWhatDisableOffloadAudio, this))->post(); 303} 304 305void NuPlayer::Renderer::signalEnableOffloadAudio() { 306 (new AMessage(kWhatEnableOffloadAudio, this))->post(); 307} 308 309void NuPlayer::Renderer::pause() { 310 (new AMessage(kWhatPause, this))->post(); 311} 312 313void NuPlayer::Renderer::resume() { 314 (new AMessage(kWhatResume, this))->post(); 315} 316 317void NuPlayer::Renderer::setVideoFrameRate(float fps) { 318 sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this); 319 msg->setFloat("frame-rate", fps); 320 msg->post(); 321} 322 323// Called on any threads without mLock acquired. 324status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) { 325 status_t result = mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs); 326 if (result == OK) { 327 return result; 328 } 329 330 // MediaClock has not started yet. Try to start it if possible. 331 { 332 Mutex::Autolock autoLock(mLock); 333 if (mAudioFirstAnchorTimeMediaUs == -1) { 334 return result; 335 } 336 337 AudioTimestamp ts; 338 status_t res = mAudioSink->getTimestamp(ts); 339 if (res != OK) { 340 return result; 341 } 342 343 // AudioSink has rendered some frames. 
344 int64_t nowUs = ALooper::GetNowUs(); 345 int64_t nowMediaUs = mAudioSink->getPlayedOutDurationUs(nowUs) 346 + mAudioFirstAnchorTimeMediaUs; 347 mMediaClock->updateAnchor(nowMediaUs, nowUs, -1); 348 } 349 350 return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs); 351} 352 353void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() { 354 mAudioFirstAnchorTimeMediaUs = -1; 355 mMediaClock->setStartingTimeMedia(-1); 356} 357 358void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) { 359 if (mAudioFirstAnchorTimeMediaUs == -1) { 360 mAudioFirstAnchorTimeMediaUs = mediaUs; 361 mMediaClock->setStartingTimeMedia(mediaUs); 362 } 363} 364 365void NuPlayer::Renderer::clearAnchorTime_l() { 366 mMediaClock->clearAnchor(); 367 mAnchorTimeMediaUs = -1; 368 mAnchorNumFramesWritten = -1; 369} 370 371void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) { 372 Mutex::Autolock autoLock(mLock); 373 mVideoLateByUs = lateUs; 374} 375 376int64_t NuPlayer::Renderer::getVideoLateByUs() { 377 Mutex::Autolock autoLock(mLock); 378 return mVideoLateByUs; 379} 380 381status_t NuPlayer::Renderer::openAudioSink( 382 const sp<AMessage> &format, 383 bool offloadOnly, 384 bool hasVideo, 385 uint32_t flags, 386 bool *isOffloaded) { 387 sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this); 388 msg->setMessage("format", format); 389 msg->setInt32("offload-only", offloadOnly); 390 msg->setInt32("has-video", hasVideo); 391 msg->setInt32("flags", flags); 392 393 sp<AMessage> response; 394 msg->postAndAwaitResponse(&response); 395 396 int32_t err; 397 if (!response->findInt32("err", &err)) { 398 err = INVALID_OPERATION; 399 } else if (err == OK && isOffloaded != NULL) { 400 int32_t offload; 401 CHECK(response->findInt32("offload", &offload)); 402 *isOffloaded = (offload != 0); 403 } 404 return err; 405} 406 407void NuPlayer::Renderer::closeAudioSink() { 408 sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this); 409 410 sp<AMessage> response; 411 
msg->postAndAwaitResponse(&response); 412} 413 414void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) { 415 switch (msg->what()) { 416 case kWhatOpenAudioSink: 417 { 418 sp<AMessage> format; 419 CHECK(msg->findMessage("format", &format)); 420 421 int32_t offloadOnly; 422 CHECK(msg->findInt32("offload-only", &offloadOnly)); 423 424 int32_t hasVideo; 425 CHECK(msg->findInt32("has-video", &hasVideo)); 426 427 uint32_t flags; 428 CHECK(msg->findInt32("flags", (int32_t *)&flags)); 429 430 status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags); 431 432 sp<AMessage> response = new AMessage; 433 response->setInt32("err", err); 434 response->setInt32("offload", offloadingAudio()); 435 436 sp<AReplyToken> replyID; 437 CHECK(msg->senderAwaitsResponse(&replyID)); 438 response->postReply(replyID); 439 440 break; 441 } 442 443 case kWhatCloseAudioSink: 444 { 445 sp<AReplyToken> replyID; 446 CHECK(msg->senderAwaitsResponse(&replyID)); 447 448 onCloseAudioSink(); 449 450 sp<AMessage> response = new AMessage; 451 response->postReply(replyID); 452 break; 453 } 454 455 case kWhatStopAudioSink: 456 { 457 mAudioSink->stop(); 458 break; 459 } 460 461 case kWhatDrainAudioQueue: 462 { 463 mDrainAudioQueuePending = false; 464 465 int32_t generation; 466 CHECK(msg->findInt32("drainGeneration", &generation)); 467 if (generation != getDrainGeneration(true /* audio */)) { 468 break; 469 } 470 471 if (onDrainAudioQueue()) { 472 uint32_t numFramesPlayed; 473 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), 474 (status_t)OK); 475 476 uint32_t numFramesPendingPlayout = 477 mNumFramesWritten - numFramesPlayed; 478 479 // This is how long the audio sink will have data to 480 // play back. 481 int64_t delayUs = 482 mAudioSink->msecsPerFrame() 483 * numFramesPendingPlayout * 1000ll; 484 if (mPlaybackRate > 1.0f) { 485 delayUs /= mPlaybackRate; 486 } 487 488 // Let's give it more data after about half that time 489 // has elapsed. 
490 Mutex::Autolock autoLock(mLock); 491 postDrainAudioQueue_l(delayUs / 2); 492 } 493 break; 494 } 495 496 case kWhatDrainVideoQueue: 497 { 498 int32_t generation; 499 CHECK(msg->findInt32("drainGeneration", &generation)); 500 if (generation != getDrainGeneration(false /* audio */)) { 501 break; 502 } 503 504 mDrainVideoQueuePending = false; 505 506 onDrainVideoQueue(); 507 508 postDrainVideoQueue(); 509 break; 510 } 511 512 case kWhatPostDrainVideoQueue: 513 { 514 int32_t generation; 515 CHECK(msg->findInt32("drainGeneration", &generation)); 516 if (generation != getDrainGeneration(false /* audio */)) { 517 break; 518 } 519 520 mDrainVideoQueuePending = false; 521 postDrainVideoQueue(); 522 break; 523 } 524 525 case kWhatQueueBuffer: 526 { 527 onQueueBuffer(msg); 528 break; 529 } 530 531 case kWhatQueueEOS: 532 { 533 onQueueEOS(msg); 534 break; 535 } 536 537 case kWhatEOS: 538 { 539 int32_t generation; 540 CHECK(msg->findInt32("audioEOSGeneration", &generation)); 541 if (generation != mAudioEOSGeneration) { 542 break; 543 } 544 status_t finalResult; 545 CHECK(msg->findInt32("finalResult", &finalResult)); 546 notifyEOS(true /* audio */, finalResult); 547 break; 548 } 549 550 case kWhatConfigPlayback: 551 { 552 sp<AReplyToken> replyID; 553 CHECK(msg->senderAwaitsResponse(&replyID)); 554 AudioPlaybackRate rate; 555 readFromAMessage(msg, &rate); 556 status_t err = onConfigPlayback(rate); 557 sp<AMessage> response = new AMessage; 558 response->setInt32("err", err); 559 response->postReply(replyID); 560 break; 561 } 562 563 case kWhatGetPlaybackSettings: 564 { 565 sp<AReplyToken> replyID; 566 CHECK(msg->senderAwaitsResponse(&replyID)); 567 AudioPlaybackRate rate = AUDIO_PLAYBACK_RATE_DEFAULT; 568 status_t err = onGetPlaybackSettings(&rate); 569 sp<AMessage> response = new AMessage; 570 if (err == OK) { 571 writeToAMessage(response, rate); 572 } 573 response->setInt32("err", err); 574 response->postReply(replyID); 575 break; 576 } 577 578 case kWhatConfigSync: 579 { 580 
sp<AReplyToken> replyID; 581 CHECK(msg->senderAwaitsResponse(&replyID)); 582 AVSyncSettings sync; 583 float videoFpsHint; 584 readFromAMessage(msg, &sync, &videoFpsHint); 585 status_t err = onConfigSync(sync, videoFpsHint); 586 sp<AMessage> response = new AMessage; 587 response->setInt32("err", err); 588 response->postReply(replyID); 589 break; 590 } 591 592 case kWhatGetSyncSettings: 593 { 594 sp<AReplyToken> replyID; 595 CHECK(msg->senderAwaitsResponse(&replyID)); 596 597 ALOGV("kWhatGetSyncSettings"); 598 AVSyncSettings sync; 599 float videoFps = -1.f; 600 status_t err = onGetSyncSettings(&sync, &videoFps); 601 sp<AMessage> response = new AMessage; 602 if (err == OK) { 603 writeToAMessage(response, sync, videoFps); 604 } 605 response->setInt32("err", err); 606 response->postReply(replyID); 607 break; 608 } 609 610 case kWhatFlush: 611 { 612 onFlush(msg); 613 break; 614 } 615 616 case kWhatDisableOffloadAudio: 617 { 618 onDisableOffloadAudio(); 619 break; 620 } 621 622 case kWhatEnableOffloadAudio: 623 { 624 onEnableOffloadAudio(); 625 break; 626 } 627 628 case kWhatPause: 629 { 630 onPause(); 631 break; 632 } 633 634 case kWhatResume: 635 { 636 onResume(); 637 break; 638 } 639 640 case kWhatSetVideoFrameRate: 641 { 642 float fps; 643 CHECK(msg->findFloat("frame-rate", &fps)); 644 onSetVideoFrameRate(fps); 645 break; 646 } 647 648 case kWhatAudioTearDown: 649 { 650 int32_t reason; 651 CHECK(msg->findInt32("reason", &reason)); 652 653 onAudioTearDown((AudioTearDownReason)reason); 654 break; 655 } 656 657 case kWhatAudioOffloadPauseTimeout: 658 { 659 int32_t generation; 660 CHECK(msg->findInt32("drainGeneration", &generation)); 661 if (generation != mAudioOffloadPauseTimeoutGeneration) { 662 break; 663 } 664 ALOGV("Audio Offload tear down due to pause timeout."); 665 onAudioTearDown(kDueToTimeout); 666 mWakeLock->release(); 667 break; 668 } 669 670 default: 671 TRESPASS(); 672 break; 673 } 674} 675 676void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) 
{ 677 if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) { 678 return; 679 } 680 681 if (mAudioQueue.empty()) { 682 return; 683 } 684 685 // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data. 686 if (mPaused) { 687 const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs(); 688 if (diffUs > delayUs) { 689 delayUs = diffUs; 690 } 691 } 692 693 mDrainAudioQueuePending = true; 694 sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this); 695 msg->setInt32("drainGeneration", mAudioDrainGeneration); 696 msg->post(delayUs); 697} 698 699void NuPlayer::Renderer::prepareForMediaRenderingStart_l() { 700 mAudioRenderingStartGeneration = mAudioDrainGeneration; 701 mVideoRenderingStartGeneration = mVideoDrainGeneration; 702 mRenderingDataDelivered = false; 703} 704 705void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() { 706 if (mVideoRenderingStartGeneration == mVideoDrainGeneration && 707 mAudioRenderingStartGeneration == mAudioDrainGeneration) { 708 mRenderingDataDelivered = true; 709 if (mPaused) { 710 return; 711 } 712 mVideoRenderingStartGeneration = -1; 713 mAudioRenderingStartGeneration = -1; 714 715 sp<AMessage> notify = mNotify->dup(); 716 notify->setInt32("what", kWhatMediaRenderingStart); 717 notify->post(); 718 } 719} 720 721// static 722size_t NuPlayer::Renderer::AudioSinkCallback( 723 MediaPlayerBase::AudioSink * /* audioSink */, 724 void *buffer, 725 size_t size, 726 void *cookie, 727 MediaPlayerBase::AudioSink::cb_event_t event) { 728 NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie; 729 730 switch (event) { 731 case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER: 732 { 733 return me->fillAudioBuffer(buffer, size); 734 break; 735 } 736 737 case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END: 738 { 739 ALOGV("AudioSink::CB_EVENT_STREAM_END"); 740 me->notifyEOS(true /* audio */, ERROR_END_OF_STREAM); 741 break; 742 } 743 744 case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN: 745 { 746 
ALOGV("AudioSink::CB_EVENT_TEAR_DOWN"); 747 me->notifyAudioTearDown(kDueToError); 748 break; 749 } 750 } 751 752 return 0; 753} 754 755size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) { 756 Mutex::Autolock autoLock(mLock); 757 758 if (!mUseAudioCallback) { 759 return 0; 760 } 761 762 bool hasEOS = false; 763 764 size_t sizeCopied = 0; 765 bool firstEntry = true; 766 QueueEntry *entry; // will be valid after while loop if hasEOS is set. 767 while (sizeCopied < size && !mAudioQueue.empty()) { 768 entry = &*mAudioQueue.begin(); 769 770 if (entry->mBuffer == NULL) { // EOS 771 hasEOS = true; 772 mAudioQueue.erase(mAudioQueue.begin()); 773 break; 774 } 775 776 if (firstEntry && entry->mOffset == 0) { 777 firstEntry = false; 778 int64_t mediaTimeUs; 779 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 780 ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6); 781 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs); 782 } 783 784 size_t copy = entry->mBuffer->size() - entry->mOffset; 785 size_t sizeRemaining = size - sizeCopied; 786 if (copy > sizeRemaining) { 787 copy = sizeRemaining; 788 } 789 790 memcpy((char *)buffer + sizeCopied, 791 entry->mBuffer->data() + entry->mOffset, 792 copy); 793 794 entry->mOffset += copy; 795 if (entry->mOffset == entry->mBuffer->size()) { 796 entry->mNotifyConsumed->post(); 797 mAudioQueue.erase(mAudioQueue.begin()); 798 entry = NULL; 799 } 800 sizeCopied += copy; 801 802 notifyIfMediaRenderingStarted_l(); 803 } 804 805 if (mAudioFirstAnchorTimeMediaUs >= 0) { 806 int64_t nowUs = ALooper::GetNowUs(); 807 int64_t nowMediaUs = 808 mAudioFirstAnchorTimeMediaUs + mAudioSink->getPlayedOutDurationUs(nowUs); 809 // we don't know how much data we are queueing for offloaded tracks. 810 mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX); 811 } 812 813 // for non-offloaded audio, we need to compute the frames written because 814 // there is no EVENT_STREAM_END notification. 
The frames written gives 815 // an estimate on the pending played out duration. 816 if (!offloadingAudio()) { 817 mNumFramesWritten += sizeCopied / mAudioSink->frameSize(); 818 } 819 820 if (hasEOS) { 821 (new AMessage(kWhatStopAudioSink, this))->post(); 822 // As there is currently no EVENT_STREAM_END callback notification for 823 // non-offloaded audio tracks, we need to post the EOS ourselves. 824 if (!offloadingAudio()) { 825 int64_t postEOSDelayUs = 0; 826 if (mAudioSink->needsTrailingPadding()) { 827 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); 828 } 829 ALOGV("fillAudioBuffer: notifyEOS " 830 "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld", 831 mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs); 832 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); 833 } 834 } 835 return sizeCopied; 836} 837 838void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() { 839 List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it; 840 bool foundEOS = false; 841 while (it != mAudioQueue.end()) { 842 int32_t eos; 843 QueueEntry *entry = &*it++; 844 if (entry->mBuffer == NULL 845 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) { 846 itEOS = it; 847 foundEOS = true; 848 } 849 } 850 851 if (foundEOS) { 852 // post all replies before EOS and drop the samples 853 for (it = mAudioQueue.begin(); it != itEOS; it++) { 854 if (it->mBuffer == NULL) { 855 // delay doesn't matter as we don't even have an AudioTrack 856 notifyEOS(true /* audio */, it->mFinalResult); 857 } else { 858 it->mNotifyConsumed->post(); 859 } 860 } 861 mAudioQueue.erase(mAudioQueue.begin(), itEOS); 862 } 863} 864 865bool NuPlayer::Renderer::onDrainAudioQueue() { 866 // do not drain audio during teardown as queued buffers may be invalid. 867 if (mAudioTornDown) { 868 return false; 869 } 870 // TODO: This call to getPosition checks if AudioTrack has been created 871 // in AudioSink before draining audio. 
If AudioTrack doesn't exist, then 872 // CHECKs on getPosition will fail. 873 // We still need to figure out why AudioTrack is not created when 874 // this function is called. One possible reason could be leftover 875 // audio. Another possible place is to check whether decoder 876 // has received INFO_FORMAT_CHANGED as the first buffer since 877 // AudioSink is opened there, and possible interactions with flush 878 // immediately after start. Investigate error message 879 // "vorbis_dsp_synthesis returned -135", along with RTSP. 880 uint32_t numFramesPlayed; 881 if (mAudioSink->getPosition(&numFramesPlayed) != OK) { 882 // When getPosition fails, renderer will not reschedule the draining 883 // unless new samples are queued. 884 // If we have pending EOS (or "eos" marker for discontinuities), we need 885 // to post these now as NuPlayerDecoder might be waiting for it. 886 drainAudioQueueUntilLastEOS(); 887 888 ALOGW("onDrainAudioQueue(): audio sink is not ready"); 889 return false; 890 } 891 892#if 0 893 ssize_t numFramesAvailableToWrite = 894 mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed); 895 896 if (numFramesAvailableToWrite == mAudioSink->frameCount()) { 897 ALOGI("audio sink underrun"); 898 } else { 899 ALOGV("audio queue has %d frames left to play", 900 mAudioSink->frameCount() - numFramesAvailableToWrite); 901 } 902#endif 903 904 uint32_t prevFramesWritten = mNumFramesWritten; 905 while (!mAudioQueue.empty()) { 906 QueueEntry *entry = &*mAudioQueue.begin(); 907 908 mLastAudioBufferDrained = entry->mBufferOrdinal; 909 910 if (entry->mBuffer == NULL) { 911 // EOS 912 int64_t postEOSDelayUs = 0; 913 if (mAudioSink->needsTrailingPadding()) { 914 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); 915 } 916 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); 917 mLastAudioMediaTimeUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten); 918 919 mAudioQueue.erase(mAudioQueue.begin()); 920 entry = NULL; 921 if 
(mAudioSink->needsTrailingPadding()) { 922 // If we're not in gapless playback (i.e. through setNextPlayer), we 923 // need to stop the track here, because that will play out the last 924 // little bit at the end of the file. Otherwise short files won't play. 925 mAudioSink->stop(); 926 mNumFramesWritten = 0; 927 } 928 return false; 929 } 930 931 // ignore 0-sized buffer which could be EOS marker with no data 932 if (entry->mOffset == 0 && entry->mBuffer->size() > 0) { 933 int64_t mediaTimeUs; 934 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 935 ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs", 936 mediaTimeUs / 1E6); 937 onNewAudioMediaTime(mediaTimeUs); 938 } 939 940 size_t copy = entry->mBuffer->size() - entry->mOffset; 941 942 ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset, 943 copy, false /* blocking */); 944 if (written < 0) { 945 // An error in AudioSink write. Perhaps the AudioSink was not properly opened. 946 if (written == WOULD_BLOCK) { 947 ALOGV("AudioSink write would block when writing %zu bytes", copy); 948 } else { 949 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy); 950 // This can only happen when AudioSink was opened with doNotReconnect flag set to 951 // true, in which case the NuPlayer will handle the reconnect. 
952 notifyAudioTearDown(kDueToError); 953 } 954 break; 955 } 956 957 entry->mOffset += written; 958 if (entry->mOffset == entry->mBuffer->size()) { 959 entry->mNotifyConsumed->post(); 960 mAudioQueue.erase(mAudioQueue.begin()); 961 962 entry = NULL; 963 } 964 965 size_t copiedFrames = written / mAudioSink->frameSize(); 966 mNumFramesWritten += copiedFrames; 967 968 { 969 Mutex::Autolock autoLock(mLock); 970 int64_t maxTimeMedia; 971 maxTimeMedia = 972 mAnchorTimeMediaUs + 973 (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL) 974 * 1000LL * mAudioSink->msecsPerFrame()); 975 mMediaClock->updateMaxTimeMedia(maxTimeMedia); 976 977 notifyIfMediaRenderingStarted_l(); 978 } 979 980 if (written != (ssize_t)copy) { 981 // A short count was received from AudioSink::write() 982 // 983 // AudioSink write is called in non-blocking mode. 984 // It may return with a short count when: 985 // 986 // 1) Size to be copied is not a multiple of the frame size. We consider this fatal. 987 // 2) The data to be copied exceeds the available buffer in AudioSink. 988 // 3) An error occurs and data has been partially copied to the buffer in AudioSink. 989 // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded. 990 991 // (Case 1) 992 // Must be a multiple of the frame size. If it is not a multiple of a frame size, it 993 // needs to fail, as we should not carry over fractional frames between calls. 994 CHECK_EQ(copy % mAudioSink->frameSize(), 0); 995 996 // (Case 2, 3, 4) 997 // Return early to the caller. 998 // Beware of calling immediately again as this may busy-loop if you are not careful. 999 ALOGV("AudioSink write short frame count %zd < %zu", written, copy); 1000 break; 1001 } 1002 } 1003 1004 // calculate whether we need to reschedule another write. 
1005 bool reschedule = !mAudioQueue.empty() 1006 && (!mPaused 1007 || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers 1008 //ALOGD("reschedule:%d empty:%d mPaused:%d prevFramesWritten:%u mNumFramesWritten:%u", 1009 // reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten); 1010 return reschedule; 1011} 1012 1013int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) { 1014 int32_t sampleRate = offloadingAudio() ? 1015 mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate; 1016 if (sampleRate == 0) { 1017 ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload"); 1018 return 0; 1019 } 1020 // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours. 1021 return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate); 1022} 1023 1024// Calculate duration of pending samples if played at normal rate (i.e., 1.0). 1025int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) { 1026 int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten); 1027 if (mUseVirtualAudioSink) { 1028 int64_t nowUs = ALooper::GetNowUs(); 1029 int64_t mediaUs; 1030 if (mMediaClock->getMediaTime(nowUs, &mediaUs) != OK) { 1031 return 0ll; 1032 } else { 1033 return writtenAudioDurationUs - (mediaUs - mAudioFirstAnchorTimeMediaUs); 1034 } 1035 } 1036 return writtenAudioDurationUs - mAudioSink->getPlayedOutDurationUs(nowUs); 1037} 1038 1039int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) { 1040 int64_t realUs; 1041 if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) { 1042 // If failed to get current position, e.g. due to audio clock is 1043 // not ready, then just play out video immediately without delay. 
1044 return nowUs; 1045 } 1046 return realUs; 1047} 1048 1049void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) { 1050 Mutex::Autolock autoLock(mLock); 1051 // TRICKY: vorbis decoder generates multiple frames with the same 1052 // timestamp, so only update on the first frame with a given timestamp 1053 if (mediaTimeUs == mAnchorTimeMediaUs) { 1054 return; 1055 } 1056 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs); 1057 1058 // mNextAudioClockUpdateTimeUs is -1 if we're waiting for audio sink to start 1059 if (mNextAudioClockUpdateTimeUs == -1) { 1060 AudioTimestamp ts; 1061 if (mAudioSink->getTimestamp(ts) == OK && ts.mPosition > 0) { 1062 mNextAudioClockUpdateTimeUs = 0; // start our clock updates 1063 } 1064 } 1065 int64_t nowUs = ALooper::GetNowUs(); 1066 if (mNextAudioClockUpdateTimeUs >= 0) { 1067 if (nowUs >= mNextAudioClockUpdateTimeUs) { 1068 int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs); 1069 mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs); 1070 mUseVirtualAudioSink = false; 1071 mNextAudioClockUpdateTimeUs = nowUs + kMinimumAudioClockUpdatePeriodUs; 1072 } 1073 } else { 1074 int64_t unused; 1075 if ((mMediaClock->getMediaTime(nowUs, &unused) != OK) 1076 && (getDurationUsIfPlayedAtSampleRate(mNumFramesWritten) 1077 > kMaxAllowedAudioSinkDelayUs)) { 1078 // Enough data has been sent to AudioSink, but AudioSink has not rendered 1079 // any data yet. Something is wrong with AudioSink, e.g., the device is not 1080 // connected to audio out. 1081 // Switch to system clock. This essentially creates a virtual AudioSink with 1082 // initial latenty of getDurationUsIfPlayedAtSampleRate(mNumFramesWritten). 1083 // This virtual AudioSink renders audio data starting from the very first sample 1084 // and it's paced by system clock. 1085 ALOGW("AudioSink stuck. ARE YOU CONNECTED TO AUDIO OUT? 
Switching to system clock."); 1086 mMediaClock->updateAnchor(mAudioFirstAnchorTimeMediaUs, nowUs, mediaTimeUs); 1087 mUseVirtualAudioSink = true; 1088 } 1089 } 1090 mAnchorNumFramesWritten = mNumFramesWritten; 1091 mAnchorTimeMediaUs = mediaTimeUs; 1092} 1093 1094// Called without mLock acquired. 1095void NuPlayer::Renderer::postDrainVideoQueue() { 1096 if (mDrainVideoQueuePending 1097 || getSyncQueues() 1098 || (mPaused && mVideoSampleReceived)) { 1099 return; 1100 } 1101 1102 if (mVideoQueue.empty()) { 1103 return; 1104 } 1105 1106 QueueEntry &entry = *mVideoQueue.begin(); 1107 1108 sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this); 1109 msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */)); 1110 1111 if (entry.mBuffer == NULL) { 1112 // EOS doesn't carry a timestamp. 1113 msg->post(); 1114 mDrainVideoQueuePending = true; 1115 return; 1116 } 1117 1118 bool needRepostDrainVideoQueue = false; 1119 int64_t delayUs; 1120 int64_t nowUs = ALooper::GetNowUs(); 1121 int64_t realTimeUs; 1122 if (mFlags & FLAG_REAL_TIME) { 1123 int64_t mediaTimeUs; 1124 CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1125 realTimeUs = mediaTimeUs; 1126 } else { 1127 int64_t mediaTimeUs; 1128 CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1129 1130 { 1131 Mutex::Autolock autoLock(mLock); 1132 if (mAnchorTimeMediaUs < 0) { 1133 mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs); 1134 mAnchorTimeMediaUs = mediaTimeUs; 1135 realTimeUs = nowUs; 1136 } else if (!mVideoSampleReceived) { 1137 // Always render the first video frame. 
1138 realTimeUs = nowUs; 1139 } else if (mAudioFirstAnchorTimeMediaUs < 0 1140 || mMediaClock->getRealTimeFor(mediaTimeUs, &realTimeUs) == OK) { 1141 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs); 1142 } else if (mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0) { 1143 needRepostDrainVideoQueue = true; 1144 realTimeUs = nowUs; 1145 } else { 1146 realTimeUs = nowUs; 1147 } 1148 } 1149 if (!mHasAudio) { 1150 // smooth out videos >= 10fps 1151 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); 1152 } 1153 1154 // Heuristics to handle situation when media time changed without a 1155 // discontinuity. If we have not drained an audio buffer that was 1156 // received after this buffer, repost in 10 msec. Otherwise repost 1157 // in 500 msec. 1158 delayUs = realTimeUs - nowUs; 1159 int64_t postDelayUs = -1; 1160 if (delayUs > 500000) { 1161 postDelayUs = 500000; 1162 if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) { 1163 postDelayUs = 10000; 1164 } 1165 } else if (needRepostDrainVideoQueue) { 1166 // CHECK(mPlaybackRate > 0); 1167 // CHECK(mAudioFirstAnchorTimeMediaUs >= 0); 1168 // CHECK(mediaTimeUs - mAudioFirstAnchorTimeMediaUs >= 0); 1169 postDelayUs = mediaTimeUs - mAudioFirstAnchorTimeMediaUs; 1170 postDelayUs /= mPlaybackRate; 1171 } 1172 1173 if (postDelayUs >= 0) { 1174 msg->setWhat(kWhatPostDrainVideoQueue); 1175 msg->post(postDelayUs); 1176 mVideoScheduler->restart(); 1177 ALOGI("possible video time jump of %dms or uninitialized media clock, retrying in %dms", 1178 (int)(delayUs / 1000), (int)(postDelayUs / 1000)); 1179 mDrainVideoQueuePending = true; 1180 return; 1181 } 1182 } 1183 1184 realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000; 1185 int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000); 1186 1187 delayUs = realTimeUs - nowUs; 1188 1189 ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs); 1190 // post 2 display refreshes before rendering is due 1191 msg->post(delayUs > 
twoVsyncsUs ? delayUs - twoVsyncsUs : 0); 1192 1193 mDrainVideoQueuePending = true; 1194} 1195 1196void NuPlayer::Renderer::onDrainVideoQueue() { 1197 if (mVideoQueue.empty()) { 1198 return; 1199 } 1200 1201 QueueEntry *entry = &*mVideoQueue.begin(); 1202 1203 if (entry->mBuffer == NULL) { 1204 // EOS 1205 1206 notifyEOS(false /* audio */, entry->mFinalResult); 1207 1208 mVideoQueue.erase(mVideoQueue.begin()); 1209 entry = NULL; 1210 1211 setVideoLateByUs(0); 1212 return; 1213 } 1214 1215 int64_t nowUs = ALooper::GetNowUs(); 1216 int64_t realTimeUs; 1217 int64_t mediaTimeUs = -1; 1218 if (mFlags & FLAG_REAL_TIME) { 1219 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs)); 1220 } else { 1221 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 1222 1223 realTimeUs = getRealTimeUs(mediaTimeUs, nowUs); 1224 } 1225 1226 bool tooLate = false; 1227 1228 if (!mPaused) { 1229 setVideoLateByUs(nowUs - realTimeUs); 1230 tooLate = (mVideoLateByUs > 40000); 1231 1232 if (tooLate) { 1233 ALOGV("video late by %lld us (%.2f secs)", 1234 (long long)mVideoLateByUs, mVideoLateByUs / 1E6); 1235 } else { 1236 int64_t mediaUs = 0; 1237 mMediaClock->getMediaTime(realTimeUs, &mediaUs); 1238 ALOGV("rendering video at media time %.2f secs", 1239 (mFlags & FLAG_REAL_TIME ? realTimeUs : 1240 mediaUs) / 1E6); 1241 1242 if (!(mFlags & FLAG_REAL_TIME) 1243 && mLastAudioMediaTimeUs != -1 1244 && mediaTimeUs > mLastAudioMediaTimeUs) { 1245 // If audio ends before video, video continues to drive media clock. 1246 // Also smooth out videos >= 10fps. 1247 mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000); 1248 } 1249 } 1250 } else { 1251 setVideoLateByUs(0); 1252 if (!mVideoSampleReceived && !mHasAudio) { 1253 // This will ensure that the first frame after a flush won't be used as anchor 1254 // when renderer is in paused state, because resume can happen any time after seek. 
1255 Mutex::Autolock autoLock(mLock); 1256 clearAnchorTime_l(); 1257 } 1258 } 1259 1260 // Always render the first video frame while keeping stats on A/V sync. 1261 if (!mVideoSampleReceived) { 1262 realTimeUs = nowUs; 1263 tooLate = false; 1264 } 1265 1266 entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll); 1267 entry->mNotifyConsumed->setInt32("render", !tooLate); 1268 entry->mNotifyConsumed->post(); 1269 mVideoQueue.erase(mVideoQueue.begin()); 1270 entry = NULL; 1271 1272 mVideoSampleReceived = true; 1273 1274 if (!mPaused) { 1275 if (!mVideoRenderingStarted) { 1276 mVideoRenderingStarted = true; 1277 notifyVideoRenderingStart(); 1278 } 1279 Mutex::Autolock autoLock(mLock); 1280 notifyIfMediaRenderingStarted_l(); 1281 } 1282} 1283 1284void NuPlayer::Renderer::notifyVideoRenderingStart() { 1285 sp<AMessage> notify = mNotify->dup(); 1286 notify->setInt32("what", kWhatVideoRenderingStart); 1287 notify->post(); 1288} 1289 1290void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) { 1291 if (audio && delayUs > 0) { 1292 sp<AMessage> msg = new AMessage(kWhatEOS, this); 1293 msg->setInt32("audioEOSGeneration", mAudioEOSGeneration); 1294 msg->setInt32("finalResult", finalResult); 1295 msg->post(delayUs); 1296 return; 1297 } 1298 sp<AMessage> notify = mNotify->dup(); 1299 notify->setInt32("what", kWhatEOS); 1300 notify->setInt32("audio", static_cast<int32_t>(audio)); 1301 notify->setInt32("finalResult", finalResult); 1302 notify->post(delayUs); 1303} 1304 1305void NuPlayer::Renderer::notifyAudioTearDown(AudioTearDownReason reason) { 1306 sp<AMessage> msg = new AMessage(kWhatAudioTearDown, this); 1307 msg->setInt32("reason", reason); 1308 msg->post(); 1309} 1310 1311void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) { 1312 int32_t audio; 1313 CHECK(msg->findInt32("audio", &audio)); 1314 1315 if (dropBufferIfStale(audio, msg)) { 1316 return; 1317 } 1318 1319 if (audio) { 1320 mHasAudio = true; 1321 } else { 1322 
mHasVideo = true; 1323 } 1324 1325 if (mHasVideo) { 1326 if (mVideoScheduler == NULL) { 1327 mVideoScheduler = new VideoFrameScheduler(); 1328 mVideoScheduler->init(); 1329 } 1330 } 1331 1332 sp<ABuffer> buffer; 1333 CHECK(msg->findBuffer("buffer", &buffer)); 1334 1335 sp<AMessage> notifyConsumed; 1336 CHECK(msg->findMessage("notifyConsumed", ¬ifyConsumed)); 1337 1338 QueueEntry entry; 1339 entry.mBuffer = buffer; 1340 entry.mNotifyConsumed = notifyConsumed; 1341 entry.mOffset = 0; 1342 entry.mFinalResult = OK; 1343 entry.mBufferOrdinal = ++mTotalBuffersQueued; 1344 1345 if (audio) { 1346 Mutex::Autolock autoLock(mLock); 1347 mAudioQueue.push_back(entry); 1348 postDrainAudioQueue_l(); 1349 } else { 1350 mVideoQueue.push_back(entry); 1351 postDrainVideoQueue(); 1352 } 1353 1354 Mutex::Autolock autoLock(mLock); 1355 if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) { 1356 return; 1357 } 1358 1359 sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer; 1360 sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer; 1361 1362 if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) { 1363 // EOS signalled on either queue. 1364 syncQueuesDone_l(); 1365 return; 1366 } 1367 1368 int64_t firstAudioTimeUs; 1369 int64_t firstVideoTimeUs; 1370 CHECK(firstAudioBuffer->meta() 1371 ->findInt64("timeUs", &firstAudioTimeUs)); 1372 CHECK(firstVideoBuffer->meta() 1373 ->findInt64("timeUs", &firstVideoTimeUs)); 1374 1375 int64_t diff = firstVideoTimeUs - firstAudioTimeUs; 1376 1377 ALOGV("queueDiff = %.2f secs", diff / 1E6); 1378 1379 if (diff > 100000ll) { 1380 // Audio data starts More than 0.1 secs before video. 1381 // Drop some audio. 
1382 1383 (*mAudioQueue.begin()).mNotifyConsumed->post(); 1384 mAudioQueue.erase(mAudioQueue.begin()); 1385 return; 1386 } 1387 1388 syncQueuesDone_l(); 1389} 1390 1391void NuPlayer::Renderer::syncQueuesDone_l() { 1392 if (!mSyncQueues) { 1393 return; 1394 } 1395 1396 mSyncQueues = false; 1397 1398 if (!mAudioQueue.empty()) { 1399 postDrainAudioQueue_l(); 1400 } 1401 1402 if (!mVideoQueue.empty()) { 1403 mLock.unlock(); 1404 postDrainVideoQueue(); 1405 mLock.lock(); 1406 } 1407} 1408 1409void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) { 1410 int32_t audio; 1411 CHECK(msg->findInt32("audio", &audio)); 1412 1413 if (dropBufferIfStale(audio, msg)) { 1414 return; 1415 } 1416 1417 int32_t finalResult; 1418 CHECK(msg->findInt32("finalResult", &finalResult)); 1419 1420 QueueEntry entry; 1421 entry.mOffset = 0; 1422 entry.mFinalResult = finalResult; 1423 1424 if (audio) { 1425 Mutex::Autolock autoLock(mLock); 1426 if (mAudioQueue.empty() && mSyncQueues) { 1427 syncQueuesDone_l(); 1428 } 1429 mAudioQueue.push_back(entry); 1430 postDrainAudioQueue_l(); 1431 } else { 1432 if (mVideoQueue.empty() && getSyncQueues()) { 1433 Mutex::Autolock autoLock(mLock); 1434 syncQueuesDone_l(); 1435 } 1436 mVideoQueue.push_back(entry); 1437 postDrainVideoQueue(); 1438 } 1439} 1440 1441void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) { 1442 int32_t audio, notifyComplete; 1443 CHECK(msg->findInt32("audio", &audio)); 1444 1445 { 1446 Mutex::Autolock autoLock(mLock); 1447 if (audio) { 1448 notifyComplete = mNotifyCompleteAudio; 1449 mNotifyCompleteAudio = false; 1450 mLastAudioMediaTimeUs = -1; 1451 } else { 1452 notifyComplete = mNotifyCompleteVideo; 1453 mNotifyCompleteVideo = false; 1454 } 1455 1456 // If we're currently syncing the queues, i.e. dropping audio while 1457 // aligning the first audio/video buffer times and only one of the 1458 // two queues has data, we may starve that queue by not requesting 1459 // more buffers from the decoder. 
If the other source then encounters 1460 // a discontinuity that leads to flushing, we'll never find the 1461 // corresponding discontinuity on the other queue. 1462 // Therefore we'll stop syncing the queues if at least one of them 1463 // is flushed. 1464 syncQueuesDone_l(); 1465 clearAnchorTime_l(); 1466 } 1467 1468 ALOGV("flushing %s", audio ? "audio" : "video"); 1469 if (audio) { 1470 { 1471 Mutex::Autolock autoLock(mLock); 1472 flushQueue(&mAudioQueue); 1473 1474 ++mAudioDrainGeneration; 1475 ++mAudioEOSGeneration; 1476 prepareForMediaRenderingStart_l(); 1477 1478 // the frame count will be reset after flush. 1479 clearAudioFirstAnchorTime_l(); 1480 } 1481 1482 mDrainAudioQueuePending = false; 1483 1484 if (offloadingAudio()) { 1485 mAudioSink->pause(); 1486 mAudioSink->flush(); 1487 if (!mPaused) { 1488 mAudioSink->start(); 1489 } 1490 } else { 1491 mAudioSink->pause(); 1492 mAudioSink->flush(); 1493 // Call stop() to signal to the AudioSink to completely fill the 1494 // internal buffer before resuming playback. 1495 // FIXME: this is ignored after flush(). 1496 mAudioSink->stop(); 1497 if (mPaused) { 1498 // Race condition: if renderer is paused and audio sink is stopped, 1499 // we need to make sure that the audio track buffer fully drains 1500 // before delivering data. 1501 // FIXME: remove this if we can detect if stop() is complete. 
1502 const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms) 1503 mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs; 1504 } else { 1505 mAudioSink->start(); 1506 } 1507 mNumFramesWritten = 0; 1508 } 1509 mNextAudioClockUpdateTimeUs = -1; 1510 } else { 1511 flushQueue(&mVideoQueue); 1512 1513 mDrainVideoQueuePending = false; 1514 1515 if (mVideoScheduler != NULL) { 1516 mVideoScheduler->restart(); 1517 } 1518 1519 Mutex::Autolock autoLock(mLock); 1520 ++mVideoDrainGeneration; 1521 prepareForMediaRenderingStart_l(); 1522 } 1523 1524 mVideoSampleReceived = false; 1525 1526 if (notifyComplete) { 1527 notifyFlushComplete(audio); 1528 } 1529} 1530 1531void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) { 1532 while (!queue->empty()) { 1533 QueueEntry *entry = &*queue->begin(); 1534 1535 if (entry->mBuffer != NULL) { 1536 entry->mNotifyConsumed->post(); 1537 } 1538 1539 queue->erase(queue->begin()); 1540 entry = NULL; 1541 } 1542} 1543 1544void NuPlayer::Renderer::notifyFlushComplete(bool audio) { 1545 sp<AMessage> notify = mNotify->dup(); 1546 notify->setInt32("what", kWhatFlushComplete); 1547 notify->setInt32("audio", static_cast<int32_t>(audio)); 1548 notify->post(); 1549} 1550 1551bool NuPlayer::Renderer::dropBufferIfStale( 1552 bool audio, const sp<AMessage> &msg) { 1553 int32_t queueGeneration; 1554 CHECK(msg->findInt32("queueGeneration", &queueGeneration)); 1555 1556 if (queueGeneration == getQueueGeneration(audio)) { 1557 return false; 1558 } 1559 1560 sp<AMessage> notifyConsumed; 1561 if (msg->findMessage("notifyConsumed", ¬ifyConsumed)) { 1562 notifyConsumed->post(); 1563 } 1564 1565 return true; 1566} 1567 1568void NuPlayer::Renderer::onAudioSinkChanged() { 1569 if (offloadingAudio()) { 1570 return; 1571 } 1572 CHECK(!mDrainAudioQueuePending); 1573 mNumFramesWritten = 0; 1574 { 1575 Mutex::Autolock autoLock(mLock); 1576 mAnchorNumFramesWritten = -1; 1577 } 1578 uint32_t written; 1579 if (mAudioSink->getFramesWritten(&written) 
== OK) { 1580 mNumFramesWritten = written; 1581 } 1582} 1583 1584void NuPlayer::Renderer::onDisableOffloadAudio() { 1585 Mutex::Autolock autoLock(mLock); 1586 mFlags &= ~FLAG_OFFLOAD_AUDIO; 1587 ++mAudioDrainGeneration; 1588 if (mAudioRenderingStartGeneration != -1) { 1589 prepareForMediaRenderingStart_l(); 1590 } 1591} 1592 1593void NuPlayer::Renderer::onEnableOffloadAudio() { 1594 Mutex::Autolock autoLock(mLock); 1595 mFlags |= FLAG_OFFLOAD_AUDIO; 1596 ++mAudioDrainGeneration; 1597 if (mAudioRenderingStartGeneration != -1) { 1598 prepareForMediaRenderingStart_l(); 1599 } 1600} 1601 1602void NuPlayer::Renderer::onPause() { 1603 if (mPaused) { 1604 return; 1605 } 1606 1607 { 1608 Mutex::Autolock autoLock(mLock); 1609 // we do not increment audio drain generation so that we fill audio buffer during pause. 1610 ++mVideoDrainGeneration; 1611 prepareForMediaRenderingStart_l(); 1612 mPaused = true; 1613 mMediaClock->setPlaybackRate(0.0); 1614 } 1615 1616 mDrainAudioQueuePending = false; 1617 mDrainVideoQueuePending = false; 1618 1619 // Note: audio data may not have been decoded, and the AudioSink may not be opened. 1620 mAudioSink->pause(); 1621 startAudioOffloadPauseTimeout(); 1622 1623 ALOGV("now paused audio queue has %zu entries, video has %zu entries", 1624 mAudioQueue.size(), mVideoQueue.size()); 1625} 1626 1627void NuPlayer::Renderer::onResume() { 1628 if (!mPaused) { 1629 return; 1630 } 1631 1632 // Note: audio data may not have been decoded, and the AudioSink may not be opened. 1633 cancelAudioOffloadPauseTimeout(); 1634 if (mAudioSink->ready()) { 1635 status_t err = mAudioSink->start(); 1636 if (err != OK) { 1637 ALOGE("cannot start AudioSink err %d", err); 1638 notifyAudioTearDown(kDueToError); 1639 } 1640 } 1641 1642 { 1643 Mutex::Autolock autoLock(mLock); 1644 mPaused = false; 1645 // rendering started message may have been delayed if we were paused. 
1646 if (mRenderingDataDelivered) { 1647 notifyIfMediaRenderingStarted_l(); 1648 } 1649 // configure audiosink as we did not do it when pausing 1650 if (mAudioSink != NULL && mAudioSink->ready()) { 1651 mAudioSink->setPlaybackRate(mPlaybackSettings); 1652 } 1653 1654 mMediaClock->setPlaybackRate(mPlaybackRate); 1655 1656 if (!mAudioQueue.empty()) { 1657 postDrainAudioQueue_l(); 1658 } 1659 } 1660 1661 if (!mVideoQueue.empty()) { 1662 postDrainVideoQueue(); 1663 } 1664} 1665 1666void NuPlayer::Renderer::onSetVideoFrameRate(float fps) { 1667 if (mVideoScheduler == NULL) { 1668 mVideoScheduler = new VideoFrameScheduler(); 1669 } 1670 mVideoScheduler->init(fps); 1671} 1672 1673int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) { 1674 Mutex::Autolock autoLock(mLock); 1675 return (audio ? mAudioQueueGeneration : mVideoQueueGeneration); 1676} 1677 1678int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) { 1679 Mutex::Autolock autoLock(mLock); 1680 return (audio ? mAudioDrainGeneration : mVideoDrainGeneration); 1681} 1682 1683bool NuPlayer::Renderer::getSyncQueues() { 1684 Mutex::Autolock autoLock(mLock); 1685 return mSyncQueues; 1686} 1687 1688void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) { 1689 if (mAudioTornDown) { 1690 return; 1691 } 1692 mAudioTornDown = true; 1693 1694 int64_t currentPositionUs; 1695 sp<AMessage> notify = mNotify->dup(); 1696 if (getCurrentPosition(¤tPositionUs) == OK) { 1697 notify->setInt64("positionUs", currentPositionUs); 1698 } 1699 1700 mAudioSink->stop(); 1701 mAudioSink->flush(); 1702 1703 notify->setInt32("what", kWhatAudioTearDown); 1704 notify->setInt32("reason", reason); 1705 notify->post(); 1706} 1707 1708void NuPlayer::Renderer::startAudioOffloadPauseTimeout() { 1709 if (offloadingAudio()) { 1710 mWakeLock->acquire(); 1711 sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this); 1712 msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration); 1713 
msg->post(kOffloadPauseMaxUs); 1714 } 1715} 1716 1717void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() { 1718 if (offloadingAudio()) { 1719 mWakeLock->release(true); 1720 ++mAudioOffloadPauseTimeoutGeneration; 1721 } 1722} 1723 1724status_t NuPlayer::Renderer::onOpenAudioSink( 1725 const sp<AMessage> &format, 1726 bool offloadOnly, 1727 bool hasVideo, 1728 uint32_t flags) { 1729 ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)", 1730 offloadOnly, offloadingAudio()); 1731 bool audioSinkChanged = false; 1732 1733 int32_t numChannels; 1734 CHECK(format->findInt32("channel-count", &numChannels)); 1735 1736 int32_t channelMask; 1737 if (!format->findInt32("channel-mask", &channelMask)) { 1738 // signal to the AudioSink to derive the mask from count. 1739 channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER; 1740 } 1741 1742 int32_t sampleRate; 1743 CHECK(format->findInt32("sample-rate", &sampleRate)); 1744 1745 if (offloadingAudio()) { 1746 audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT; 1747 AString mime; 1748 CHECK(format->findString("mime", &mime)); 1749 status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str()); 1750 1751 if (err != OK) { 1752 ALOGE("Couldn't map mime \"%s\" to a valid " 1753 "audio_format", mime.c_str()); 1754 onDisableOffloadAudio(); 1755 } else { 1756 ALOGV("Mime \"%s\" mapped to audio_format 0x%x", 1757 mime.c_str(), audioFormat); 1758 1759 int avgBitRate = -1; 1760 format->findInt32("bit-rate", &avgBitRate); 1761 1762 int32_t aacProfile = -1; 1763 if (audioFormat == AUDIO_FORMAT_AAC 1764 && format->findInt32("aac-profile", &aacProfile)) { 1765 // Redefine AAC format as per aac profile 1766 mapAACProfileToAudioFormat( 1767 audioFormat, 1768 aacProfile); 1769 } 1770 1771 audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER; 1772 offloadInfo.duration_us = -1; 1773 format->findInt64( 1774 "durationUs", &offloadInfo.duration_us); 1775 offloadInfo.sample_rate = sampleRate; 1776 offloadInfo.channel_mask = channelMask; 1777 
offloadInfo.format = audioFormat; 1778 offloadInfo.stream_type = AUDIO_STREAM_MUSIC; 1779 offloadInfo.bit_rate = avgBitRate; 1780 offloadInfo.has_video = hasVideo; 1781 offloadInfo.is_streaming = true; 1782 1783 if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) { 1784 ALOGV("openAudioSink: no change in offload mode"); 1785 // no change from previous configuration, everything ok. 1786 return OK; 1787 } 1788 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1789 1790 ALOGV("openAudioSink: try to open AudioSink in offload mode"); 1791 uint32_t offloadFlags = flags; 1792 offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; 1793 offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER; 1794 audioSinkChanged = true; 1795 mAudioSink->close(); 1796 1797 err = mAudioSink->open( 1798 sampleRate, 1799 numChannels, 1800 (audio_channel_mask_t)channelMask, 1801 audioFormat, 1802 0 /* bufferCount - unused */, 1803 &NuPlayer::Renderer::AudioSinkCallback, 1804 this, 1805 (audio_output_flags_t)offloadFlags, 1806 &offloadInfo); 1807 1808 if (err == OK) { 1809 err = mAudioSink->setPlaybackRate(mPlaybackSettings); 1810 } 1811 1812 if (err == OK) { 1813 // If the playback is offloaded to h/w, we pass 1814 // the HAL some metadata information. 1815 // We don't want to do this for PCM because it 1816 // will be going through the AudioFlinger mixer 1817 // before reaching the hardware. 1818 // TODO 1819 mCurrentOffloadInfo = offloadInfo; 1820 if (!mPaused) { // for preview mode, don't start if paused 1821 err = mAudioSink->start(); 1822 } 1823 ALOGV_IF(err == OK, "openAudioSink: offload succeeded"); 1824 } 1825 if (err != OK) { 1826 // Clean up, fall back to non offload mode. 
1827 mAudioSink->close(); 1828 onDisableOffloadAudio(); 1829 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1830 ALOGV("openAudioSink: offload failed"); 1831 if (offloadOnly) { 1832 notifyAudioTearDown(kForceNonOffload); 1833 } 1834 } else { 1835 mUseAudioCallback = true; // offload mode transfers data through callback 1836 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message. 1837 } 1838 } 1839 } 1840 if (!offloadOnly && !offloadingAudio()) { 1841 ALOGV("openAudioSink: open AudioSink in NON-offload mode"); 1842 uint32_t pcmFlags = flags; 1843 pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD; 1844 1845 const PcmInfo info = { 1846 (audio_channel_mask_t)channelMask, 1847 (audio_output_flags_t)pcmFlags, 1848 AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat 1849 numChannels, 1850 sampleRate 1851 }; 1852 if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) { 1853 ALOGV("openAudioSink: no change in pcm mode"); 1854 // no change from previous configuration, everything ok. 1855 return OK; 1856 } 1857 1858 audioSinkChanged = true; 1859 mAudioSink->close(); 1860 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1861 // Note: It is possible to set up the callback, but not use it to send audio data. 1862 // This requires a fix in AudioSink to explicitly specify the transfer mode. 1863 mUseAudioCallback = getUseAudioCallbackSetting(); 1864 if (mUseAudioCallback) { 1865 ++mAudioDrainGeneration; // discard pending kWhatDrainAudioQueue message. 1866 } 1867 1868 // Compute the desired buffer size. 1869 // For callback mode, the amount of time before wakeup is about half the buffer size. 1870 const uint32_t frameCount = 1871 (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000; 1872 1873 // The doNotReconnect means AudioSink will signal back and let NuPlayer to re-construct 1874 // AudioSink. We don't want this when there's video because it will cause a video seek to 1875 // the previous I frame. 
But we do want this when there's only audio because it will give 1876 // NuPlayer a chance to switch from non-offload mode to offload mode. 1877 // So we only set doNotReconnect when there's no video. 1878 const bool doNotReconnect = !hasVideo; 1879 1880 // We should always be able to set our playback settings if the sink is closed. 1881 LOG_ALWAYS_FATAL_IF(mAudioSink->setPlaybackRate(mPlaybackSettings) != OK, 1882 "onOpenAudioSink: can't set playback rate on closed sink"); 1883 status_t err = mAudioSink->open( 1884 sampleRate, 1885 numChannels, 1886 (audio_channel_mask_t)channelMask, 1887 AUDIO_FORMAT_PCM_16_BIT, 1888 0 /* bufferCount - unused */, 1889 mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL, 1890 mUseAudioCallback ? this : NULL, 1891 (audio_output_flags_t)pcmFlags, 1892 NULL, 1893 doNotReconnect, 1894 frameCount); 1895 if (err != OK) { 1896 ALOGW("openAudioSink: non offloaded open failed status: %d", err); 1897 mAudioSink->close(); 1898 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1899 return err; 1900 } 1901 mCurrentPcmInfo = info; 1902 if (!mPaused) { // for preview mode, don't start if paused 1903 mAudioSink->start(); 1904 } 1905 } 1906 if (audioSinkChanged) { 1907 onAudioSinkChanged(); 1908 } 1909 mAudioTornDown = false; 1910 return OK; 1911} 1912 1913void NuPlayer::Renderer::onCloseAudioSink() { 1914 mAudioSink->close(); 1915 mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER; 1916 mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER; 1917} 1918 1919} // namespace android 1920 1921