NuPlayerRenderer.cpp revision 25d696f31bfcbb24459f5d68c2288101bb5f7875
1/* 2 * Copyright (C) 2010 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17//#define LOG_NDEBUG 0 18#define LOG_TAG "NuPlayerRenderer" 19#include <utils/Log.h> 20 21#include "NuPlayerRenderer.h" 22#include <cutils/properties.h> 23#include <media/stagefright/foundation/ABuffer.h> 24#include <media/stagefright/foundation/ADebug.h> 25#include <media/stagefright/foundation/AMessage.h> 26#include <media/stagefright/foundation/AUtils.h> 27#include <media/stagefright/foundation/AWakeLock.h> 28#include <media/stagefright/MediaClock.h> 29#include <media/stagefright/MediaErrors.h> 30#include <media/stagefright/MetaData.h> 31#include <media/stagefright/Utils.h> 32#include <media/stagefright/VideoFrameScheduler.h> 33 34#include <inttypes.h> 35 36namespace android { 37 38/* 39 * Example of common configuration settings in shell script form 40 41 #Turn offload audio off (use PCM for Play Music) -- AudioPolicyManager 42 adb shell setprop audio.offload.disable 1 43 44 #Allow offload audio with video (requires offloading to be enabled) -- AudioPolicyManager 45 adb shell setprop audio.offload.video 1 46 47 #Use audio callbacks for PCM data 48 adb shell setprop media.stagefright.audio.cbk 1 49 50 #Use deep buffer for PCM data with video (it is generally enabled for audio-only) 51 adb shell setprop media.stagefright.audio.deep 1 52 53 #Set size of buffers for pcm audio sink in msec (example: 1000 msec) 54 adb shell setprop 
media.stagefright.audio.sink 1000 55 56 * These configurations take effect for the next track played (not the current track). 57 */ 58 59static inline bool getUseAudioCallbackSetting() { 60 return property_get_bool("media.stagefright.audio.cbk", false /* default_value */); 61} 62 63static inline int32_t getAudioSinkPcmMsSetting() { 64 return property_get_int32( 65 "media.stagefright.audio.sink", 500 /* default_value */); 66} 67 68// Maximum time in paused state when offloading audio decompression. When elapsed, the AudioSink 69// is closed to allow the audio DSP to power down. 70static const int64_t kOffloadPauseMaxUs = 10000000ll; 71 72// static 73const NuPlayer::Renderer::PcmInfo NuPlayer::Renderer::AUDIO_PCMINFO_INITIALIZER = { 74 AUDIO_CHANNEL_NONE, 75 AUDIO_OUTPUT_FLAG_NONE, 76 AUDIO_FORMAT_INVALID, 77 0, // mNumChannels 78 0 // mSampleRate 79}; 80 81// static 82const int64_t NuPlayer::Renderer::kMinPositionUpdateDelayUs = 100000ll; 83 84NuPlayer::Renderer::Renderer( 85 const sp<MediaPlayerBase::AudioSink> &sink, 86 const sp<AMessage> ¬ify, 87 uint32_t flags) 88 : mAudioSink(sink), 89 mNotify(notify), 90 mFlags(flags), 91 mNumFramesWritten(0), 92 mDrainAudioQueuePending(false), 93 mDrainVideoQueuePending(false), 94 mAudioQueueGeneration(0), 95 mVideoQueueGeneration(0), 96 mAudioDrainGeneration(0), 97 mVideoDrainGeneration(0), 98 mPlaybackSettings(AUDIO_PLAYBACK_RATE_DEFAULT), 99 mAudioFirstAnchorTimeMediaUs(-1), 100 mAnchorTimeMediaUs(-1), 101 mAnchorNumFramesWritten(-1), 102 mVideoLateByUs(0ll), 103 mHasAudio(false), 104 mHasVideo(false), 105 mNotifyCompleteAudio(false), 106 mNotifyCompleteVideo(false), 107 mSyncQueues(false), 108 mPaused(false), 109 mPauseDrainAudioAllowedUs(0), 110 mVideoSampleReceived(false), 111 mVideoRenderingStarted(false), 112 mVideoRenderingStartGeneration(0), 113 mAudioRenderingStartGeneration(0), 114 mAudioOffloadPauseTimeoutGeneration(0), 115 mAudioTornDown(false), 116 mCurrentOffloadInfo(AUDIO_INFO_INITIALIZER), 117 
mCurrentPcmInfo(AUDIO_PCMINFO_INITIALIZER), 118 mTotalBuffersQueued(0), 119 mLastAudioBufferDrained(0), 120 mUseAudioCallback(false), 121 mWakeLock(new AWakeLock()) { 122 mMediaClock = new MediaClock; 123 mPlaybackRate = mPlaybackSettings.mSpeed; 124 mMediaClock->setPlaybackRate(mPlaybackRate); 125} 126 127NuPlayer::Renderer::~Renderer() { 128 if (offloadingAudio()) { 129 mAudioSink->stop(); 130 mAudioSink->flush(); 131 mAudioSink->close(); 132 } 133} 134 135void NuPlayer::Renderer::queueBuffer( 136 bool audio, 137 const sp<ABuffer> &buffer, 138 const sp<AMessage> ¬ifyConsumed) { 139 sp<AMessage> msg = new AMessage(kWhatQueueBuffer, this); 140 msg->setInt32("queueGeneration", getQueueGeneration(audio)); 141 msg->setInt32("audio", static_cast<int32_t>(audio)); 142 msg->setBuffer("buffer", buffer); 143 msg->setMessage("notifyConsumed", notifyConsumed); 144 msg->post(); 145} 146 147void NuPlayer::Renderer::queueEOS(bool audio, status_t finalResult) { 148 CHECK_NE(finalResult, (status_t)OK); 149 150 sp<AMessage> msg = new AMessage(kWhatQueueEOS, this); 151 msg->setInt32("queueGeneration", getQueueGeneration(audio)); 152 msg->setInt32("audio", static_cast<int32_t>(audio)); 153 msg->setInt32("finalResult", finalResult); 154 msg->post(); 155} 156 157status_t NuPlayer::Renderer::setPlaybackSettings(const AudioPlaybackRate &rate) { 158 sp<AMessage> msg = new AMessage(kWhatConfigPlayback, this); 159 writeToAMessage(msg, rate); 160 sp<AMessage> response; 161 status_t err = msg->postAndAwaitResponse(&response); 162 if (err == OK && response != NULL) { 163 CHECK(response->findInt32("err", &err)); 164 } 165 return err; 166} 167 168status_t NuPlayer::Renderer::onConfigPlayback(const AudioPlaybackRate &rate /* sanitized */) { 169 if (rate.mSpeed == 0.f) { 170 onPause(); 171 // don't call audiosink's setPlaybackRate if pausing, as pitch does not 172 // have to correspond to the any non-0 speed (e.g old speed). 
Keep 173 // settings nonetheless, using the old speed, in case audiosink changes. 174 AudioPlaybackRate newRate = rate; 175 newRate.mSpeed = mPlaybackSettings.mSpeed; 176 mPlaybackSettings = newRate; 177 return OK; 178 } 179 180 if (mAudioSink != NULL && mAudioSink->ready()) { 181 status_t err = mAudioSink->setPlaybackRate(rate); 182 if (err != OK) { 183 return err; 184 } 185 } 186 mPlaybackSettings = rate; 187 mPlaybackRate = rate.mSpeed; 188 mMediaClock->setPlaybackRate(mPlaybackRate); 189 return OK; 190} 191 192status_t NuPlayer::Renderer::getPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) { 193 sp<AMessage> msg = new AMessage(kWhatGetPlaybackSettings, this); 194 sp<AMessage> response; 195 status_t err = msg->postAndAwaitResponse(&response); 196 if (err == OK && response != NULL) { 197 CHECK(response->findInt32("err", &err)); 198 if (err == OK) { 199 readFromAMessage(response, rate); 200 } 201 } 202 return err; 203} 204 205status_t NuPlayer::Renderer::onGetPlaybackSettings(AudioPlaybackRate *rate /* nonnull */) { 206 if (mAudioSink != NULL && mAudioSink->ready()) { 207 status_t err = mAudioSink->getPlaybackRate(rate); 208 if (err == OK) { 209 if (!isAudioPlaybackRateEqual(*rate, mPlaybackSettings)) { 210 ALOGW("correcting mismatch in internal/external playback rate"); 211 } 212 // get playback settings used by audiosink, as it may be 213 // slightly off due to audiosink not taking small changes. 
214 mPlaybackSettings = *rate; 215 if (mPaused) { 216 rate->mSpeed = 0.f; 217 } 218 } 219 return err; 220 } 221 *rate = mPlaybackSettings; 222 return OK; 223} 224 225status_t NuPlayer::Renderer::setSyncSettings(const AVSyncSettings &sync, float videoFpsHint) { 226 sp<AMessage> msg = new AMessage(kWhatConfigSync, this); 227 writeToAMessage(msg, sync, videoFpsHint); 228 sp<AMessage> response; 229 status_t err = msg->postAndAwaitResponse(&response); 230 if (err == OK && response != NULL) { 231 CHECK(response->findInt32("err", &err)); 232 } 233 return err; 234} 235 236status_t NuPlayer::Renderer::onConfigSync(const AVSyncSettings &sync, float videoFpsHint __unused) { 237 if (sync.mSource != AVSYNC_SOURCE_DEFAULT) { 238 return BAD_VALUE; 239 } 240 // TODO: support sync sources 241 return INVALID_OPERATION; 242} 243 244status_t NuPlayer::Renderer::getSyncSettings(AVSyncSettings *sync, float *videoFps) { 245 sp<AMessage> msg = new AMessage(kWhatGetSyncSettings, this); 246 sp<AMessage> response; 247 status_t err = msg->postAndAwaitResponse(&response); 248 if (err == OK && response != NULL) { 249 CHECK(response->findInt32("err", &err)); 250 if (err == OK) { 251 readFromAMessage(response, sync, videoFps); 252 } 253 } 254 return err; 255} 256 257status_t NuPlayer::Renderer::onGetSyncSettings( 258 AVSyncSettings *sync /* nonnull */, float *videoFps /* nonnull */) { 259 *sync = mSyncSettings; 260 *videoFps = -1.f; 261 return OK; 262} 263 264void NuPlayer::Renderer::flush(bool audio, bool notifyComplete) { 265 { 266 Mutex::Autolock autoLock(mLock); 267 if (audio) { 268 mNotifyCompleteAudio |= notifyComplete; 269 clearAudioFirstAnchorTime_l(); 270 ++mAudioQueueGeneration; 271 ++mAudioDrainGeneration; 272 } else { 273 mNotifyCompleteVideo |= notifyComplete; 274 ++mVideoQueueGeneration; 275 ++mVideoDrainGeneration; 276 } 277 278 clearAnchorTime_l(); 279 mVideoLateByUs = 0; 280 mSyncQueues = false; 281 } 282 283 sp<AMessage> msg = new AMessage(kWhatFlush, this); 284 
msg->setInt32("audio", static_cast<int32_t>(audio)); 285 msg->post(); 286} 287 288void NuPlayer::Renderer::signalTimeDiscontinuity() { 289} 290 291void NuPlayer::Renderer::signalDisableOffloadAudio() { 292 (new AMessage(kWhatDisableOffloadAudio, this))->post(); 293} 294 295void NuPlayer::Renderer::signalEnableOffloadAudio() { 296 (new AMessage(kWhatEnableOffloadAudio, this))->post(); 297} 298 299void NuPlayer::Renderer::pause() { 300 (new AMessage(kWhatPause, this))->post(); 301} 302 303void NuPlayer::Renderer::resume() { 304 (new AMessage(kWhatResume, this))->post(); 305} 306 307void NuPlayer::Renderer::setVideoFrameRate(float fps) { 308 sp<AMessage> msg = new AMessage(kWhatSetVideoFrameRate, this); 309 msg->setFloat("frame-rate", fps); 310 msg->post(); 311} 312 313// Called on any threads. 314status_t NuPlayer::Renderer::getCurrentPosition(int64_t *mediaUs) { 315 return mMediaClock->getMediaTime(ALooper::GetNowUs(), mediaUs); 316} 317 318void NuPlayer::Renderer::clearAudioFirstAnchorTime_l() { 319 mAudioFirstAnchorTimeMediaUs = -1; 320 mMediaClock->setStartingTimeMedia(-1); 321} 322 323void NuPlayer::Renderer::setAudioFirstAnchorTimeIfNeeded_l(int64_t mediaUs) { 324 if (mAudioFirstAnchorTimeMediaUs == -1) { 325 mAudioFirstAnchorTimeMediaUs = mediaUs; 326 mMediaClock->setStartingTimeMedia(mediaUs); 327 } 328} 329 330void NuPlayer::Renderer::clearAnchorTime_l() { 331 mMediaClock->clearAnchor(); 332 mAnchorTimeMediaUs = -1; 333 mAnchorNumFramesWritten = -1; 334} 335 336void NuPlayer::Renderer::setVideoLateByUs(int64_t lateUs) { 337 Mutex::Autolock autoLock(mLock); 338 mVideoLateByUs = lateUs; 339} 340 341int64_t NuPlayer::Renderer::getVideoLateByUs() { 342 Mutex::Autolock autoLock(mLock); 343 return mVideoLateByUs; 344} 345 346status_t NuPlayer::Renderer::openAudioSink( 347 const sp<AMessage> &format, 348 bool offloadOnly, 349 bool hasVideo, 350 uint32_t flags, 351 bool *isOffloaded) { 352 sp<AMessage> msg = new AMessage(kWhatOpenAudioSink, this); 353 
msg->setMessage("format", format); 354 msg->setInt32("offload-only", offloadOnly); 355 msg->setInt32("has-video", hasVideo); 356 msg->setInt32("flags", flags); 357 358 sp<AMessage> response; 359 msg->postAndAwaitResponse(&response); 360 361 int32_t err; 362 if (!response->findInt32("err", &err)) { 363 err = INVALID_OPERATION; 364 } else if (err == OK && isOffloaded != NULL) { 365 int32_t offload; 366 CHECK(response->findInt32("offload", &offload)); 367 *isOffloaded = (offload != 0); 368 } 369 return err; 370} 371 372void NuPlayer::Renderer::closeAudioSink() { 373 sp<AMessage> msg = new AMessage(kWhatCloseAudioSink, this); 374 375 sp<AMessage> response; 376 msg->postAndAwaitResponse(&response); 377} 378 379void NuPlayer::Renderer::onMessageReceived(const sp<AMessage> &msg) { 380 switch (msg->what()) { 381 case kWhatOpenAudioSink: 382 { 383 sp<AMessage> format; 384 CHECK(msg->findMessage("format", &format)); 385 386 int32_t offloadOnly; 387 CHECK(msg->findInt32("offload-only", &offloadOnly)); 388 389 int32_t hasVideo; 390 CHECK(msg->findInt32("has-video", &hasVideo)); 391 392 uint32_t flags; 393 CHECK(msg->findInt32("flags", (int32_t *)&flags)); 394 395 status_t err = onOpenAudioSink(format, offloadOnly, hasVideo, flags); 396 397 sp<AMessage> response = new AMessage; 398 response->setInt32("err", err); 399 response->setInt32("offload", offloadingAudio()); 400 401 sp<AReplyToken> replyID; 402 CHECK(msg->senderAwaitsResponse(&replyID)); 403 response->postReply(replyID); 404 405 break; 406 } 407 408 case kWhatCloseAudioSink: 409 { 410 sp<AReplyToken> replyID; 411 CHECK(msg->senderAwaitsResponse(&replyID)); 412 413 onCloseAudioSink(); 414 415 sp<AMessage> response = new AMessage; 416 response->postReply(replyID); 417 break; 418 } 419 420 case kWhatStopAudioSink: 421 { 422 mAudioSink->stop(); 423 break; 424 } 425 426 case kWhatDrainAudioQueue: 427 { 428 mDrainAudioQueuePending = false; 429 430 int32_t generation; 431 CHECK(msg->findInt32("drainGeneration", &generation)); 
432 if (generation != getDrainGeneration(true /* audio */)) { 433 break; 434 } 435 436 if (onDrainAudioQueue()) { 437 uint32_t numFramesPlayed; 438 CHECK_EQ(mAudioSink->getPosition(&numFramesPlayed), 439 (status_t)OK); 440 441 uint32_t numFramesPendingPlayout = 442 mNumFramesWritten - numFramesPlayed; 443 444 // This is how long the audio sink will have data to 445 // play back. 446 int64_t delayUs = 447 mAudioSink->msecsPerFrame() 448 * numFramesPendingPlayout * 1000ll; 449 if (mPlaybackRate > 1.0f) { 450 delayUs /= mPlaybackRate; 451 } 452 453 // Let's give it more data after about half that time 454 // has elapsed. 455 Mutex::Autolock autoLock(mLock); 456 postDrainAudioQueue_l(delayUs / 2); 457 } 458 break; 459 } 460 461 case kWhatDrainVideoQueue: 462 { 463 int32_t generation; 464 CHECK(msg->findInt32("drainGeneration", &generation)); 465 if (generation != getDrainGeneration(false /* audio */)) { 466 break; 467 } 468 469 mDrainVideoQueuePending = false; 470 471 onDrainVideoQueue(); 472 473 postDrainVideoQueue(); 474 break; 475 } 476 477 case kWhatPostDrainVideoQueue: 478 { 479 int32_t generation; 480 CHECK(msg->findInt32("drainGeneration", &generation)); 481 if (generation != getDrainGeneration(false /* audio */)) { 482 break; 483 } 484 485 mDrainVideoQueuePending = false; 486 postDrainVideoQueue(); 487 break; 488 } 489 490 case kWhatQueueBuffer: 491 { 492 onQueueBuffer(msg); 493 break; 494 } 495 496 case kWhatQueueEOS: 497 { 498 onQueueEOS(msg); 499 break; 500 } 501 502 case kWhatConfigPlayback: 503 { 504 sp<AReplyToken> replyID; 505 CHECK(msg->senderAwaitsResponse(&replyID)); 506 AudioPlaybackRate rate; 507 readFromAMessage(msg, &rate); 508 status_t err = onConfigPlayback(rate); 509 sp<AMessage> response = new AMessage; 510 response->setInt32("err", err); 511 response->postReply(replyID); 512 break; 513 } 514 515 case kWhatGetPlaybackSettings: 516 { 517 sp<AReplyToken> replyID; 518 CHECK(msg->senderAwaitsResponse(&replyID)); 519 AudioPlaybackRate rate = 
AUDIO_PLAYBACK_RATE_DEFAULT; 520 status_t err = onGetPlaybackSettings(&rate); 521 sp<AMessage> response = new AMessage; 522 if (err == OK) { 523 writeToAMessage(response, rate); 524 } 525 response->setInt32("err", err); 526 response->postReply(replyID); 527 break; 528 } 529 530 case kWhatConfigSync: 531 { 532 sp<AReplyToken> replyID; 533 CHECK(msg->senderAwaitsResponse(&replyID)); 534 AVSyncSettings sync; 535 float videoFpsHint; 536 readFromAMessage(msg, &sync, &videoFpsHint); 537 status_t err = onConfigSync(sync, videoFpsHint); 538 sp<AMessage> response = new AMessage; 539 response->setInt32("err", err); 540 response->postReply(replyID); 541 break; 542 } 543 544 case kWhatGetSyncSettings: 545 { 546 sp<AReplyToken> replyID; 547 CHECK(msg->senderAwaitsResponse(&replyID)); 548 549 ALOGV("kWhatGetSyncSettings"); 550 AVSyncSettings sync; 551 float videoFps = -1.f; 552 status_t err = onGetSyncSettings(&sync, &videoFps); 553 sp<AMessage> response = new AMessage; 554 if (err == OK) { 555 writeToAMessage(response, sync, videoFps); 556 } 557 response->setInt32("err", err); 558 response->postReply(replyID); 559 break; 560 } 561 562 case kWhatFlush: 563 { 564 onFlush(msg); 565 break; 566 } 567 568 case kWhatDisableOffloadAudio: 569 { 570 onDisableOffloadAudio(); 571 break; 572 } 573 574 case kWhatEnableOffloadAudio: 575 { 576 onEnableOffloadAudio(); 577 break; 578 } 579 580 case kWhatPause: 581 { 582 onPause(); 583 break; 584 } 585 586 case kWhatResume: 587 { 588 onResume(); 589 break; 590 } 591 592 case kWhatSetVideoFrameRate: 593 { 594 float fps; 595 CHECK(msg->findFloat("frame-rate", &fps)); 596 onSetVideoFrameRate(fps); 597 break; 598 } 599 600 case kWhatAudioTearDown: 601 { 602 onAudioTearDown(kDueToError); 603 break; 604 } 605 606 case kWhatAudioOffloadPauseTimeout: 607 { 608 int32_t generation; 609 CHECK(msg->findInt32("drainGeneration", &generation)); 610 if (generation != mAudioOffloadPauseTimeoutGeneration) { 611 break; 612 } 613 ALOGV("Audio Offload tear down due 
to pause timeout."); 614 onAudioTearDown(kDueToTimeout); 615 mWakeLock->release(); 616 break; 617 } 618 619 default: 620 TRESPASS(); 621 break; 622 } 623} 624 625void NuPlayer::Renderer::postDrainAudioQueue_l(int64_t delayUs) { 626 if (mDrainAudioQueuePending || mSyncQueues || mUseAudioCallback) { 627 return; 628 } 629 630 if (mAudioQueue.empty()) { 631 return; 632 } 633 634 // FIXME: if paused, wait until AudioTrack stop() is complete before delivering data. 635 if (mPaused) { 636 const int64_t diffUs = mPauseDrainAudioAllowedUs - ALooper::GetNowUs(); 637 if (diffUs > delayUs) { 638 delayUs = diffUs; 639 } 640 } 641 642 mDrainAudioQueuePending = true; 643 sp<AMessage> msg = new AMessage(kWhatDrainAudioQueue, this); 644 msg->setInt32("drainGeneration", mAudioDrainGeneration); 645 msg->post(delayUs); 646} 647 648void NuPlayer::Renderer::prepareForMediaRenderingStart_l() { 649 mAudioRenderingStartGeneration = mAudioDrainGeneration; 650 mVideoRenderingStartGeneration = mVideoDrainGeneration; 651} 652 653void NuPlayer::Renderer::notifyIfMediaRenderingStarted_l() { 654 if (mVideoRenderingStartGeneration == mVideoDrainGeneration && 655 mAudioRenderingStartGeneration == mAudioDrainGeneration) { 656 mVideoRenderingStartGeneration = -1; 657 mAudioRenderingStartGeneration = -1; 658 659 sp<AMessage> notify = mNotify->dup(); 660 notify->setInt32("what", kWhatMediaRenderingStart); 661 notify->post(); 662 } 663} 664 665// static 666size_t NuPlayer::Renderer::AudioSinkCallback( 667 MediaPlayerBase::AudioSink * /* audioSink */, 668 void *buffer, 669 size_t size, 670 void *cookie, 671 MediaPlayerBase::AudioSink::cb_event_t event) { 672 NuPlayer::Renderer *me = (NuPlayer::Renderer *)cookie; 673 674 switch (event) { 675 case MediaPlayerBase::AudioSink::CB_EVENT_FILL_BUFFER: 676 { 677 return me->fillAudioBuffer(buffer, size); 678 break; 679 } 680 681 case MediaPlayerBase::AudioSink::CB_EVENT_STREAM_END: 682 { 683 ALOGV("AudioSink::CB_EVENT_STREAM_END"); 684 me->notifyEOS(true /* audio 
*/, ERROR_END_OF_STREAM); 685 break; 686 } 687 688 case MediaPlayerBase::AudioSink::CB_EVENT_TEAR_DOWN: 689 { 690 ALOGV("AudioSink::CB_EVENT_TEAR_DOWN"); 691 me->notifyAudioTearDown(); 692 break; 693 } 694 } 695 696 return 0; 697} 698 699size_t NuPlayer::Renderer::fillAudioBuffer(void *buffer, size_t size) { 700 Mutex::Autolock autoLock(mLock); 701 702 if (!mUseAudioCallback) { 703 return 0; 704 } 705 706 bool hasEOS = false; 707 708 size_t sizeCopied = 0; 709 bool firstEntry = true; 710 QueueEntry *entry; // will be valid after while loop if hasEOS is set. 711 while (sizeCopied < size && !mAudioQueue.empty()) { 712 entry = &*mAudioQueue.begin(); 713 714 if (entry->mBuffer == NULL) { // EOS 715 hasEOS = true; 716 mAudioQueue.erase(mAudioQueue.begin()); 717 break; 718 } 719 720 if (firstEntry && entry->mOffset == 0) { 721 firstEntry = false; 722 int64_t mediaTimeUs; 723 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 724 ALOGV("fillAudioBuffer: rendering audio at media time %.2f secs", mediaTimeUs / 1E6); 725 setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs); 726 } 727 728 size_t copy = entry->mBuffer->size() - entry->mOffset; 729 size_t sizeRemaining = size - sizeCopied; 730 if (copy > sizeRemaining) { 731 copy = sizeRemaining; 732 } 733 734 memcpy((char *)buffer + sizeCopied, 735 entry->mBuffer->data() + entry->mOffset, 736 copy); 737 738 entry->mOffset += copy; 739 if (entry->mOffset == entry->mBuffer->size()) { 740 entry->mNotifyConsumed->post(); 741 mAudioQueue.erase(mAudioQueue.begin()); 742 entry = NULL; 743 } 744 sizeCopied += copy; 745 746 notifyIfMediaRenderingStarted_l(); 747 } 748 749 if (mAudioFirstAnchorTimeMediaUs >= 0) { 750 int64_t nowUs = ALooper::GetNowUs(); 751 int64_t nowMediaUs = 752 mAudioFirstAnchorTimeMediaUs + getPlayedOutAudioDurationUs(nowUs); 753 // we don't know how much data we are queueing for offloaded tracks. 
754 mMediaClock->updateAnchor(nowMediaUs, nowUs, INT64_MAX); 755 } 756 757 // for non-offloaded audio, we need to compute the frames written because 758 // there is no EVENT_STREAM_END notification. The frames written gives 759 // an estimate on the pending played out duration. 760 if (!offloadingAudio()) { 761 mNumFramesWritten += sizeCopied / mAudioSink->frameSize(); 762 } 763 764 if (hasEOS) { 765 (new AMessage(kWhatStopAudioSink, this))->post(); 766 // As there is currently no EVENT_STREAM_END callback notification for 767 // non-offloaded audio tracks, we need to post the EOS ourselves. 768 if (!offloadingAudio()) { 769 int64_t postEOSDelayUs = 0; 770 if (mAudioSink->needsTrailingPadding()) { 771 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); 772 } 773 ALOGV("fillAudioBuffer: notifyEOS " 774 "mNumFramesWritten:%u finalResult:%d postEOSDelay:%lld", 775 mNumFramesWritten, entry->mFinalResult, (long long)postEOSDelayUs); 776 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); 777 } 778 } 779 return sizeCopied; 780} 781 782void NuPlayer::Renderer::drainAudioQueueUntilLastEOS() { 783 List<QueueEntry>::iterator it = mAudioQueue.begin(), itEOS = it; 784 bool foundEOS = false; 785 while (it != mAudioQueue.end()) { 786 int32_t eos; 787 QueueEntry *entry = &*it++; 788 if (entry->mBuffer == NULL 789 || (entry->mNotifyConsumed->findInt32("eos", &eos) && eos != 0)) { 790 itEOS = it; 791 foundEOS = true; 792 } 793 } 794 795 if (foundEOS) { 796 // post all replies before EOS and drop the samples 797 for (it = mAudioQueue.begin(); it != itEOS; it++) { 798 if (it->mBuffer == NULL) { 799 // delay doesn't matter as we don't even have an AudioTrack 800 notifyEOS(true /* audio */, it->mFinalResult); 801 } else { 802 it->mNotifyConsumed->post(); 803 } 804 } 805 mAudioQueue.erase(mAudioQueue.begin(), itEOS); 806 } 807} 808 809bool NuPlayer::Renderer::onDrainAudioQueue() { 810 // do not drain audio during teardown as queued buffers may be 
invalid. 811 if (mAudioTornDown) { 812 return false; 813 } 814 // TODO: This call to getPosition checks if AudioTrack has been created 815 // in AudioSink before draining audio. If AudioTrack doesn't exist, then 816 // CHECKs on getPosition will fail. 817 // We still need to figure out why AudioTrack is not created when 818 // this function is called. One possible reason could be leftover 819 // audio. Another possible place is to check whether decoder 820 // has received INFO_FORMAT_CHANGED as the first buffer since 821 // AudioSink is opened there, and possible interactions with flush 822 // immediately after start. Investigate error message 823 // "vorbis_dsp_synthesis returned -135", along with RTSP. 824 uint32_t numFramesPlayed; 825 if (mAudioSink->getPosition(&numFramesPlayed) != OK) { 826 // When getPosition fails, renderer will not reschedule the draining 827 // unless new samples are queued. 828 // If we have pending EOS (or "eos" marker for discontinuities), we need 829 // to post these now as NuPlayerDecoder might be waiting for it. 
830 drainAudioQueueUntilLastEOS(); 831 832 ALOGW("onDrainAudioQueue(): audio sink is not ready"); 833 return false; 834 } 835 836#if 0 837 ssize_t numFramesAvailableToWrite = 838 mAudioSink->frameCount() - (mNumFramesWritten - numFramesPlayed); 839 840 if (numFramesAvailableToWrite == mAudioSink->frameCount()) { 841 ALOGI("audio sink underrun"); 842 } else { 843 ALOGV("audio queue has %d frames left to play", 844 mAudioSink->frameCount() - numFramesAvailableToWrite); 845 } 846#endif 847 848 uint32_t prevFramesWritten = mNumFramesWritten; 849 while (!mAudioQueue.empty()) { 850 QueueEntry *entry = &*mAudioQueue.begin(); 851 852 mLastAudioBufferDrained = entry->mBufferOrdinal; 853 854 if (entry->mBuffer == NULL) { 855 // EOS 856 int64_t postEOSDelayUs = 0; 857 if (mAudioSink->needsTrailingPadding()) { 858 postEOSDelayUs = getPendingAudioPlayoutDurationUs(ALooper::GetNowUs()); 859 } 860 notifyEOS(true /* audio */, entry->mFinalResult, postEOSDelayUs); 861 862 mAudioQueue.erase(mAudioQueue.begin()); 863 entry = NULL; 864 if (mAudioSink->needsTrailingPadding()) { 865 // If we're not in gapless playback (i.e. through setNextPlayer), we 866 // need to stop the track here, because that will play out the last 867 // little bit at the end of the file. Otherwise short files won't play. 868 mAudioSink->stop(); 869 mNumFramesWritten = 0; 870 } 871 return false; 872 } 873 874 // ignore 0-sized buffer which could be EOS marker with no data 875 if (entry->mOffset == 0 && entry->mBuffer->size() > 0) { 876 int64_t mediaTimeUs; 877 CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs)); 878 ALOGV("onDrainAudioQueue: rendering audio at media time %.2f secs", 879 mediaTimeUs / 1E6); 880 onNewAudioMediaTime(mediaTimeUs); 881 } 882 883 size_t copy = entry->mBuffer->size() - entry->mOffset; 884 885 ssize_t written = mAudioSink->write(entry->mBuffer->data() + entry->mOffset, 886 copy, false /* blocking */); 887 if (written < 0) { 888 // An error in AudioSink write. 
Perhaps the AudioSink was not properly opened. 889 if (written == WOULD_BLOCK) { 890 ALOGV("AudioSink write would block when writing %zu bytes", copy); 891 } else { 892 ALOGE("AudioSink write error(%zd) when writing %zu bytes", written, copy); 893 // This can only happen when AudioSink was opened with doNotReconnect flag set to 894 // true, in which case the NuPlayer will handle the reconnect. 895 notifyAudioTearDown(); 896 } 897 break; 898 } 899 900 entry->mOffset += written; 901 if (entry->mOffset == entry->mBuffer->size()) { 902 entry->mNotifyConsumed->post(); 903 mAudioQueue.erase(mAudioQueue.begin()); 904 905 entry = NULL; 906 } 907 908 size_t copiedFrames = written / mAudioSink->frameSize(); 909 mNumFramesWritten += copiedFrames; 910 911 { 912 Mutex::Autolock autoLock(mLock); 913 notifyIfMediaRenderingStarted_l(); 914 } 915 916 if (written != (ssize_t)copy) { 917 // A short count was received from AudioSink::write() 918 // 919 // AudioSink write is called in non-blocking mode. 920 // It may return with a short count when: 921 // 922 // 1) Size to be copied is not a multiple of the frame size. We consider this fatal. 923 // 2) The data to be copied exceeds the available buffer in AudioSink. 924 // 3) An error occurs and data has been partially copied to the buffer in AudioSink. 925 // 4) AudioSink is an AudioCache for data retrieval, and the AudioCache is exceeded. 926 927 // (Case 1) 928 // Must be a multiple of the frame size. If it is not a multiple of a frame size, it 929 // needs to fail, as we should not carry over fractional frames between calls. 930 CHECK_EQ(copy % mAudioSink->frameSize(), 0); 931 932 // (Case 2, 3, 4) 933 // Return early to the caller. 934 // Beware of calling immediately again as this may busy-loop if you are not careful. 
// NOTE(review): the lines below are the tail of a method whose beginning lies
// outside this chunk (the loop that writes queued audio buffers to the
// AudioSink); they are reproduced unchanged.
            ALOGV("AudioSink write short frame count %zd < %zu", written, copy);
            break;
        }
    }
    // Recompute the furthest media time the clock may report: the anchor plus
    // the duration of the frames written since the anchor was taken.
    int64_t maxTimeMedia;
    {
        Mutex::Autolock autoLock(mLock);
        maxTimeMedia =
            mAnchorTimeMediaUs +
            (int64_t)(max((long long)mNumFramesWritten - mAnchorNumFramesWritten, 0LL)
                    * 1000LL * mAudioSink->msecsPerFrame());
    }
    mMediaClock->updateMaxTimeMedia(maxTimeMedia);

    // calculate whether we need to reschedule another write.
    bool reschedule = !mAudioQueue.empty()
            && (!mPaused
                || prevFramesWritten != mNumFramesWritten); // permit pause to fill buffers
    //ALOGD("reschedule:%d  empty:%d  mPaused:%d  prevFramesWritten:%u  mNumFramesWritten:%u",
    //        reschedule, mAudioQueue.empty(), mPaused, prevFramesWritten, mNumFramesWritten);
    return reschedule;
}

// Converts a frame count into microseconds at the sample rate of the current
// sink configuration (offload info when offloading, PCM info otherwise).
// Returns 0 (with an error log) if no valid sample rate is configured yet.
int64_t NuPlayer::Renderer::getDurationUsIfPlayedAtSampleRate(uint32_t numFrames) {
    int32_t sampleRate = offloadingAudio() ?
            mCurrentOffloadInfo.sample_rate : mCurrentPcmInfo.mSampleRate;
    if (sampleRate == 0) {
        ALOGE("sampleRate is 0 in %s mode", offloadingAudio() ? "offload" : "non-offload");
        return 0;
    }
    // TODO: remove the (int32_t) casting below as it may overflow at 12.4 hours.
    return (int64_t)((int32_t)numFrames * 1000000LL / sampleRate);
}

// Calculate duration of pending samples if played at normal rate (i.e., 1.0).
// "Pending" = written to the sink but not yet played out.
int64_t NuPlayer::Renderer::getPendingAudioPlayoutDurationUs(int64_t nowUs) {
    int64_t writtenAudioDurationUs = getDurationUsIfPlayedAtSampleRate(mNumFramesWritten);
    return writtenAudioDurationUs - getPlayedOutAudioDurationUs(nowUs);
}

// Maps a media timestamp to the real (system) time at which it should be
// rendered, per the shared MediaClock.
int64_t NuPlayer::Renderer::getRealTimeUs(int64_t mediaTimeUs, int64_t nowUs) {
    int64_t realUs;
    if (mMediaClock->getRealTimeFor(mediaTimeUs, &realUs) != OK) {
        // If failed to get current position, e.g. due to audio clock is
        // not ready, then just play out video immediately without delay.
        return nowUs;
    }
    return realUs;
}

// Re-anchors the media clock on a freshly queued audio timestamp, estimating
// the "now playing" media time by subtracting the not-yet-played-out portion
// of what has been written to the sink.
void NuPlayer::Renderer::onNewAudioMediaTime(int64_t mediaTimeUs) {
    Mutex::Autolock autoLock(mLock);
    // TRICKY: vorbis decoder generates multiple frames with the same
    // timestamp, so only update on the first frame with a given timestamp
    if (mediaTimeUs == mAnchorTimeMediaUs) {
        return;
    }
    setAudioFirstAnchorTimeIfNeeded_l(mediaTimeUs);
    int64_t nowUs = ALooper::GetNowUs();
    int64_t nowMediaUs = mediaTimeUs - getPendingAudioPlayoutDurationUs(nowUs);
    mMediaClock->updateAnchor(nowMediaUs, nowUs, mediaTimeUs);
    mAnchorNumFramesWritten = mNumFramesWritten;
    mAnchorTimeMediaUs = mediaTimeUs;
}

// Called without mLock acquired.
// Schedules a kWhatDrainVideoQueue message for the head of the video queue at
// its target render time (two vsyncs early), or immediately for EOS entries.
void NuPlayer::Renderer::postDrainVideoQueue() {
    if (mDrainVideoQueuePending
            || getSyncQueues()
            || (mPaused && mVideoSampleReceived)) {
        return;
    }

    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry &entry = *mVideoQueue.begin();

    sp<AMessage> msg = new AMessage(kWhatDrainVideoQueue, this);
    msg->setInt32("drainGeneration", getDrainGeneration(false /* audio */));

    if (entry.mBuffer == NULL) {
        // EOS doesn't carry a timestamp.
        msg->post();
        mDrainVideoQueuePending = true;
        return;
    }

    int64_t delayUs;
    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    if (mFlags & FLAG_REAL_TIME) {
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));
        realTimeUs = mediaTimeUs;
    } else {
        int64_t mediaTimeUs;
        CHECK(entry.mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        {
            Mutex::Autolock autoLock(mLock);
            if (mAnchorTimeMediaUs < 0) {
                // No anchor yet (e.g. video-only or first frame after flush):
                // anchor the clock on this frame and render it now.
                mMediaClock->updateAnchor(mediaTimeUs, nowUs, mediaTimeUs);
                mAnchorTimeMediaUs = mediaTimeUs;
                realTimeUs = nowUs;
            } else if (!mVideoSampleReceived) {
                // Always render the first video frame.
                realTimeUs = nowUs;
            } else {
                realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
            }
        }
        if (!mHasAudio) {
            // smooth out videos >= 10fps
            mMediaClock->updateMaxTimeMedia(mediaTimeUs + 100000);
        }

        // Heuristics to handle situation when media time changed without a
        // discontinuity. If we have not drained an audio buffer that was
        // received after this buffer, repost in 10 msec. Otherwise repost
        // in 500 msec.
        delayUs = realTimeUs - nowUs;
        if (delayUs > 500000) {
            int64_t postDelayUs = 500000;
            if (mHasAudio && (mLastAudioBufferDrained - entry.mBufferOrdinal) <= 0) {
                postDelayUs = 10000;
            }
            msg->setWhat(kWhatPostDrainVideoQueue);
            msg->post(postDelayUs);
            mVideoScheduler->restart();
            ALOGI("possible video time jump of %dms, retrying in %dms",
                    (int)(delayUs / 1000), (int)(postDelayUs / 1000));
            mDrainVideoQueuePending = true;
            return;
        }
    }

    // Snap the render time to the vsync grid.
    realTimeUs = mVideoScheduler->schedule(realTimeUs * 1000) / 1000;
    int64_t twoVsyncsUs = 2 * (mVideoScheduler->getVsyncPeriod() / 1000);

    delayUs = realTimeUs - nowUs;

    ALOGW_IF(delayUs > 500000, "unusually high delayUs: %" PRId64, delayUs);
    // post 2 display refreshes before rendering is due
    msg->post(delayUs > twoVsyncsUs ? delayUs - twoVsyncsUs : 0);

    mDrainVideoQueuePending = true;
}

// Consumes the head of the video queue: reports EOS, or decides whether the
// frame is rendered or dropped (too late) and notifies the decoder.
void NuPlayer::Renderer::onDrainVideoQueue() {
    if (mVideoQueue.empty()) {
        return;
    }

    QueueEntry *entry = &*mVideoQueue.begin();

    if (entry->mBuffer == NULL) {
        // EOS

        notifyEOS(false /* audio */, entry->mFinalResult);

        mVideoQueue.erase(mVideoQueue.begin());
        entry = NULL;

        setVideoLateByUs(0);
        return;
    }

    int64_t nowUs = ALooper::GetNowUs();
    int64_t realTimeUs;
    if (mFlags & FLAG_REAL_TIME) {
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &realTimeUs));
    } else {
        int64_t mediaTimeUs;
        CHECK(entry->mBuffer->meta()->findInt64("timeUs", &mediaTimeUs));

        realTimeUs = getRealTimeUs(mediaTimeUs, nowUs);
    }

    bool tooLate = false;

    if (!mPaused) {
        setVideoLateByUs(nowUs - realTimeUs);
        // 40ms late is the drop threshold.
        tooLate = (mVideoLateByUs > 40000);

        if (tooLate) {
            ALOGV("video late by %lld us (%.2f secs)",
                 (long long)mVideoLateByUs, mVideoLateByUs / 1E6);
        } else {
            int64_t mediaUs = 0;
            mMediaClock->getMediaTime(realTimeUs, &mediaUs);
            ALOGV("rendering video at media time %.2f secs",
                    (mFlags & FLAG_REAL_TIME ? realTimeUs :
                    mediaUs) / 1E6);
        }
    } else {
        setVideoLateByUs(0);
        if (!mVideoSampleReceived && !mHasAudio) {
            // This will ensure that the first frame after a flush won't be used as anchor
            // when renderer is in paused state, because resume can happen any time after seek.
            Mutex::Autolock autoLock(mLock);
            clearAnchorTime_l();
        }
    }

    // Always render the first video frame while keeping stats on A/V sync.
    if (!mVideoSampleReceived) {
        realTimeUs = nowUs;
        tooLate = false;
    }

    entry->mNotifyConsumed->setInt64("timestampNs", realTimeUs * 1000ll);
    entry->mNotifyConsumed->setInt32("render", !tooLate);
    entry->mNotifyConsumed->post();
    mVideoQueue.erase(mVideoQueue.begin());
    entry = NULL;

    mVideoSampleReceived = true;

    if (!mPaused) {
        if (!mVideoRenderingStarted) {
            mVideoRenderingStarted = true;
            notifyVideoRenderingStart();
        }
        Mutex::Autolock autoLock(mLock);
        notifyIfMediaRenderingStarted_l();
    }
}

// Tells the upper layer (NuPlayer) that the first video frame has been sent
// out for rendering.
void NuPlayer::Renderer::notifyVideoRenderingStart() {
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatVideoRenderingStart);
    notify->post();
}

// Reports end-of-stream (or the terminating error) for the audio or video
// branch, optionally after a delay.
void NuPlayer::Renderer::notifyEOS(bool audio, status_t finalResult, int64_t delayUs) {
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatEOS);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->setInt32("finalResult", finalResult);
    notify->post(delayUs);
}

// Posts an asynchronous request to tear down the audio path (handled on the
// renderer's looper).
void NuPlayer::Renderer::notifyAudioTearDown() {
    (new AMessage(kWhatAudioTearDown, this))->post();
}

// Enqueues a decoded buffer on the audio or video queue, kicks the matching
// drain, and — while mSyncQueues is set — drops leading audio so both queues
// start within 0.1s of each other.
void NuPlayer::Renderer::onQueueBuffer(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    if (audio) {
        mHasAudio = true;
    } else {
        mHasVideo = true;
    }

    if (mHasVideo) {
        if (mVideoScheduler == NULL) {
            mVideoScheduler = new VideoFrameScheduler();
            mVideoScheduler->init();
        }
    }

    sp<ABuffer> buffer;
    CHECK(msg->findBuffer("buffer", &buffer));

    sp<AMessage> notifyConsumed;
    CHECK(msg->findMessage("notifyConsumed", &notifyConsumed));

    QueueEntry entry;
    entry.mBuffer = buffer;
    entry.mNotifyConsumed = notifyConsumed;
    entry.mOffset = 0;
    entry.mFinalResult = OK;
    // Ordinal is compared against mLastAudioBufferDrained in postDrainVideoQueue.
    entry.mBufferOrdinal = ++mTotalBuffersQueued;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        // NOTE(review): video push is done without holding mLock here,
        // unlike the audio branch — presumably only the renderer thread
        // mutates mVideoQueue; confirm before changing.
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }

    Mutex::Autolock autoLock(mLock);
    if (!mSyncQueues || mAudioQueue.empty() || mVideoQueue.empty()) {
        return;
    }

    sp<ABuffer> firstAudioBuffer = (*mAudioQueue.begin()).mBuffer;
    sp<ABuffer> firstVideoBuffer = (*mVideoQueue.begin()).mBuffer;

    if (firstAudioBuffer == NULL || firstVideoBuffer == NULL) {
        // EOS signalled on either queue.
        syncQueuesDone_l();
        return;
    }

    int64_t firstAudioTimeUs;
    int64_t firstVideoTimeUs;
    CHECK(firstAudioBuffer->meta()
            ->findInt64("timeUs", &firstAudioTimeUs));
    CHECK(firstVideoBuffer->meta()
            ->findInt64("timeUs", &firstVideoTimeUs));

    int64_t diff = firstVideoTimeUs - firstAudioTimeUs;

    ALOGV("queueDiff = %.2f secs", diff / 1E6);

    if (diff > 100000ll) {
        // Audio data starts More than 0.1 secs before video.
        // Drop some audio.

        (*mAudioQueue.begin()).mNotifyConsumed->post();
        mAudioQueue.erase(mAudioQueue.begin());
        return;
    }

    syncQueuesDone_l();
}

// Ends the queue-sync phase and restarts draining on whichever queues have
// data. Caller holds mLock; the lock is dropped around postDrainVideoQueue()
// because that path re-acquires mLock internally.
void NuPlayer::Renderer::syncQueuesDone_l() {
    if (!mSyncQueues) {
        return;
    }

    mSyncQueues = false;

    if (!mAudioQueue.empty()) {
        postDrainAudioQueue_l();
    }

    if (!mVideoQueue.empty()) {
        mLock.unlock();
        postDrainVideoQueue();
        mLock.lock();
    }
}

// Enqueues an EOS marker (a QueueEntry with no buffer) carrying finalResult,
// terminating any pending queue-sync for an empty queue first.
void NuPlayer::Renderer::onQueueEOS(const sp<AMessage> &msg) {
    int32_t audio;
    CHECK(msg->findInt32("audio", &audio));

    if (dropBufferIfStale(audio, msg)) {
        return;
    }

    int32_t finalResult;
    CHECK(msg->findInt32("finalResult", &finalResult));

    QueueEntry entry;
    entry.mOffset = 0;
    entry.mFinalResult = finalResult;

    if (audio) {
        Mutex::Autolock autoLock(mLock);
        if (mAudioQueue.empty() && mSyncQueues) {
            syncQueuesDone_l();
        }
        mAudioQueue.push_back(entry);
        postDrainAudioQueue_l();
    } else {
        if (mVideoQueue.empty() && getSyncQueues()) {
            Mutex::Autolock autoLock(mLock);
            syncQueuesDone_l();
        }
        mVideoQueue.push_back(entry);
        postDrainVideoQueue();
    }
}

// Flushes one branch (audio or video): empties its queue, bumps the drain
// generation so in-flight drain messages are discarded, resets the anchor,
// and cycles the AudioSink for the audio branch.
void NuPlayer::Renderer::onFlush(const sp<AMessage> &msg) {
    int32_t audio, notifyComplete;
    CHECK(msg->findInt32("audio", &audio));

    {
        Mutex::Autolock autoLock(mLock);
        if (audio) {
            notifyComplete = mNotifyCompleteAudio;
            mNotifyCompleteAudio = false;
        } else {
            notifyComplete = mNotifyCompleteVideo;
            mNotifyCompleteVideo = false;
        }

        // If we're currently syncing the queues, i.e. dropping audio while
        // aligning the first audio/video buffer times and only one of the
        // two queues has data, we may starve that queue by not requesting
        // more buffers from the decoder. If the other source then encounters
        // a discontinuity that leads to flushing, we'll never find the
        // corresponding discontinuity on the other queue.
        // Therefore we'll stop syncing the queues if at least one of them
        // is flushed.
        syncQueuesDone_l();
        clearAnchorTime_l();
    }

    ALOGV("flushing %s", audio ? "audio" : "video");
    if (audio) {
        {
            Mutex::Autolock autoLock(mLock);
            flushQueue(&mAudioQueue);

            ++mAudioDrainGeneration;
            prepareForMediaRenderingStart_l();

            // the frame count will be reset after flush.
            clearAudioFirstAnchorTime_l();
        }

        mDrainAudioQueuePending = false;

        if (offloadingAudio()) {
            mAudioSink->pause();
            mAudioSink->flush();
            if (!mPaused) {
                mAudioSink->start();
            }
        } else {
            mAudioSink->pause();
            mAudioSink->flush();
            // Call stop() to signal to the AudioSink to completely fill the
            // internal buffer before resuming playback.
            // FIXME: this is ignored after flush().
            mAudioSink->stop();
            if (mPaused) {
                // Race condition: if renderer is paused and audio sink is stopped,
                // we need to make sure that the audio track buffer fully drains
                // before delivering data.
                // FIXME: remove this if we can detect if stop() is complete.
                const int delayUs = 2 * 50 * 1000; // (2 full mixer thread cycles at 50ms)
                mPauseDrainAudioAllowedUs = ALooper::GetNowUs() + delayUs;
            } else {
                mAudioSink->start();
            }
            mNumFramesWritten = 0;
        }
    } else {
        flushQueue(&mVideoQueue);

        mDrainVideoQueuePending = false;

        if (mVideoScheduler != NULL) {
            mVideoScheduler->restart();
        }

        Mutex::Autolock autoLock(mLock);
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
    }

    mVideoSampleReceived = false;

    if (notifyComplete) {
        notifyFlushComplete(audio);
    }
}

// Empties a queue, returning every real buffer to its decoder via the
// notifyConsumed message (EOS entries carry no buffer and are just dropped).
void NuPlayer::Renderer::flushQueue(List<QueueEntry> *queue) {
    while (!queue->empty()) {
        QueueEntry *entry = &*queue->begin();

        if (entry->mBuffer != NULL) {
            entry->mNotifyConsumed->post();
        }

        queue->erase(queue->begin());
        entry = NULL;
    }
}

// Tells the upper layer the requested flush of the given branch is done.
void NuPlayer::Renderer::notifyFlushComplete(bool audio) {
    sp<AMessage> notify = mNotify->dup();
    notify->setInt32("what", kWhatFlushComplete);
    notify->setInt32("audio", static_cast<int32_t>(audio));
    notify->post();
}

// Returns true (and hands the buffer back to the decoder) if the message was
// queued under an older queue generation, i.e. before the last flush.
bool NuPlayer::Renderer::dropBufferIfStale(
        bool audio, const sp<AMessage> &msg) {
    int32_t queueGeneration;
    CHECK(msg->findInt32("queueGeneration", &queueGeneration));

    if (queueGeneration == getQueueGeneration(audio)) {
        return false;
    }

    sp<AMessage> notifyConsumed;
    if (msg->findMessage("notifyConsumed", &notifyConsumed)) {
        notifyConsumed->post();
    }

    return true;
}

// Re-seeds the written-frame counters after the (non-offloaded) AudioSink has
// been reopened, so playout-duration math stays consistent with the sink.
void NuPlayer::Renderer::onAudioSinkChanged() {
    if (offloadingAudio()) {
        return;
    }
    CHECK(!mDrainAudioQueuePending);
    mNumFramesWritten = 0;
    {
        Mutex::Autolock autoLock(mLock);
        mAnchorNumFramesWritten = -1;
    }
    uint32_t written;
    if (mAudioSink->getFramesWritten(&written) == OK) {
        mNumFramesWritten = written;
    }
}

// Leaves offload mode; bumps the audio drain generation to discard any
// pending drain messages scheduled under the old mode.
void NuPlayer::Renderer::onDisableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags &= ~FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}

// Enters offload mode; mirror image of onDisableOffloadAudio().
void NuPlayer::Renderer::onEnableOffloadAudio() {
    Mutex::Autolock autoLock(mLock);
    mFlags |= FLAG_OFFLOAD_AUDIO;
    ++mAudioDrainGeneration;
    if (mAudioRenderingStartGeneration != -1) {
        prepareForMediaRenderingStart_l();
    }
}

// Pauses playback: stops the media clock, cancels pending drains (video only;
// audio buffers may keep filling), pauses the sink and, when offloading, arms
// the offload pause-timeout wakelock timer.
void NuPlayer::Renderer::onPause() {
    if (mPaused) {
        return;
    }

    {
        Mutex::Autolock autoLock(mLock);
        // we do not increment audio drain generation so that we fill audio buffer
        // during pause.
        ++mVideoDrainGeneration;
        prepareForMediaRenderingStart_l();
        mPaused = true;
        mMediaClock->setPlaybackRate(0.0);
    }

    mDrainAudioQueuePending = false;
    mDrainVideoQueuePending = false;

    if (mHasAudio) {
        mAudioSink->pause();
        startAudioOffloadPauseTimeout();
    }

    ALOGV("now paused audio queue has %zu entries, video has %zu entries",
          mAudioQueue.size(), mVideoQueue.size());
}

// Resumes playback: restarts the sink, restores the playback rate on both
// the sink and the media clock, and re-kicks both drains.
void NuPlayer::Renderer::onResume() {
    if (!mPaused) {
        return;
    }

    if (mHasAudio) {
        cancelAudioOffloadPauseTimeout();
        status_t err = mAudioSink->start();
        if (err != OK) {
            ALOGE("cannot start AudioSink err %d", err);
            notifyAudioTearDown();
        }
    }

    {
        Mutex::Autolock autoLock(mLock);
        mPaused = false;

        // configure audiosink as we did not do it when pausing
        if (mAudioSink != NULL && mAudioSink->ready()) {
            mAudioSink->setPlaybackRate(mPlaybackSettings);
        }

        mMediaClock->setPlaybackRate(mPlaybackRate);

        if (!mAudioQueue.empty()) {
            postDrainAudioQueue_l();
        }
    }

    if (!mVideoQueue.empty()) {
        postDrainVideoQueue();
    }
}

// (Re)initializes the frame scheduler with the decoder-reported frame rate.
void NuPlayer::Renderer::onSetVideoFrameRate(float fps) {
    if (mVideoScheduler == NULL) {
        mVideoScheduler = new VideoFrameScheduler();
    }
    mVideoScheduler->init(fps);
}

// Thread-safe accessor for the current queue generation of a branch.
int32_t NuPlayer::Renderer::getQueueGeneration(bool audio) {
    Mutex::Autolock autoLock(mLock);
    return (audio ? mAudioQueueGeneration : mVideoQueueGeneration);
}

// Thread-safe accessor for the current drain generation of a branch.
int32_t NuPlayer::Renderer::getDrainGeneration(bool audio) {
    Mutex::Autolock autoLock(mLock);
    return (audio ? mAudioDrainGeneration : mVideoDrainGeneration);
}

// Thread-safe accessor for the queue-sync flag.
bool NuPlayer::Renderer::getSyncQueues() {
    Mutex::Autolock autoLock(mLock);
    return mSyncQueues;
}

// TODO: Remove unnecessary calls to getPlayedOutAudioDurationUs()
// as it acquires locks and may query the audio driver.
//
// Some calls could conceivably retrieve extrapolated data instead of
// accessing getTimestamp() or getPosition() every time a data buffer with
// a media time is received.
//
// Calculate duration of played samples if played at normal rate (i.e., 1.0).
int64_t NuPlayer::Renderer::getPlayedOutAudioDurationUs(int64_t nowUs) {
    uint32_t numFramesPlayed;
    int64_t numFramesPlayedAt;
    AudioTimestamp ts;
    static const int64_t kStaleTimestamp100ms = 100000;

    status_t res = mAudioSink->getTimestamp(ts);
    if (res == OK) {                 // case 1: mixing audio tracks and offloaded tracks.
        numFramesPlayed = ts.mPosition;
        numFramesPlayedAt =
            ts.mTime.tv_sec * 1000000LL + ts.mTime.tv_nsec / 1000;
        const int64_t timestampAge = nowUs - numFramesPlayedAt;
        if (timestampAge > kStaleTimestamp100ms) {
            // This is an audio FIXME.
            // getTimestamp returns a timestamp which may come from audio mixing threads.
            // After pausing, the MixerThread may go idle, thus the mTime estimate may
            // become stale. Assuming that the MixerThread runs 20ms, with FastMixer at 5ms,
            // the max latency should be about 25ms with an average around 12ms (to be verified).
            // For safety we use 100ms.
            ALOGV("getTimestamp: returned stale timestamp nowUs(%lld) numFramesPlayedAt(%lld)",
                    (long long)nowUs, (long long)numFramesPlayedAt);
            numFramesPlayedAt = nowUs - kStaleTimestamp100ms;
        }
        //ALOGD("getTimestamp: OK %d %lld", numFramesPlayed, (long long)numFramesPlayedAt);
    } else if (res == WOULD_BLOCK) { // case 2: transitory state on start of a new track
        numFramesPlayed = 0;
        numFramesPlayedAt = nowUs;
        //ALOGD("getTimestamp: WOULD_BLOCK %d %lld",
        //        numFramesPlayed, (long long)numFramesPlayedAt);
    } else {                         // case 3: transitory at new track or audio fast tracks.
        res = mAudioSink->getPosition(&numFramesPlayed);
        CHECK_EQ(res, (status_t)OK);
        numFramesPlayedAt = nowUs;
        numFramesPlayedAt += 1000LL * mAudioSink->latency() / 2; /* XXX */
        //ALOGD("getPosition: %u %lld", numFramesPlayed, (long long)numFramesPlayedAt);
    }

    //CHECK_EQ(numFramesPlayed & (1 << 31), 0);  // can't be negative until 12.4 hrs, test
    int64_t durationUs = getDurationUsIfPlayedAtSampleRate(numFramesPlayed)
            + nowUs - numFramesPlayedAt;
    if (durationUs < 0) {
        // Occurs when numFramesPlayed position is very small and the following:
        // (1) In case 1, the time nowUs is computed before getTimestamp() is called and
        //     numFramesPlayedAt is greater than nowUs by time more than numFramesPlayed.
        // (2) In case 3, using getPosition and adding mAudioSink->latency() to
        //     numFramesPlayedAt, by a time amount greater than numFramesPlayed.
        //
        // Both of these are transitory conditions.
        ALOGV("getPlayedOutAudioDurationUs: negative duration %lld set to zero", (long long)durationUs);
        durationUs = 0;
    }
    ALOGV("getPlayedOutAudioDurationUs(%lld) nowUs(%lld) frames(%u) framesAt(%lld)",
            (long long)durationUs, (long long)nowUs, numFramesPlayed, (long long)numFramesPlayedAt);
    return durationUs;
}

// Tears down the audio path once (guarded by mAudioTornDown), attaching the
// current position so the upper layer can resume from the right spot.
void NuPlayer::Renderer::onAudioTearDown(AudioTearDownReason reason) {
    if (mAudioTornDown) {
        return;
    }
    mAudioTornDown = true;

    int64_t currentPositionUs;
    sp<AMessage> notify = mNotify->dup();
    if (getCurrentPosition(&currentPositionUs) == OK) {
        notify->setInt64("positionUs", currentPositionUs);
    }

    mAudioSink->stop();
    mAudioSink->flush();

    notify->setInt32("what", kWhatAudioTearDown);
    notify->setInt32("reason", reason);
    notify->post();
}

// While paused in offload mode, holds a wakelock and schedules a timeout so
// the offloaded track can be torn down instead of pinning the DSP forever.
void NuPlayer::Renderer::startAudioOffloadPauseTimeout() {
    if (offloadingAudio()) {
        mWakeLock->acquire();
        sp<AMessage> msg = new AMessage(kWhatAudioOffloadPauseTimeout, this);
        msg->setInt32("drainGeneration", mAudioOffloadPauseTimeoutGeneration);
        msg->post(kOffloadPauseMaxUs);
    }
}

// Cancels the pause-timeout (by bumping its generation) and drops the wakelock.
void NuPlayer::Renderer::cancelAudioOffloadPauseTimeout() {
    if (offloadingAudio()) {
        mWakeLock->release(true);
        ++mAudioOffloadPauseTimeoutGeneration;
    }
}

// Opens (or reconfigures) the AudioSink for the given format.
// Tries compressed offload first when offloading is requested/active, falling
// back to a 16-bit PCM open on failure; returns early with OK if the current
// sink configuration already matches.
status_t NuPlayer::Renderer::onOpenAudioSink(
        const sp<AMessage> &format,
        bool offloadOnly,
        bool hasVideo,
        uint32_t flags) {
    ALOGV("openAudioSink: offloadOnly(%d) offloadingAudio(%d)",
            offloadOnly, offloadingAudio());
    bool audioSinkChanged = false;

    int32_t numChannels;
    CHECK(format->findInt32("channel-count", &numChannels));

    int32_t channelMask;
    if (!format->findInt32("channel-mask", &channelMask)) {
        // signal to the AudioSink to derive the mask from count.
        channelMask = CHANNEL_MASK_USE_CHANNEL_ORDER;
    }

    int32_t sampleRate;
    CHECK(format->findInt32("sample-rate", &sampleRate));

    if (offloadingAudio()) {
        audio_format_t audioFormat = AUDIO_FORMAT_PCM_16_BIT;
        AString mime;
        CHECK(format->findString("mime", &mime));
        status_t err = mapMimeToAudioFormat(audioFormat, mime.c_str());

        if (err != OK) {
            ALOGE("Couldn't map mime \"%s\" to a valid "
                    "audio_format", mime.c_str());
            onDisableOffloadAudio();
        } else {
            ALOGV("Mime \"%s\" mapped to audio_format 0x%x",
                    mime.c_str(), audioFormat);

            int avgBitRate = -1;
            format->findInt32("bit-rate", &avgBitRate);

            int32_t aacProfile = -1;
            if (audioFormat == AUDIO_FORMAT_AAC
                    && format->findInt32("aac-profile", &aacProfile)) {
                // Redefine AAC format as per aac profile
                mapAACProfileToAudioFormat(
                        audioFormat,
                        aacProfile);
            }

            audio_offload_info_t offloadInfo = AUDIO_INFO_INITIALIZER;
            offloadInfo.duration_us = -1;
            format->findInt64(
                    "durationUs", &offloadInfo.duration_us);
            offloadInfo.sample_rate = sampleRate;
            offloadInfo.channel_mask = channelMask;
            offloadInfo.format = audioFormat;
            offloadInfo.stream_type = AUDIO_STREAM_MUSIC;
            offloadInfo.bit_rate = avgBitRate;
            offloadInfo.has_video = hasVideo;
            offloadInfo.is_streaming = true;

            if (memcmp(&mCurrentOffloadInfo, &offloadInfo, sizeof(offloadInfo)) == 0) {
                ALOGV("openAudioSink: no change in offload mode");
                // no change from previous configuration, everything ok.
                return OK;
            }
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;

            ALOGV("openAudioSink: try to open AudioSink in offload mode");
            uint32_t offloadFlags = flags;
            offloadFlags |= AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;
            offloadFlags &= ~AUDIO_OUTPUT_FLAG_DEEP_BUFFER;
            audioSinkChanged = true;
            mAudioSink->close();

            err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    audioFormat,
                    0 /* bufferCount - unused */,
                    &NuPlayer::Renderer::AudioSinkCallback,
                    this,
                    (audio_output_flags_t)offloadFlags,
                    &offloadInfo);

            if (err == OK) {
                err = mAudioSink->setPlaybackRate(mPlaybackSettings);
            }

            if (err == OK) {
                // If the playback is offloaded to h/w, we pass
                // the HAL some metadata information.
                // We don't want to do this for PCM because it
                // will be going through the AudioFlinger mixer
                // before reaching the hardware.
                // TODO
                mCurrentOffloadInfo = offloadInfo;
                if (!mPaused) { // for preview mode, don't start if paused
                    err = mAudioSink->start();
                }
                ALOGV_IF(err == OK, "openAudioSink: offload succeeded");
            }
            if (err != OK) {
                // Clean up, fall back to non offload mode.
                mAudioSink->close();
                onDisableOffloadAudio();
                mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
                ALOGV("openAudioSink: offload failed");
            } else {
                mUseAudioCallback = true;  // offload mode transfers data through callback
                ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
            }
        }
    }
    if (!offloadOnly && !offloadingAudio()) {
        ALOGV("openAudioSink: open AudioSink in NON-offload mode");
        uint32_t pcmFlags = flags;
        pcmFlags &= ~AUDIO_OUTPUT_FLAG_COMPRESS_OFFLOAD;

        const PcmInfo info = {
                (audio_channel_mask_t)channelMask,
                (audio_output_flags_t)pcmFlags,
                AUDIO_FORMAT_PCM_16_BIT, // TODO: change to audioFormat
                numChannels,
                sampleRate
        };
        if (memcmp(&mCurrentPcmInfo, &info, sizeof(info)) == 0) {
            ALOGV("openAudioSink: no change in pcm mode");
            // no change from previous configuration, everything ok.
            return OK;
        }

        audioSinkChanged = true;
        mAudioSink->close();
        mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
        // Note: It is possible to set up the callback, but not use it to send audio data.
        // This requires a fix in AudioSink to explicitly specify the transfer mode.
        mUseAudioCallback = getUseAudioCallbackSetting();
        if (mUseAudioCallback) {
            ++mAudioDrainGeneration;  // discard pending kWhatDrainAudioQueue message.
        }

        // Compute the desired buffer size.
        // For callback mode, the amount of time before wakeup is about half the buffer size.
        const uint32_t frameCount =
                (unsigned long long)sampleRate * getAudioSinkPcmMsSetting() / 1000;

        // The doNotReconnect means AudioSink will signal back and let NuPlayer to re-construct
        // AudioSink. We don't want this when there's video because it will cause a video seek to
        // the previous I frame. But we do want this when there's only audio because it will give
        // NuPlayer a chance to switch from non-offload mode to offload mode.
        // So we only set doNotReconnect when there's no video.
        const bool doNotReconnect = !hasVideo;
        status_t err = mAudioSink->open(
                    sampleRate,
                    numChannels,
                    (audio_channel_mask_t)channelMask,
                    AUDIO_FORMAT_PCM_16_BIT,
                    0 /* bufferCount - unused */,
                    mUseAudioCallback ? &NuPlayer::Renderer::AudioSinkCallback : NULL,
                    mUseAudioCallback ? this : NULL,
                    (audio_output_flags_t)pcmFlags,
                    NULL,
                    doNotReconnect,
                    frameCount);
        if (err == OK) {
            err = mAudioSink->setPlaybackRate(mPlaybackSettings);
        }
        if (err != OK) {
            ALOGW("openAudioSink: non offloaded open failed status: %d", err);
            mAudioSink->close();
            mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
            return err;
        }
        mCurrentPcmInfo = info;
        if (!mPaused) { // for preview mode, don't start if paused
            mAudioSink->start();
        }
    }
    if (audioSinkChanged) {
        onAudioSinkChanged();
    }
    mAudioTornDown = false;
    return OK;
}

// Closes the AudioSink and invalidates both cached sink configurations so the
// next onOpenAudioSink() call performs a real open.
void NuPlayer::Renderer::onCloseAudioSink() {
    mAudioSink->close();
    mCurrentOffloadInfo = AUDIO_INFO_INITIALIZER;
    mCurrentPcmInfo = AUDIO_PCMINFO_INITIALIZER;
}

}  // namespace android