// android_AudioSfDecoder.cpp revision 833251ab9e5e59a6ea5ac325122cf3abdf7cd944
/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

//#define USE_LOG SLAndroidLogLevel_Verbose

#include "sles_allinclusive.h"
#include "android/android_AudioSfDecoder.h"

#include <media/stagefright/foundation/ADebug.h>


// Cache-amount thresholds, in bytes, used by getCacheRemaining() when the stream
// duration (and therefore a time-based estimate) is not available.
#define SIZE_CACHED_HIGH_BYTES 1000000
#define SIZE_CACHED_MED_BYTES 700000
#define SIZE_CACHED_LOW_BYTES 400000

namespace android {

//--------------------------------------------------------------------------------------------------
// Constructor: initializes all decode-related state to "unknown / not started".
// -1 and ANDROID_UNKNOWN_TIME act as "not yet known" sentinels; the real values are
// filled in by onPrepare() and, on format changes, by hasNewDecodeParams().
AudioSfDecoder::AudioSfDecoder(const AudioPlayback_Parameters* params) : GenericPlayer(params),
        mDataSource(0),
        mAudioSource(0),
        mAudioSourceStarted(false),
        mBitrate(-1),
        mChannelMask(UNKNOWN_CHANNELMASK),
        mDurationUsec(ANDROID_UNKNOWN_TIME),
        mDecodeBuffer(NULL),
        mSeekTimeMsec(0),
        mLastDecodedPositionUs(ANDROID_UNKNOWN_TIME),
        mPcmFormatKeyCount(0)
{
    SL_LOGD("AudioSfDecoder::AudioSfDecoder()");
}


AudioSfDecoder::~AudioSfDecoder() {
    SL_LOGD("AudioSfDecoder::~AudioSfDecoder()");
}


// Called ahead of destruction: releases any decoded buffer still pending rendering,
// and stops the audio source if it was started. mBufferSourceLock serializes this
// against the event-loop handlers that also touch mDecodeBuffer and mAudioSource.
void AudioSfDecoder::preDestroy() {
    GenericPlayer::preDestroy();
    SL_LOGD("AudioSfDecoder::preDestroy()");
    {
        Mutex::Autolock _l(mBufferSourceLock);

        if (NULL != mDecodeBuffer) {
            mDecodeBuffer->release();
            mDecodeBuffer = NULL;
        }

        if ((mAudioSource != 0) && mAudioSourceStarted) {
            mAudioSource->stop();
            mAudioSourceStarted = false;
        }
    }
}
70 71 72//-------------------------------------------------- 73void AudioSfDecoder::play() { 74 SL_LOGD("AudioSfDecoder::play"); 75 76 GenericPlayer::play(); 77 (new AMessage(kWhatDecode, id()))->post(); 78} 79 80 81void AudioSfDecoder::getPositionMsec(int* msec) { 82 int64_t timeUsec = getPositionUsec(); 83 if (timeUsec == ANDROID_UNKNOWN_TIME) { 84 *msec = ANDROID_UNKNOWN_TIME; 85 } else { 86 *msec = timeUsec / 1000; 87 } 88} 89 90 91void AudioSfDecoder::startPrefetch_async() { 92 SL_LOGV("AudioSfDecoder::startPrefetch_async()"); 93 94 if (wantPrefetch()) { 95 SL_LOGV("AudioSfDecoder::startPrefetch_async(): sending check cache msg"); 96 97 mStateFlags |= kFlagPreparing | kFlagBuffering; 98 99 (new AMessage(kWhatCheckCache, id()))->post(); 100 } 101} 102 103 104//-------------------------------------------------- 105uint32_t AudioSfDecoder::getPcmFormatKeyCount() { 106 android::Mutex::Autolock autoLock(mPcmFormatLock); 107 return mPcmFormatKeyCount; 108} 109 110 111//-------------------------------------------------- 112bool AudioSfDecoder::getPcmFormatKeySize(uint32_t index, uint32_t* pKeySize) { 113 uint32_t keyCount = getPcmFormatKeyCount(); 114 if (index >= keyCount) { 115 return false; 116 } else { 117 *pKeySize = strlen(kPcmDecodeMetadataKeys[index]) +1; 118 return true; 119 } 120} 121 122 123//-------------------------------------------------- 124bool AudioSfDecoder::getPcmFormatKeyName(uint32_t index, uint32_t keySize, char* keyName) { 125 uint32_t actualKeySize; 126 if (!getPcmFormatKeySize(index, &actualKeySize)) { 127 return false; 128 } 129 if (keySize < actualKeySize) { 130 return false; 131 } 132 strncpy(keyName, kPcmDecodeMetadataKeys[index], actualKeySize); 133 return true; 134} 135 136 137//-------------------------------------------------- 138bool AudioSfDecoder::getPcmFormatValueSize(uint32_t index, uint32_t* pValueSize) { 139 uint32_t keyCount = getPcmFormatKeyCount(); 140 if (index >= keyCount) { 141 *pValueSize = 0; 142 return false; 143 } 
else { 144 *pValueSize = sizeof(uint32_t); 145 return true; 146 } 147} 148 149 150//-------------------------------------------------- 151bool AudioSfDecoder::getPcmFormatKeyValue(uint32_t index, uint32_t size, uint32_t* pValue) { 152 uint32_t valueSize = 0; 153 if (!getPcmFormatValueSize(index, &valueSize)) { 154 return false; 155 } else if (size != valueSize) { 156 // this ensures we are accessing mPcmFormatValues with a valid size for that index 157 SL_LOGE("Error retrieving metadata value at index %d: using size of %d, should be %d", 158 index, size, valueSize); 159 return false; 160 } else { 161 *pValue = mPcmFormatValues[index]; 162 return true; 163 } 164} 165 166 167//-------------------------------------------------- 168// Event handlers 169// it is strictly verboten to call those methods outside of the event loop 170 171// Initializes the data and audio sources, and update the PCM format info 172// post-condition: upon successful initialization based on the player data locator 173// GenericPlayer::onPrepare() was called 174// mDataSource != 0 175// mAudioSource != 0 176// mAudioSourceStarted == true 177// All error returns from this method are via notifyPrepared(status) followed by "return". 
178void AudioSfDecoder::onPrepare() { 179 SL_LOGD("AudioSfDecoder::onPrepare()"); 180 Mutex::Autolock _l(mBufferSourceLock); 181 182 // Initialize the PCM format info with the known parameters before the start of the decode 183 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_BITSPERSAMPLE] = SL_PCMSAMPLEFORMAT_FIXED_16; 184 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_CONTAINERSIZE] = 16; 185 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_ENDIANNESS] = SL_BYTEORDER_LITTLEENDIAN; 186 // initialization with the default values: they will be replaced by the actual values 187 // once the decoder has figured them out 188 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_NUMCHANNELS] = mChannelCount; 189 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_SAMPLESPERSEC] = mSampleRateHz; 190 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_CHANNELMASK] = mChannelMask; 191 192 //--------------------------------- 193 // Instantiate and initialize the data source for the decoder 194 sp<DataSource> dataSource; 195 196 switch (mDataLocatorType) { 197 198 case kDataLocatorNone: 199 SL_LOGE("AudioSfDecoder::onPrepare: no data locator set"); 200 notifyPrepared(MEDIA_ERROR_BASE); 201 return; 202 203 case kDataLocatorUri: 204 dataSource = DataSource::CreateFromURI(mDataLocator.uriRef); 205 if (dataSource == NULL) { 206 SL_LOGE("AudioSfDecoder::onPrepare(): Error opening %s", mDataLocator.uriRef); 207 notifyPrepared(MEDIA_ERROR_BASE); 208 return; 209 } 210 break; 211 212 case kDataLocatorFd: 213 { 214 // As FileSource unconditionally takes ownership of the fd and closes it, then 215 // we have to make a dup for FileSource if the app wants to keep ownership itself 216 int fd = mDataLocator.fdi.fd; 217 if (mDataLocator.fdi.mCloseAfterUse) { 218 mDataLocator.fdi.mCloseAfterUse = false; 219 } else { 220 fd = ::dup(fd); 221 } 222 dataSource = new FileSource(fd, mDataLocator.fdi.offset, mDataLocator.fdi.length); 223 status_t err = dataSource->initCheck(); 224 if (err != OK) { 225 notifyPrepared(err); 226 
return; 227 } 228 break; 229 } 230 231 default: 232 TRESPASS(); 233 } 234 235 //--------------------------------- 236 // Instanciate and initialize the decoder attached to the data source 237 sp<MediaExtractor> extractor = MediaExtractor::Create(dataSource); 238 if (extractor == NULL) { 239 SL_LOGE("AudioSfDecoder::onPrepare: Could not instantiate extractor."); 240 notifyPrepared(ERROR_UNSUPPORTED); 241 return; 242 } 243 244 ssize_t audioTrackIndex = -1; 245 bool isRawAudio = false; 246 for (size_t i = 0; i < extractor->countTracks(); ++i) { 247 sp<MetaData> meta = extractor->getTrackMetaData(i); 248 249 const char *mime; 250 CHECK(meta->findCString(kKeyMIMEType, &mime)); 251 252 if (!strncasecmp("audio/", mime, 6)) { 253 audioTrackIndex = i; 254 255 if (!strcasecmp(MEDIA_MIMETYPE_AUDIO_RAW, mime)) { 256 isRawAudio = true; 257 } 258 break; 259 } 260 } 261 262 if (audioTrackIndex < 0) { 263 SL_LOGE("AudioSfDecoder::onPrepare: Could not find a supported audio track."); 264 notifyPrepared(ERROR_UNSUPPORTED); 265 return; 266 } 267 268 sp<MediaSource> source = extractor->getTrack(audioTrackIndex); 269 sp<MetaData> meta = source->getFormat(); 270 271 // we can't trust the OMXCodec (if there is one) to issue a INFO_FORMAT_CHANGED so we want 272 // to have some meaningful values as soon as possible. 
273 bool hasChannelCount = meta->findInt32(kKeyChannelCount, &mChannelCount); 274 int32_t sr; 275 bool hasSampleRate = meta->findInt32(kKeySampleRate, &sr); 276 if (hasSampleRate) { 277 mSampleRateHz = (uint32_t) sr; 278 } 279 280 off64_t size; 281 int64_t durationUs; 282 if (dataSource->getSize(&size) == OK 283 && meta->findInt64(kKeyDuration, &durationUs)) { 284 if (durationUs != 0) { 285 mBitrate = size * 8000000ll / durationUs; // in bits/sec 286 } else { 287 mBitrate = -1; 288 } 289 mDurationUsec = durationUs; 290 mDurationMsec = durationUs / 1000; 291 } else { 292 mBitrate = -1; 293 mDurationUsec = ANDROID_UNKNOWN_TIME; 294 mDurationMsec = ANDROID_UNKNOWN_TIME; 295 } 296 297 // the audio content is not raw PCM, so we need a decoder 298 if (!isRawAudio) { 299 OMXClient client; 300 CHECK_EQ(client.connect(), (status_t)OK); 301 302 source = OMXCodec::Create( 303 client.interface(), meta, false /* createEncoder */, 304 source); 305 306 if (source == NULL) { 307 SL_LOGE("AudioSfDecoder::onPrepare: Could not instantiate decoder."); 308 notifyPrepared(ERROR_UNSUPPORTED); 309 return; 310 } 311 312 meta = source->getFormat(); 313 } 314 315 316 if (source->start() != OK) { 317 SL_LOGE("AudioSfDecoder::onPrepare: Failed to start source/decoder."); 318 notifyPrepared(MEDIA_ERROR_BASE); 319 return; 320 } 321 322 //--------------------------------- 323 // The data source, and audio source (a decoder if required) are ready to be used 324 mDataSource = dataSource; 325 mAudioSource = source; 326 mAudioSourceStarted = true; 327 328 if (!hasChannelCount) { 329 CHECK(meta->findInt32(kKeyChannelCount, &mChannelCount)); 330 } 331 332 if (!hasSampleRate) { 333 CHECK(meta->findInt32(kKeySampleRate, &sr)); 334 mSampleRateHz = (uint32_t) sr; 335 } 336 // FIXME add code below once channel mask support is in, currently initialized to default 337 // if (meta->findInt32(kKeyChannelMask, &mChannelMask)) { 338 // mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_CHANNELMASK] = mChannelMask; 339 
// } 340 341 if (!wantPrefetch()) { 342 SL_LOGV("AudioSfDecoder::onPrepare: no need to prefetch"); 343 // doesn't need prefetching, notify good to go 344 mCacheStatus = kStatusHigh; 345 mCacheFill = 1000; 346 notifyStatus(); 347 notifyCacheFill(); 348 } 349 350 { 351 android::Mutex::Autolock autoLock(mPcmFormatLock); 352 mPcmFormatKeyCount = NB_PCMMETADATA_KEYS; 353 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_SAMPLESPERSEC] = mSampleRateHz; 354 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_NUMCHANNELS] = mChannelCount; 355 } 356 357 // at this point we have enough information about the source to create the sink that 358 // will consume the data 359 createAudioSink(); 360 361 // signal successful completion of prepare 362 mStateFlags |= kFlagPrepared; 363 364 GenericPlayer::onPrepare(); 365 SL_LOGD("AudioSfDecoder::onPrepare() done, mStateFlags=0x%x", mStateFlags); 366} 367 368 369void AudioSfDecoder::onPause() { 370 SL_LOGV("AudioSfDecoder::onPause()"); 371 GenericPlayer::onPause(); 372 pauseAudioSink(); 373} 374 375 376void AudioSfDecoder::onPlay() { 377 SL_LOGV("AudioSfDecoder::onPlay()"); 378 GenericPlayer::onPlay(); 379 startAudioSink(); 380} 381 382 383void AudioSfDecoder::onSeek(const sp<AMessage> &msg) { 384 SL_LOGV("AudioSfDecoder::onSeek"); 385 int64_t timeMsec; 386 CHECK(msg->findInt64(WHATPARAM_SEEK_SEEKTIME_MS, &timeMsec)); 387 388 Mutex::Autolock _l(mTimeLock); 389 mStateFlags |= kFlagSeeking; 390 mSeekTimeMsec = timeMsec; 391 mLastDecodedPositionUs = ANDROID_UNKNOWN_TIME; 392} 393 394 395void AudioSfDecoder::onLoop(const sp<AMessage> &msg) { 396 SL_LOGV("AudioSfDecoder::onLoop"); 397 int32_t loop; 398 CHECK(msg->findInt32(WHATPARAM_LOOP_LOOPING, &loop)); 399 400 if (loop) { 401 //SL_LOGV("AudioSfDecoder::onLoop start looping"); 402 mStateFlags |= kFlagLooping; 403 } else { 404 //SL_LOGV("AudioSfDecoder::onLoop stop looping"); 405 mStateFlags &= ~kFlagLooping; 406 } 407} 408 409 410void AudioSfDecoder::onCheckCache(const sp<AMessage> &msg) { 411 
//SL_LOGV("AudioSfDecoder::onCheckCache"); 412 bool eos; 413 CacheStatus_t status = getCacheRemaining(&eos); 414 415 if (eos || status == kStatusHigh 416 || ((mStateFlags & kFlagPreparing) && (status >= kStatusEnough))) { 417 if (mStateFlags & kFlagPlaying) { 418 startAudioSink(); 419 } 420 mStateFlags &= ~kFlagBuffering; 421 422 SL_LOGV("AudioSfDecoder::onCheckCache: buffering done."); 423 424 if (mStateFlags & kFlagPreparing) { 425 //SL_LOGV("AudioSfDecoder::onCheckCache: preparation done."); 426 mStateFlags &= ~kFlagPreparing; 427 } 428 429 if (mStateFlags & kFlagPlaying) { 430 (new AMessage(kWhatDecode, id()))->post(); 431 } 432 return; 433 } 434 435 msg->post(100000); 436} 437 438 439void AudioSfDecoder::onDecode() { 440 SL_LOGV("AudioSfDecoder::onDecode"); 441 442 //-------------------------------- Need to buffer some more before decoding? 443 bool eos; 444 if (mDataSource == 0) { 445 // application set play state to paused which failed, then set play state to playing 446 return; 447 } 448 449 if (wantPrefetch() 450 && (getCacheRemaining(&eos) == kStatusLow) 451 && !eos) { 452 SL_LOGV("buffering more."); 453 454 if (mStateFlags & kFlagPlaying) { 455 pauseAudioSink(); 456 } 457 mStateFlags |= kFlagBuffering; 458 (new AMessage(kWhatCheckCache, id()))->post(100000); 459 return; 460 } 461 462 if (!(mStateFlags & (kFlagPlaying | kFlagBuffering | kFlagPreparing))) { 463 // don't decode if we're not buffering, prefetching or playing 464 //SL_LOGV("don't decode: not buffering, prefetching or playing"); 465 return; 466 } 467 468 //-------------------------------- Decode 469 status_t err; 470 MediaSource::ReadOptions readOptions; 471 if (mStateFlags & kFlagSeeking) { 472 assert(mSeekTimeMsec != ANDROID_UNKNOWN_TIME); 473 readOptions.setSeekTo(mSeekTimeMsec * 1000); 474 } 475 476 int64_t timeUsec = ANDROID_UNKNOWN_TIME; 477 { 478 Mutex::Autolock _l(mBufferSourceLock); 479 480 if (NULL != mDecodeBuffer) { 481 // the current decoded buffer hasn't been rendered, drop it 
482 mDecodeBuffer->release(); 483 mDecodeBuffer = NULL; 484 } 485 if(!mAudioSourceStarted) { 486 return; 487 } 488 err = mAudioSource->read(&mDecodeBuffer, &readOptions); 489 if (err == OK) { 490 CHECK(mDecodeBuffer->meta_data()->findInt64(kKeyTime, &timeUsec)); 491 } 492 } 493 494 { 495 Mutex::Autolock _l(mTimeLock); 496 if (mStateFlags & kFlagSeeking) { 497 mStateFlags &= ~kFlagSeeking; 498 mSeekTimeMsec = ANDROID_UNKNOWN_TIME; 499 } 500 if (timeUsec != ANDROID_UNKNOWN_TIME) { 501 mLastDecodedPositionUs = timeUsec; 502 } 503 } 504 505 //-------------------------------- Handle return of decode 506 if (err != OK) { 507 bool continueDecoding = false; 508 switch(err) { 509 case ERROR_END_OF_STREAM: 510 if (0 < mDurationUsec) { 511 Mutex::Autolock _l(mTimeLock); 512 mLastDecodedPositionUs = mDurationUsec; 513 } 514 // handle notification and looping at end of stream 515 if (mStateFlags & kFlagPlaying) { 516 notify(PLAYEREVENT_ENDOFSTREAM, 1, true); 517 } 518 if (mStateFlags & kFlagLooping) { 519 seek(0); 520 // kick-off decoding again 521 continueDecoding = true; 522 } 523 break; 524 case INFO_FORMAT_CHANGED: 525 SL_LOGD("MediaSource::read encountered INFO_FORMAT_CHANGED"); 526 // reconfigure output 527 { 528 Mutex::Autolock _l(mBufferSourceLock); 529 hasNewDecodeParams(); 530 } 531 continueDecoding = true; 532 break; 533 case INFO_DISCONTINUITY: 534 SL_LOGD("MediaSource::read encountered INFO_DISCONTINUITY"); 535 continueDecoding = true; 536 break; 537 default: 538 SL_LOGE("MediaSource::read returned error %d", err); 539 break; 540 } 541 if (continueDecoding) { 542 if (NULL == mDecodeBuffer) { 543 (new AMessage(kWhatDecode, id()))->post(); 544 return; 545 } 546 } else { 547 return; 548 } 549 } 550 551 //-------------------------------- Render 552 sp<AMessage> msg = new AMessage(kWhatRender, id()); 553 msg->post(); 554} 555 556 557void AudioSfDecoder::onRender() { 558 //SL_LOGV("AudioSfDecoder::onRender"); 559 560 Mutex::Autolock _l(mBufferSourceLock); 561 562 if 
(NULL == mDecodeBuffer) { 563 // nothing to render, move along 564 SL_LOGV("AudioSfDecoder::onRender NULL buffer, exiting"); 565 return; 566 } 567 568 mDecodeBuffer->release(); 569 mDecodeBuffer = NULL; 570 571} 572 573 574void AudioSfDecoder::onMessageReceived(const sp<AMessage> &msg) { 575 switch (msg->what()) { 576 case kWhatPrepare: 577 onPrepare(); 578 break; 579 580 case kWhatDecode: 581 onDecode(); 582 break; 583 584 case kWhatRender: 585 onRender(); 586 break; 587 588 case kWhatCheckCache: 589 onCheckCache(msg); 590 break; 591 592 case kWhatNotif: 593 onNotify(msg); 594 break; 595 596 case kWhatPlay: 597 onPlay(); 598 break; 599 600 case kWhatPause: 601 onPause(); 602 break; 603 604/* 605 case kWhatSeek: 606 onSeek(msg); 607 break; 608 609 case kWhatLoop: 610 onLoop(msg); 611 break; 612*/ 613 default: 614 GenericPlayer::onMessageReceived(msg); 615 break; 616 } 617} 618 619//-------------------------------------------------- 620// Prepared state, prefetch status notifications 621void AudioSfDecoder::notifyPrepared(status_t prepareRes) { 622 assert(!(mStateFlags & (kFlagPrepared | kFlagPreparedUnsuccessfully))); 623 if (NO_ERROR == prepareRes) { 624 // The "then" fork is not currently used, but is kept here to make it easier 625 // to replace by a new signalPrepareCompletion(status) if we re-visit this later. 626 mStateFlags |= kFlagPrepared; 627 } else { 628 mStateFlags |= kFlagPreparedUnsuccessfully; 629 } 630 // Do not call the superclass onPrepare to notify, because it uses a default error 631 // status code but we can provide a more specific one. 
632 // GenericPlayer::onPrepare(); 633 notify(PLAYEREVENT_PREPARED, (int32_t)prepareRes, true); 634 SL_LOGD("AudioSfDecoder::onPrepare() done, mStateFlags=0x%x", mStateFlags); 635} 636 637 638void AudioSfDecoder::onNotify(const sp<AMessage> &msg) { 639 notif_cbf_t notifyClient; 640 void* notifyUser; 641 { 642 android::Mutex::Autolock autoLock(mNotifyClientLock); 643 if (NULL == mNotifyClient) { 644 return; 645 } else { 646 notifyClient = mNotifyClient; 647 notifyUser = mNotifyUser; 648 } 649 } 650 int32_t val; 651 if (msg->findInt32(PLAYEREVENT_PREFETCHSTATUSCHANGE, &val)) { 652 SL_LOGV("\tASfPlayer notifying %s = %d", PLAYEREVENT_PREFETCHSTATUSCHANGE, val); 653 notifyClient(kEventPrefetchStatusChange, val, 0, notifyUser); 654 } 655 else if (msg->findInt32(PLAYEREVENT_PREFETCHFILLLEVELUPDATE, &val)) { 656 SL_LOGV("\tASfPlayer notifying %s = %d", PLAYEREVENT_PREFETCHFILLLEVELUPDATE, val); 657 notifyClient(kEventPrefetchFillLevelUpdate, val, 0, notifyUser); 658 } 659 else if (msg->findInt32(PLAYEREVENT_ENDOFSTREAM, &val)) { 660 SL_LOGV("\tASfPlayer notifying %s = %d", PLAYEREVENT_ENDOFSTREAM, val); 661 notifyClient(kEventEndOfStream, val, 0, notifyUser); 662 } 663 else { 664 GenericPlayer::onNotify(msg); 665 } 666} 667 668 669//-------------------------------------------------- 670// Private utility functions 671 672bool AudioSfDecoder::wantPrefetch() { 673 if (mDataSource != 0) { 674 return (mDataSource->flags() & DataSource::kWantsPrefetching); 675 } else { 676 // happens if an improper data locator was passed, if the media extractor couldn't be 677 // initialized, if there is no audio track in the media, if the OMX decoder couldn't be 678 // instantiated, if the source couldn't be opened, or if the MediaSource 679 // couldn't be started 680 SL_LOGV("AudioSfDecoder::wantPrefetch() tries to access NULL mDataSource"); 681 return false; 682 } 683} 684 685 686int64_t AudioSfDecoder::getPositionUsec() { 687 Mutex::Autolock _l(mTimeLock); 688 if (mStateFlags & 
kFlagSeeking) { 689 return mSeekTimeMsec * 1000; 690 } else { 691 if (mLastDecodedPositionUs < 0) { 692 return ANDROID_UNKNOWN_TIME; 693 } else { 694 return mLastDecodedPositionUs; 695 } 696 } 697} 698 699 700CacheStatus_t AudioSfDecoder::getCacheRemaining(bool *eos) { 701 sp<NuCachedSource2> cachedSource = 702 static_cast<NuCachedSource2 *>(mDataSource.get()); 703 704 CacheStatus_t oldStatus = mCacheStatus; 705 706 status_t finalStatus; 707 size_t dataRemaining = cachedSource->approxDataRemaining(&finalStatus); 708 *eos = (finalStatus != OK); 709 710 CHECK_GE(mBitrate, 0); 711 712 int64_t dataRemainingUs = dataRemaining * 8000000ll / mBitrate; 713 //SL_LOGV("AudioSfDecoder::getCacheRemaining: approx %.2f secs remaining (eos=%d)", 714 // dataRemainingUs / 1E6, *eos); 715 716 if (*eos) { 717 // data is buffered up to the end of the stream, it can't get any better than this 718 mCacheStatus = kStatusHigh; 719 mCacheFill = 1000; 720 721 } else { 722 if (mDurationUsec > 0) { 723 // known duration: 724 725 // fill level is ratio of how much has been played + how much is 726 // cached, divided by total duration 727 uint32_t currentPositionUsec = getPositionUsec(); 728 if (currentPositionUsec == ANDROID_UNKNOWN_TIME) { 729 // if we don't know where we are, assume the worst for the fill ratio 730 currentPositionUsec = 0; 731 } 732 if (mDurationUsec > 0) { 733 mCacheFill = (int16_t) ((1000.0 734 * (double)(currentPositionUsec + dataRemainingUs) / mDurationUsec)); 735 } else { 736 mCacheFill = 0; 737 } 738 //SL_LOGV("cacheFill = %d", mCacheFill); 739 740 // cache status is evaluated against duration thresholds 741 if (dataRemainingUs > DURATION_CACHED_HIGH_MS*1000) { 742 mCacheStatus = kStatusHigh; 743 //LOGV("high"); 744 } else if (dataRemainingUs > DURATION_CACHED_MED_MS*1000) { 745 //LOGV("enough"); 746 mCacheStatus = kStatusEnough; 747 } else if (dataRemainingUs < DURATION_CACHED_LOW_MS*1000) { 748 //LOGV("low"); 749 mCacheStatus = kStatusLow; 750 } else { 751 
mCacheStatus = kStatusIntermediate; 752 } 753 754 } else { 755 // unknown duration: 756 757 // cache status is evaluated against cache amount thresholds 758 // (no duration so we don't have the bitrate either, could be derived from format?) 759 if (dataRemaining > SIZE_CACHED_HIGH_BYTES) { 760 mCacheStatus = kStatusHigh; 761 } else if (dataRemaining > SIZE_CACHED_MED_BYTES) { 762 mCacheStatus = kStatusEnough; 763 } else if (dataRemaining < SIZE_CACHED_LOW_BYTES) { 764 mCacheStatus = kStatusLow; 765 } else { 766 mCacheStatus = kStatusIntermediate; 767 } 768 } 769 770 } 771 772 if (oldStatus != mCacheStatus) { 773 notifyStatus(); 774 } 775 776 if (abs(mCacheFill - mLastNotifiedCacheFill) > mCacheFillNotifThreshold) { 777 notifyCacheFill(); 778 } 779 780 return mCacheStatus; 781} 782 783 784void AudioSfDecoder::hasNewDecodeParams() { 785 786 if ((mAudioSource != 0) && mAudioSourceStarted) { 787 sp<MetaData> meta = mAudioSource->getFormat(); 788 789 SL_LOGV("old sample rate = %d, channel count = %d", mSampleRateHz, mChannelCount); 790 791 CHECK(meta->findInt32(kKeyChannelCount, &mChannelCount)); 792 int32_t sr; 793 CHECK(meta->findInt32(kKeySampleRate, &sr)); 794 mSampleRateHz = (uint32_t) sr; 795 SL_LOGV("format changed: new sample rate = %d, channel count = %d", 796 mSampleRateHz, mChannelCount); 797 798 { 799 android::Mutex::Autolock autoLock(mPcmFormatLock); 800 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_NUMCHANNELS] = mChannelCount; 801 mPcmFormatValues[ANDROID_KEY_INDEX_PCMFORMAT_SAMPLESPERSEC] = mSampleRateHz; 802 } 803 } 804 805 // alert users of those params 806 updateAudioSink(); 807} 808 809} // namespace android 810