FastMixer.cpp revision 0a14c4ce1a41bc09eb7855fa531a3af629a69139
/*
 * Copyright (C) 2012 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "FastMixer"
//#define LOG_NDEBUG 0

#include <sys/atomics.h>
#include <time.h>
#include <utils/Log.h>
#include <utils/Trace.h>
#include <system/audio.h>
#ifdef FAST_MIXER_STATISTICS
#include <cpustats/CentralTendencyStatistics.h>
#ifdef CPU_FREQUENCY_STATISTICS
#include <cpustats/ThreadCpuUsage.h>
#endif
#endif
#include "AudioMixer.h"
#include "FastMixer.h"

#define FAST_HOT_IDLE_NS    1000000L    // 1 ms: time to sleep while hot idling
#define FAST_DEFAULT_NS   999999999L    // ~1 sec: default time to sleep
#define MAX_WARMUP_CYCLES         10    // maximum number of loop cycles to wait for warmup

namespace android {

// Fast mixer thread
bool FastMixer::threadLoop()
{
    static const FastMixerState initial;
    const FastMixerState *previous = &initial, *current = &initial;
    FastMixerState preIdle;         // copy of state before we went into idle
    struct timespec oldTs = {0, 0};
    bool oldTsValid = false;
    long slopNs = 0;        // accumulated time we've woken up too early (> 0) or too late (< 0)
    long sleepNs = -1;      // -1: busy wait, 0: sched_yield, > 0: nanosleep
    int fastTrackNames[FastMixerState::kMaxFastTracks]; // handles used by mixer to identify tracks
    int generations[FastMixerState::kMaxFastTracks];    // last observed mFastTracks[i].mGeneration
    unsigned i;
    for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
        fastTrackNames[i] = -1;
        generations[i] = 0;
    }
    NBAIO_Sink *outputSink = NULL;
    int outputSinkGen = 0;
    AudioMixer* mixer = NULL;
    short *mixBuffer = NULL;
    enum {UNDEFINED, MIXED, ZEROED} mixBufferState = UNDEFINED;
    NBAIO_Format format = Format_Invalid;
    unsigned sampleRate = 0;
    int fastTracksGen = 0;
    long periodNs = 0;      // expected period; the time required to render one mix buffer
    long underrunNs = 0;    // underrun likely when write cycle is greater than this value
    long overrunNs = 0;     // overrun likely when write cycle is less than this value
    long warmupNs = 0;      // warmup complete when write cycle is greater than this value
    FastMixerDumpState dummyDumpState, *dumpState = &dummyDumpState;
    bool ignoreNextOverrun = true;  // used to ignore initial overrun and first after an underrun
#ifdef FAST_MIXER_STATISTICS
    struct timespec oldLoad = {0, 0};   // previous value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
    bool oldLoadValid = false;          // whether oldLoad is valid
    uint32_t bounds = 0;
    bool full = false;                  // whether we have collected at least kSamplingN samples
#ifdef CPU_FREQUENCY_STATISTICS
    ThreadCpuUsage tcu;                 // for reading the current CPU clock frequency in kHz
#endif
#endif
    unsigned coldGen = 0;   // last observed mColdGen
    bool isWarm = false;    // true means ready to mix, false means wait for warmup before mixing
    struct timespec measuredWarmupTs = {0, 0};  // how long it took for warmup to complete
    uint32_t warmupCycles = 0;  // counter of number of loop cycles required to warmup
    NBAIO_Sink* teeSink = NULL; // if non-NULL, then duplicate write() to this non-blocking sink
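
    // Each iteration of the main loop below has up to four phases, gated by the
    // current command: (1) sleep, yield, or busy wait according to sleepNs;
    // (2) poll the state queue and apply any state change; (3) mix fast tracks
    // into mixBuffer; (4) write mixBuffer to the output sink, then update timing
    // and statistics.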

    for (;;) {

        // either nanosleep, sched_yield, or busy wait
        if (sleepNs >= 0) {
            if (sleepNs > 0) {
                ALOG_ASSERT(sleepNs < 1000000000);
                const struct timespec req = {0, sleepNs};
                nanosleep(&req, NULL);
            } else {
                sched_yield();
            }
        }
        // default to long sleep for next cycle
        sleepNs = FAST_DEFAULT_NS;

        // poll for state change
        const FastMixerState *next = mSQ.poll();
        if (next == NULL) {
            // continue to use the default initial state until a real state is available
            ALOG_ASSERT(current == &initial && previous == &initial);
            next = current;
        }

        FastMixerState::Command command = next->mCommand;
        if (next != current) {

            // As soon as we learn of a new dump area, start using it
            dumpState = next->mDumpState != NULL ? next->mDumpState : &dummyDumpState;
            teeSink = next->mTeeSink;

            // We want to always have a valid reference to the previous (non-idle) state.
            // However, the state queue only guarantees access to current and previous states.
            // So when there is a transition from a non-idle state into an idle state, we make a
            // copy of the last known non-idle state so it is still available on return from idle.
            // The possible transitions are:
            //  non-idle -> non-idle    update previous from current in-place
            //  non-idle -> idle        update previous from copy of current
            //  idle     -> idle        don't update previous
            //  idle     -> non-idle    don't update previous
            if (!(current->mCommand & FastMixerState::IDLE)) {
                if (command & FastMixerState::IDLE) {
                    preIdle = *current;
                    current = &preIdle;
                    oldTsValid = false;
                    oldLoadValid = false;
                    ignoreNextOverrun = true;
                }
                previous = current;
            }
            current = next;
        }
#if !LOG_NDEBUG
        next = NULL;    // not referenced again
#endif

        dumpState->mCommand = command;

        switch (command) {
        case FastMixerState::INITIAL:
        case FastMixerState::HOT_IDLE:
            sleepNs = FAST_HOT_IDLE_NS;
            continue;
        case FastMixerState::COLD_IDLE:
            // only perform a cold idle command once
            // FIXME consider checking previous state and only perform if previous != COLD_IDLE
            if (current->mColdGen != coldGen) {
                int32_t *coldFutexAddr = current->mColdFutexAddr;
                ALOG_ASSERT(coldFutexAddr != NULL);
                // atomically decrement the futex; if it was already zero or negative,
                // block until the normal mixer wakes us by incrementing it
                int32_t old = android_atomic_dec(coldFutexAddr);
                if (old <= 0) {
                    __futex_syscall4(coldFutexAddr, FUTEX_WAIT_PRIVATE, old - 1, NULL);
                }
                // This may be overly conservative; there could be times that the normal mixer
                // requests such a brief cold idle that it doesn't require resetting this flag.
                isWarm = false;
                measuredWarmupTs.tv_sec = 0;
                measuredWarmupTs.tv_nsec = 0;
                warmupCycles = 0;
                sleepNs = -1;
                coldGen = current->mColdGen;
                bounds = 0;
                full = false;
                oldTsValid = !clock_gettime(CLOCK_MONOTONIC, &oldTs);
            } else {
                sleepNs = FAST_HOT_IDLE_NS;
            }
            continue;
        case FastMixerState::EXIT:
            delete mixer;
            delete[] mixBuffer;
            return false;
        case FastMixerState::MIX:
        case FastMixerState::WRITE:
        case FastMixerState::MIX_WRITE:
            break;
        default:
            LOG_FATAL("bad command %d", command);
        }
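
        // State changes are detected by comparing generation counters rather than by
        // deep-comparing the state itself: mColdGen, mOutputSinkGen, mFastTracksGen,
        // and each track's mGeneration are incremented by the writer of the state
        // queue whenever the corresponding item changes, so integer compares suffice.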
        // there is a non-idle state available to us; did the state change?
        size_t frameCount = current->mFrameCount;
        if (current != previous) {

            // handle state change here, but since we want to diff the state,
            // we're prepared for previous == &initial the first time through
            unsigned previousTrackMask;

            // check for change in output HAL configuration
            NBAIO_Format previousFormat = format;
            if (current->mOutputSinkGen != outputSinkGen) {
                outputSink = current->mOutputSink;
                outputSinkGen = current->mOutputSinkGen;
                if (outputSink == NULL) {
                    format = Format_Invalid;
                    sampleRate = 0;
                } else {
                    format = outputSink->format();
                    sampleRate = Format_sampleRate(format);
                    ALOG_ASSERT(Format_channelCount(format) == 2);
                }
                dumpState->mSampleRate = sampleRate;
            }

            if ((format != previousFormat) || (frameCount != previous->mFrameCount)) {
                // FIXME to avoid priority inversion, don't delete here
                delete mixer;
                mixer = NULL;
                delete[] mixBuffer;
                mixBuffer = NULL;
                if (frameCount > 0 && sampleRate > 0) {
                    // FIXME new may block for unbounded time at internal mutex of the heap
                    // implementation; it would be better to have normal mixer allocate for us
                    // to avoid blocking here and to prevent possible priority inversion
                    mixer = new AudioMixer(frameCount, sampleRate, FastMixerState::kMaxFastTracks);
                    mixBuffer = new short[frameCount * 2];
                    periodNs = (frameCount * 1000000000LL) / sampleRate;    // 1.00 * period
                    underrunNs = (frameCount * 1750000000LL) / sampleRate;  // 1.75 * period
                    overrunNs = (frameCount * 250000000LL) / sampleRate;    // 0.25 * period
                    warmupNs = (frameCount * 500000000LL) / sampleRate;     // 0.50 * period
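                    // For example, with frameCount = 192 at sampleRate = 48000 (values
                    // are illustrative), the period is 4 ms, so an underrun is suspected
                    // when a cycle exceeds 7 ms, an overrun when it completes in under
                    // 1 ms, and warmup when a single write() blocks longer than 2 ms.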
                } else {
                    periodNs = 0;
                    underrunNs = 0;
                    overrunNs = 0;
                }
                mixBufferState = UNDEFINED;
#if !LOG_NDEBUG
                for (i = 0; i < FastMixerState::kMaxFastTracks; ++i) {
                    fastTrackNames[i] = -1;
                }
#endif
                // we need to reconfigure all active tracks
                previousTrackMask = 0;
                fastTracksGen = current->mFastTracksGen - 1;
                dumpState->mFrameCount = frameCount;
            } else {
                previousTrackMask = previous->mTrackMask;
            }

            // check for change in active track set
            unsigned currentTrackMask = current->mTrackMask;
            dumpState->mTrackMask = currentTrackMask;
            if (current->mFastTracksGen != fastTracksGen) {
                ALOG_ASSERT(mixBuffer != NULL);
                int name;

                // process removed tracks first to avoid running out of track names
                unsigned removedTracks = previousTrackMask & ~currentTrackMask;
                while (removedTracks != 0) {
                    i = __builtin_ctz(removedTracks);
                    removedTracks &= ~(1 << i);
                    const FastTrack* fastTrack = &current->mFastTracks[i];
                    ALOG_ASSERT(fastTrack->mBufferProvider == NULL);
                    if (mixer != NULL) {
                        name = fastTrackNames[i];
                        ALOG_ASSERT(name >= 0);
                        mixer->deleteTrackName(name);
                    }
#if !LOG_NDEBUG
                    fastTrackNames[i] = -1;
#endif
                    // don't reset track dump state, since other side is ignoring it
                    generations[i] = fastTrack->mGeneration;
                }

                // now process added tracks
                unsigned addedTracks = currentTrackMask & ~previousTrackMask;
                while (addedTracks != 0) {
                    i = __builtin_ctz(addedTracks);
                    addedTracks &= ~(1 << i);
                    const FastTrack* fastTrack = &current->mFastTracks[i];
                    AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
                    ALOG_ASSERT(bufferProvider != NULL && fastTrackNames[i] == -1);
                    if (mixer != NULL) {
                        // request a track name, using the default stereo channel mask
                        name = mixer->getTrackName(AUDIO_CHANNEL_OUT_STEREO);
                        ALOG_ASSERT(name >= 0);
                        fastTrackNames[i] = name;
                        mixer->setBufferProvider(name, bufferProvider);
                        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::MAIN_BUFFER,
                                (void *) mixBuffer);
                        // newly allocated track names default to full scale volume
                        if (fastTrack->mSampleRate != 0 && fastTrack->mSampleRate != sampleRate) {
                            mixer->setParameter(name, AudioMixer::RESAMPLE,
                                    AudioMixer::SAMPLE_RATE, (void*) fastTrack->mSampleRate);
                        }
                        mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
                                (void *) fastTrack->mChannelMask);
                        mixer->enable(name);
                    }
                    generations[i] = fastTrack->mGeneration;
                }

                // finally process modified tracks; these use the same slot
                // but may have a different buffer provider or volume provider
                unsigned modifiedTracks = currentTrackMask & previousTrackMask;
                while (modifiedTracks != 0) {
                    i = __builtin_ctz(modifiedTracks);
                    modifiedTracks &= ~(1 << i);
                    const FastTrack* fastTrack = &current->mFastTracks[i];
                    if (fastTrack->mGeneration != generations[i]) {
                        AudioBufferProvider *bufferProvider = fastTrack->mBufferProvider;
                        ALOG_ASSERT(bufferProvider != NULL);
                        if (mixer != NULL) {
                            name = fastTrackNames[i];
                            ALOG_ASSERT(name >= 0);
                            mixer->setBufferProvider(name, bufferProvider);
                            if (fastTrack->mVolumeProvider == NULL) {
                                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
                                        (void *)0x1000);
                                mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
                                        (void *)0x1000);
                            }
                            if (fastTrack->mSampleRate != 0 &&
                                    fastTrack->mSampleRate != sampleRate) {
                                mixer->setParameter(name, AudioMixer::RESAMPLE,
                                        AudioMixer::SAMPLE_RATE, (void*) fastTrack->mSampleRate);
                            } else {
                                mixer->setParameter(name, AudioMixer::RESAMPLE,
                                        AudioMixer::REMOVE, NULL);
                            }
                            mixer->setParameter(name, AudioMixer::TRACK, AudioMixer::CHANNEL_MASK,
                                    (void *) fastTrack->mChannelMask);
                            // already enabled
                        }
                        generations[i] = fastTrack->mGeneration;
                    }
                }

                fastTracksGen = current->mFastTracksGen;

                dumpState->mNumTracks = popcount(currentTrackMask);
            }

#if 1   // FIXME shouldn't need this
            // only process state change once
            previous = current;
#endif
        }

        // do work using current state here
        if ((command & FastMixerState::MIX) && (mixer != NULL) && isWarm) {
            ALOG_ASSERT(mixBuffer != NULL);
            // for each track, update volume and check for underrun
            unsigned currentTrackMask = current->mTrackMask;
            while (currentTrackMask != 0) {
                i = __builtin_ctz(currentTrackMask);
                currentTrackMask &= ~(1 << i);
                const FastTrack* fastTrack = &current->mFastTracks[i];
                int name = fastTrackNames[i];
                ALOG_ASSERT(name >= 0);
                if (fastTrack->mVolumeProvider != NULL) {
                    uint32_t vlr = fastTrack->mVolumeProvider->getVolumeLR();
                    mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME0,
                            (void *)(vlr & 0xFFFF));
                    mixer->setParameter(name, AudioMixer::VOLUME, AudioMixer::VOLUME1,
                            (void *)(vlr >> 16));
                }
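                // getVolumeLR() presumably packs the left gain in the low 16 bits and
                // the right gain in the high 16 bits, matching VOLUME0/VOLUME1 above;
                // 0x1000, set earlier for tracks without a volume provider, is unity
                // gain in the mixer's 4.12 fixed-point volume format.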
                // FIXME The current implementation of framesReady() for fast tracks
                // takes a tryLock, which can block up to 1 ms.  If enough active tracks
                // all blocked in sequence, this would result in the overall fast mix
                // cycle being delayed.  Should use a non-blocking FIFO.
                size_t framesReady = fastTrack->mBufferProvider->framesReady();
#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
                // I wish we had formatted trace names
                char traceName[16];
                strcpy(traceName, "framesReady");
                // append the track index as a base-36 digit ('0'-'9', then 'A'-'Z')
                traceName[11] = i + (i < 10 ? '0' : 'A' - 10);
                traceName[12] = '\0';
                ATRACE_INT(traceName, framesReady);
#endif
                FastTrackDump *ftDump = &dumpState->mTracks[i];
                FastTrackUnderruns underruns = ftDump->mUnderruns;
                if (framesReady < frameCount) {
                    if (framesReady == 0) {
                        underruns.mBitFields.mEmpty++;
                        underruns.mBitFields.mMostRecent = UNDERRUN_EMPTY;
                        mixer->disable(name);
                    } else {
                        // allow mixing partial buffer
                        underruns.mBitFields.mPartial++;
                        underruns.mBitFields.mMostRecent = UNDERRUN_PARTIAL;
                        mixer->enable(name);
                    }
                } else {
                    underruns.mBitFields.mFull++;
                    underruns.mBitFields.mMostRecent = UNDERRUN_FULL;
                    mixer->enable(name);
                }
                ftDump->mUnderruns = underruns;
                ftDump->mFramesReady = framesReady;
            }
            // process() is CPU-bound
            mixer->process(AudioBufferProvider::kInvalidPTS);
            mixBufferState = MIXED;
        } else if (mixBufferState == MIXED) {
            mixBufferState = UNDEFINED;
        }
        bool attemptedWrite = false;
        //bool didFullWrite = false;    // dumpsys could display a count of partial writes
        if ((command & FastMixerState::WRITE) && (outputSink != NULL) && (mixBuffer != NULL)) {
            if (mixBufferState == UNDEFINED) {
                memset(mixBuffer, 0, frameCount * 2 * sizeof(short));
                mixBufferState = ZEROED;
            }
            if (teeSink != NULL) {
                (void) teeSink->write(mixBuffer, frameCount);
            }
            // FIXME write() is non-blocking and lock-free for a properly implemented NBAIO sink,
            // but this code should be modified to handle both non-blocking and blocking sinks
            dumpState->mWriteSequence++;
#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
            Tracer::traceBegin(ATRACE_TAG, "write");
#endif
            ssize_t framesWritten = outputSink->write(mixBuffer, frameCount);
#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
            Tracer::traceEnd(ATRACE_TAG);
#endif
            dumpState->mWriteSequence++;
            if (framesWritten >= 0) {
                ALOG_ASSERT(framesWritten <= frameCount);
                dumpState->mFramesWritten += framesWritten;
                //if ((size_t) framesWritten == frameCount) {
                //    didFullWrite = true;
                //}
            } else {
                dumpState->mWriteErrors++;
            }
            attemptedWrite = true;
            // FIXME count # of writes blocked excessively, CPU usage, etc. for dump
        }
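        // Note: a short (positive) count from a non-blocking NBAIO sink is not treated
        // as an error; the unwritten tail of mixBuffer is simply dropped for this cycle,
        // which shows up as mFramesWritten advancing by less than frameCount.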

        // To be exactly periodic, compute the next sleep time based on current time.
        // This code doesn't have long-term stability when the sink is non-blocking.
        // FIXME To avoid drift, use the local audio clock or watch the sink's fill status.
        struct timespec newTs;
        int rc = clock_gettime(CLOCK_MONOTONIC, &newTs);
        if (rc == 0) {
            if (oldTsValid) {
                time_t sec = newTs.tv_sec - oldTs.tv_sec;
                long nsec = newTs.tv_nsec - oldTs.tv_nsec;
                if (nsec < 0) {
                    --sec;
                    nsec += 1000000000;
                }
                // To avoid an initial underrun on fast tracks after exiting standby,
                // do not start pulling data from tracks and mixing until warmup is complete.
                // Warmup is considered complete after the earlier of:
                //      first successful single write() that blocks for more than warmupNs
                //      MAX_WARMUP_CYCLES write() attempts.
                // This is overly conservative, but to get better accuracy requires a new HAL API.
                if (!isWarm && attemptedWrite) {
                    measuredWarmupTs.tv_sec += sec;
                    measuredWarmupTs.tv_nsec += nsec;
                    if (measuredWarmupTs.tv_nsec >= 1000000000) {
                        measuredWarmupTs.tv_sec++;
                        measuredWarmupTs.tv_nsec -= 1000000000;
                    }
                    ++warmupCycles;
                    if ((attemptedWrite && nsec > warmupNs) ||
                            (warmupCycles >= MAX_WARMUP_CYCLES)) {
                        isWarm = true;
                        dumpState->mMeasuredWarmupTs = measuredWarmupTs;
                        dumpState->mWarmupCycles = warmupCycles;
                    }
                }
                if (sec > 0 || nsec > underrunNs) {
#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
                    ScopedTrace st(ATRACE_TAG, "underrun");
#endif
                    // FIXME only log occasionally
                    ALOGV("underrun: time since last cycle %d.%03ld sec",
                            (int) sec, nsec / 1000000L);
                    dumpState->mUnderruns++;
                    sleepNs = -1;
                    ignoreNextOverrun = true;
                } else if (nsec < overrunNs) {
                    if (ignoreNextOverrun) {
                        ignoreNextOverrun = false;
                    } else {
                        // FIXME only log occasionally
                        ALOGV("overrun: time since last cycle %d.%03ld sec",
                                (int) sec, nsec / 1000000L);
                        dumpState->mOverruns++;
                    }
                    // Code for a non-blocking audio HAL; the sleep time must be tuned
                    // to allow catching up after an underrun:
                    //     sleepNs = periodNs - overrunNs;
                    sleepNs = -1;
                } else {
                    sleepNs = -1;
                    ignoreNextOverrun = false;
                }
#ifdef FAST_MIXER_STATISTICS
                // advance the FIFO queue bounds
                size_t i = bounds & (FastMixerDumpState::kSamplingN - 1);
                bounds = (bounds & 0xFFFF0000) | ((bounds + 1) & 0xFFFF);
                if (full) {
                    bounds += 0x10000;
                } else if (!(bounds & (FastMixerDumpState::kSamplingN - 1))) {
                    full = true;
                }
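                // bounds packs two ring-buffer indices: the low 16 bits index the
                // newest open (in-progress) sample, the high 16 bits the oldest closed
                // (valid) sample; both wrap modulo 2^16 and are masked to kSamplingN-1
                // on use.  Once full, both halves advance together each cycle, so
                // dumpsys always sees exactly kSamplingN closed samples between them.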
                // compute the delta value of clock_gettime(CLOCK_MONOTONIC)
                uint32_t monotonicNs = nsec;
                if (sec > 0 && sec < 4) {
                    monotonicNs += sec * 1000000000;
                }
                // compute the raw CPU load = delta value of clock_gettime(CLOCK_THREAD_CPUTIME_ID)
                uint32_t loadNs = 0;
                struct timespec newLoad;
                rc = clock_gettime(CLOCK_THREAD_CPUTIME_ID, &newLoad);
                if (rc == 0) {
                    if (oldLoadValid) {
                        sec = newLoad.tv_sec - oldLoad.tv_sec;
                        nsec = newLoad.tv_nsec - oldLoad.tv_nsec;
                        if (nsec < 0) {
                            --sec;
                            nsec += 1000000000;
                        }
                        loadNs = nsec;
                        if (sec > 0 && sec < 4) {
                            loadNs += sec * 1000000000;
                        }
                    } else {
                        // first time through the loop
                        oldLoadValid = true;
                    }
                    oldLoad = newLoad;
                }
#ifdef CPU_FREQUENCY_STATISTICS
                // get the absolute value of CPU clock frequency in kHz
                int cpuNum = sched_getcpu();
                uint32_t kHz = tcu.getCpukHz(cpuNum);
                // pack the frequency into the upper 28 bits and the CPU number into the low 4
                kHz = (kHz << 4) | (cpuNum & 0xF);
#endif
                // save values in FIFO queues for dumpsys
                // these stores #1, #2, #3 are not atomic with respect to each other,
                // or with respect to store #4 below
                dumpState->mMonotonicNs[i] = monotonicNs;
                dumpState->mLoadNs[i] = loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
                dumpState->mCpukHz[i] = kHz;
#endif
                // this store #4 is not atomic with respect to stores #1, #2, #3 above, but
                // the newest open and oldest closed halves are atomic with respect to each other
                dumpState->mBounds = bounds;
#if defined(ATRACE_TAG) && (ATRACE_TAG != ATRACE_TAG_NEVER)
                ATRACE_INT("cycle_ms", monotonicNs / 1000000);
                ATRACE_INT("load_us", loadNs / 1000);
#endif
#endif
            } else {
                // first time through the loop
                oldTsValid = true;
                sleepNs = periodNs;
                ignoreNextOverrun = true;
            }
            oldTs = newTs;
        } else {
            // monotonic clock is broken
            oldTsValid = false;
            sleepNs = periodNs;
        }

    }   // for (;;)

    // never return 'true'; Thread::_threadLoop() locks mutex which can result in priority inversion
}

FastMixerDumpState::FastMixerDumpState() :
    mCommand(FastMixerState::INITIAL), mWriteSequence(0), mFramesWritten(0),
    mNumTracks(0), mWriteErrors(0), mUnderruns(0), mOverruns(0),
    mSampleRate(0), mFrameCount(0), /* mMeasuredWarmupTs({0, 0}), */ mWarmupCycles(0),
    mTrackMask(0)
#ifdef FAST_MIXER_STATISTICS
    , mBounds(0)
#endif
{
    mMeasuredWarmupTs.tv_sec = 0;
    mMeasuredWarmupTs.tv_nsec = 0;
    // sample arrays aren't accessed atomically with respect to the bounds,
    // so clearing reduces chance for dumpsys to read random uninitialized samples
    memset(&mMonotonicNs, 0, sizeof(mMonotonicNs));
    memset(&mLoadNs, 0, sizeof(mLoadNs));
#ifdef CPU_FREQUENCY_STATISTICS
    memset(&mCpukHz, 0, sizeof(mCpukHz));
#endif
}

FastMixerDumpState::~FastMixerDumpState()
{
}

void FastMixerDumpState::dump(int fd)
{
    if (mCommand == FastMixerState::INITIAL) {
        fdprintf(fd, "FastMixer not initialized\n");
        return;
    }
#define COMMAND_MAX 32
    char string[COMMAND_MAX];
    switch (mCommand) {
    case FastMixerState::INITIAL:
        strcpy(string, "INITIAL");
        break;
    case FastMixerState::HOT_IDLE:
        strcpy(string, "HOT_IDLE");
        break;
    case FastMixerState::COLD_IDLE:
        strcpy(string, "COLD_IDLE");
        break;
    case FastMixerState::EXIT:
        strcpy(string, "EXIT");
        break;
    case FastMixerState::MIX:
        strcpy(string, "MIX");
        break;
    case FastMixerState::WRITE:
        strcpy(string, "WRITE");
        break;
    case FastMixerState::MIX_WRITE:
        strcpy(string, "MIX_WRITE");
        break;
    default:
        snprintf(string, COMMAND_MAX, "%d", mCommand);
        break;
    }
    double measuredWarmupMs = (mMeasuredWarmupTs.tv_sec * 1000.0) +
            (mMeasuredWarmupTs.tv_nsec / 1000000.0);
    double mixPeriodSec = (double) mFrameCount / (double) mSampleRate;
    fdprintf(fd, "FastMixer command=%s writeSequence=%u framesWritten=%u\n"
                 "          numTracks=%u writeErrors=%u underruns=%u overruns=%u\n"
                 "          sampleRate=%u frameCount=%u measuredWarmup=%.3g ms, warmupCycles=%u\n"
                 "          mixPeriod=%.2f ms\n",
                 string, mWriteSequence, mFramesWritten,
                 mNumTracks, mWriteErrors, mUnderruns, mOverruns,
                 mSampleRate, mFrameCount, measuredWarmupMs, mWarmupCycles,
                 mixPeriodSec * 1e3);
#ifdef FAST_MIXER_STATISTICS
    // find the interval of valid samples
    uint32_t bounds = mBounds;
    uint32_t newestOpen = bounds & 0xFFFF;
    uint32_t oldestClosed = bounds >> 16;
    uint32_t n = (newestOpen - oldestClosed) & 0xFFFF;
    if (n > kSamplingN) {
        ALOGE("too many samples %u", n);
        n = kSamplingN;
    }
    // statistics for monotonic (wall clock) time, thread raw CPU load in time, CPU clock
    // frequency, and adjusted CPU load in MHz normalized for CPU clock frequency
    CentralTendencyStatistics wall, loadNs;
#ifdef CPU_FREQUENCY_STATISTICS
    CentralTendencyStatistics kHz, loadMHz;
    uint32_t previousCpukHz = 0;
#endif
    // loop over all the samples
    for (; n > 0; --n) {
        size_t i = oldestClosed++ & (kSamplingN - 1);
        uint32_t wallNs = mMonotonicNs[i];
        wall.sample(wallNs);
        uint32_t sampleLoadNs = mLoadNs[i];
        loadNs.sample(sampleLoadNs);
#ifdef CPU_FREQUENCY_STATISTICS
        uint32_t sampleCpukHz = mCpukHz[i];
        // skip bad kHz samples
        if ((sampleCpukHz & ~0xF) != 0) {
            kHz.sample(sampleCpukHz >> 4);
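            // Derivation: loadNs [ns] * cpukHz [kHz] * 1e-12 gives the megacycles the
            // thread consumed during that cycle; dividing by the nominal mix period in
            // seconds converts this to an average load in MHz.  Samples where the CPU
            // number or clock frequency changed since the previous sample are skipped,
            // since their cycle estimate would be meaningless.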
            if (sampleCpukHz == previousCpukHz) {
                double megacycles = (double) sampleLoadNs * (double) (sampleCpukHz >> 4) * 1e-12;
                double adjMHz = megacycles / mixPeriodSec;  // _not_ wallNs * 1e9
                loadMHz.sample(adjMHz);
            }
        }
        previousCpukHz = sampleCpukHz;
#endif
    }
    fdprintf(fd, "Simple moving statistics over last %.1f seconds:\n", wall.n() * mixPeriodSec);
    fdprintf(fd, "  wall clock time in ms per mix cycle:\n"
                 "    mean=%.2f min=%.2f max=%.2f stddev=%.2f\n",
                 wall.mean()*1e-6, wall.minimum()*1e-6, wall.maximum()*1e-6, wall.stddev()*1e-6);
    fdprintf(fd, "  raw CPU load in us per mix cycle:\n"
                 "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
                 loadNs.mean()*1e-3, loadNs.minimum()*1e-3, loadNs.maximum()*1e-3,
                 loadNs.stddev()*1e-3);
#ifdef CPU_FREQUENCY_STATISTICS
    fdprintf(fd, "  CPU clock frequency in MHz:\n"
                 "    mean=%.0f min=%.0f max=%.0f stddev=%.0f\n",
                 kHz.mean()*1e-3, kHz.minimum()*1e-3, kHz.maximum()*1e-3, kHz.stddev()*1e-3);
    fdprintf(fd, "  adjusted CPU load in MHz (i.e. normalized for CPU clock frequency):\n"
                 "    mean=%.1f min=%.1f max=%.1f stddev=%.1f\n",
                 loadMHz.mean(), loadMHz.minimum(), loadMHz.maximum(), loadMHz.stddev());
#endif
#endif
    // The active track mask and track states are updated non-atomically.
    // So if we relied on isActive to decide whether to display,
    // then we might display an obsolete track or omit an active track.
    // Instead we always display all tracks, with an indication
    // of whether we think the track is active.
    uint32_t trackMask = mTrackMask;
    fdprintf(fd, "Fast tracks: kMaxFastTracks=%u activeMask=%#x\n",
            FastMixerState::kMaxFastTracks, trackMask);
    fdprintf(fd, "Index Active Full Partial Empty Recent Ready\n");
    for (uint32_t i = 0; i < FastMixerState::kMaxFastTracks; ++i, trackMask >>= 1) {
        bool isActive = trackMask & 1;
        const FastTrackDump *ftDump = &mTracks[i];
        const FastTrackUnderruns& underruns = ftDump->mUnderruns;
        const char *mostRecent;
        switch (underruns.mBitFields.mMostRecent) {
        case UNDERRUN_FULL:
            mostRecent = "full";
            break;
        case UNDERRUN_PARTIAL:
            mostRecent = "partial";
            break;
        case UNDERRUN_EMPTY:
            mostRecent = "empty";
            break;
        default:
            mostRecent = "?";
            break;
        }
        fdprintf(fd, "%5u %6s %4u %7u %5u %7s %5u\n", i, isActive ? "yes" : "no",
                (underruns.mBitFields.mFull) & UNDERRUN_MASK,
                (underruns.mBitFields.mPartial) & UNDERRUN_MASK,
                (underruns.mBitFields.mEmpty) & UNDERRUN_MASK,
                mostRecent, ftDump->mFramesReady);
    }
}

}   // namespace android