// DispSync.cpp revision fbc80aef0ba1b11982cf4ca88d218b65b6eca0f3
1/* 2 * Copyright (C) 2013 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#define ATRACE_TAG ATRACE_TAG_GRAPHICS 18//#define LOG_NDEBUG 0 19 20// This is needed for stdint.h to define INT64_MAX in C++ 21#define __STDC_LIMIT_MACROS 22 23#include <math.h> 24 25#include <algorithm> 26 27#include <log/log.h> 28#include <utils/String8.h> 29#include <utils/Thread.h> 30#include <utils/Trace.h> 31#include <utils/Vector.h> 32 33#include <ui/FenceTime.h> 34 35#include "DispSync.h" 36#include "SurfaceFlinger.h" 37#include "EventLog/EventLog.h" 38 39using std::max; 40using std::min; 41 42namespace android { 43 44// Setting this to true enables verbose tracing that can be used to debug 45// vsync event model or phase issues. 46static const bool kTraceDetailedInfo = false; 47 48// Setting this to true adds a zero-phase tracer for correlating with hardware 49// vsync events 50static const bool kEnableZeroPhaseTracer = false; 51 52// This is the threshold used to determine when hardware vsync events are 53// needed to re-synchronize the software vsync model with the hardware. The 54// error metric used is the mean of the squared difference between each 55// present time and the nearest software-predicted vsync. 
56static const nsecs_t kErrorThreshold = 160000000000; // 400 usec squared 57 58#undef LOG_TAG 59#define LOG_TAG "DispSyncThread" 60class DispSyncThread: public Thread { 61public: 62 63 explicit DispSyncThread(const char* name): 64 mName(name), 65 mStop(false), 66 mPeriod(0), 67 mPhase(0), 68 mReferenceTime(0), 69 mWakeupLatency(0), 70 mFrameNumber(0) {} 71 72 virtual ~DispSyncThread() {} 73 74 void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) { 75 if (kTraceDetailedInfo) ATRACE_CALL(); 76 Mutex::Autolock lock(mMutex); 77 mPeriod = period; 78 mPhase = phase; 79 mReferenceTime = referenceTime; 80 ALOGV("[%s] updateModel: mPeriod = %" PRId64 ", mPhase = %" PRId64 81 " mReferenceTime = %" PRId64, mName, ns2us(mPeriod), 82 ns2us(mPhase), ns2us(mReferenceTime)); 83 mCond.signal(); 84 } 85 86 void stop() { 87 if (kTraceDetailedInfo) ATRACE_CALL(); 88 Mutex::Autolock lock(mMutex); 89 mStop = true; 90 mCond.signal(); 91 } 92 93 virtual bool threadLoop() { 94 status_t err; 95 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 96 97 while (true) { 98 Vector<CallbackInvocation> callbackInvocations; 99 100 nsecs_t targetTime = 0; 101 102 { // Scope for lock 103 Mutex::Autolock lock(mMutex); 104 105 if (kTraceDetailedInfo) { 106 ATRACE_INT64("DispSync:Frame", mFrameNumber); 107 } 108 ALOGV("[%s] Frame %" PRId64, mName, mFrameNumber); 109 ++mFrameNumber; 110 111 if (mStop) { 112 return false; 113 } 114 115 if (mPeriod == 0) { 116 err = mCond.wait(mMutex); 117 if (err != NO_ERROR) { 118 ALOGE("error waiting for new events: %s (%d)", 119 strerror(-err), err); 120 return false; 121 } 122 continue; 123 } 124 125 targetTime = computeNextEventTimeLocked(now); 126 127 bool isWakeup = false; 128 129 if (now < targetTime) { 130 if (kTraceDetailedInfo) ATRACE_NAME("DispSync waiting"); 131 132 if (targetTime == INT64_MAX) { 133 ALOGV("[%s] Waiting forever", mName); 134 err = mCond.wait(mMutex); 135 } else { 136 ALOGV("[%s] Waiting until %" PRId64, mName, 137 
ns2us(targetTime)); 138 err = mCond.waitRelative(mMutex, targetTime - now); 139 } 140 141 if (err == TIMED_OUT) { 142 isWakeup = true; 143 } else if (err != NO_ERROR) { 144 ALOGE("error waiting for next event: %s (%d)", 145 strerror(-err), err); 146 return false; 147 } 148 } 149 150 now = systemTime(SYSTEM_TIME_MONOTONIC); 151 152 // Don't correct by more than 1.5 ms 153 static const nsecs_t kMaxWakeupLatency = us2ns(1500); 154 155 if (isWakeup) { 156 mWakeupLatency = ((mWakeupLatency * 63) + 157 (now - targetTime)) / 64; 158 mWakeupLatency = min(mWakeupLatency, kMaxWakeupLatency); 159 if (kTraceDetailedInfo) { 160 ATRACE_INT64("DispSync:WakeupLat", now - targetTime); 161 ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency); 162 } 163 } 164 165 callbackInvocations = gatherCallbackInvocationsLocked(now); 166 } 167 168 if (callbackInvocations.size() > 0) { 169 fireCallbackInvocations(callbackInvocations); 170 } 171 } 172 173 return false; 174 } 175 176 status_t addEventListener(const char* name, nsecs_t phase, 177 const sp<DispSync::Callback>& callback) { 178 if (kTraceDetailedInfo) ATRACE_CALL(); 179 Mutex::Autolock lock(mMutex); 180 181 for (size_t i = 0; i < mEventListeners.size(); i++) { 182 if (mEventListeners[i].mCallback == callback) { 183 return BAD_VALUE; 184 } 185 } 186 187 EventListener listener; 188 listener.mName = name; 189 listener.mPhase = phase; 190 listener.mCallback = callback; 191 192 // We want to allow the firstmost future event to fire without 193 // allowing any past events to fire 194 listener.mLastEventTime = systemTime() - mPeriod / 2 + mPhase - 195 mWakeupLatency; 196 197 mEventListeners.push(listener); 198 199 mCond.signal(); 200 201 return NO_ERROR; 202 } 203 204 status_t removeEventListener(const sp<DispSync::Callback>& callback) { 205 if (kTraceDetailedInfo) ATRACE_CALL(); 206 Mutex::Autolock lock(mMutex); 207 208 for (size_t i = 0; i < mEventListeners.size(); i++) { 209 if (mEventListeners[i].mCallback == callback) { 210 
mEventListeners.removeAt(i); 211 mCond.signal(); 212 return NO_ERROR; 213 } 214 } 215 216 return BAD_VALUE; 217 } 218 219 // This method is only here to handle the !SurfaceFlinger::hasSyncFramework 220 // case. 221 bool hasAnyEventListeners() { 222 if (kTraceDetailedInfo) ATRACE_CALL(); 223 Mutex::Autolock lock(mMutex); 224 return !mEventListeners.empty(); 225 } 226 227private: 228 229 struct EventListener { 230 const char* mName; 231 nsecs_t mPhase; 232 nsecs_t mLastEventTime; 233 sp<DispSync::Callback> mCallback; 234 }; 235 236 struct CallbackInvocation { 237 sp<DispSync::Callback> mCallback; 238 nsecs_t mEventTime; 239 }; 240 241 nsecs_t computeNextEventTimeLocked(nsecs_t now) { 242 if (kTraceDetailedInfo) ATRACE_CALL(); 243 ALOGV("[%s] computeNextEventTimeLocked", mName); 244 nsecs_t nextEventTime = INT64_MAX; 245 for (size_t i = 0; i < mEventListeners.size(); i++) { 246 nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i], 247 now); 248 249 if (t < nextEventTime) { 250 nextEventTime = t; 251 } 252 } 253 254 ALOGV("[%s] nextEventTime = %" PRId64, mName, ns2us(nextEventTime)); 255 return nextEventTime; 256 } 257 258 Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) { 259 if (kTraceDetailedInfo) ATRACE_CALL(); 260 ALOGV("[%s] gatherCallbackInvocationsLocked @ %" PRId64, mName, 261 ns2us(now)); 262 263 Vector<CallbackInvocation> callbackInvocations; 264 nsecs_t onePeriodAgo = now - mPeriod; 265 266 for (size_t i = 0; i < mEventListeners.size(); i++) { 267 nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i], 268 onePeriodAgo); 269 270 if (t < now) { 271 CallbackInvocation ci; 272 ci.mCallback = mEventListeners[i].mCallback; 273 ci.mEventTime = t; 274 ALOGV("[%s] [%s] Preparing to fire", mName, 275 mEventListeners[i].mName); 276 callbackInvocations.push(ci); 277 mEventListeners.editItemAt(i).mLastEventTime = t; 278 } 279 } 280 281 return callbackInvocations; 282 } 283 284 nsecs_t 
computeListenerNextEventTimeLocked(const EventListener& listener, 285 nsecs_t baseTime) { 286 if (kTraceDetailedInfo) ATRACE_CALL(); 287 ALOGV("[%s] [%s] computeListenerNextEventTimeLocked(%" PRId64 ")", 288 mName, listener.mName, ns2us(baseTime)); 289 290 nsecs_t lastEventTime = listener.mLastEventTime + mWakeupLatency; 291 ALOGV("[%s] lastEventTime: %" PRId64, mName, ns2us(lastEventTime)); 292 if (baseTime < lastEventTime) { 293 baseTime = lastEventTime; 294 ALOGV("[%s] Clamping baseTime to lastEventTime -> %" PRId64, mName, 295 ns2us(baseTime)); 296 } 297 298 baseTime -= mReferenceTime; 299 ALOGV("[%s] Relative baseTime = %" PRId64, mName, ns2us(baseTime)); 300 nsecs_t phase = mPhase + listener.mPhase; 301 ALOGV("[%s] Phase = %" PRId64, mName, ns2us(phase)); 302 baseTime -= phase; 303 ALOGV("[%s] baseTime - phase = %" PRId64, mName, ns2us(baseTime)); 304 305 // If our previous time is before the reference (because the reference 306 // has since been updated), the division by mPeriod will truncate 307 // towards zero instead of computing the floor. Since in all cases 308 // before the reference we want the next time to be effectively now, we 309 // set baseTime to -mPeriod so that numPeriods will be -1. 310 // When we add 1 and the phase, we will be at the correct event time for 311 // this period. 
312 if (baseTime < 0) { 313 ALOGV("[%s] Correcting negative baseTime", mName); 314 baseTime = -mPeriod; 315 } 316 317 nsecs_t numPeriods = baseTime / mPeriod; 318 ALOGV("[%s] numPeriods = %" PRId64, mName, numPeriods); 319 nsecs_t t = (numPeriods + 1) * mPeriod + phase; 320 ALOGV("[%s] t = %" PRId64, mName, ns2us(t)); 321 t += mReferenceTime; 322 ALOGV("[%s] Absolute t = %" PRId64, mName, ns2us(t)); 323 324 // Check that it's been slightly more than half a period since the last 325 // event so that we don't accidentally fall into double-rate vsyncs 326 if (t - listener.mLastEventTime < (3 * mPeriod / 5)) { 327 t += mPeriod; 328 ALOGV("[%s] Modifying t -> %" PRId64, mName, ns2us(t)); 329 } 330 331 t -= mWakeupLatency; 332 ALOGV("[%s] Corrected for wakeup latency -> %" PRId64, mName, ns2us(t)); 333 334 return t; 335 } 336 337 void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) { 338 if (kTraceDetailedInfo) ATRACE_CALL(); 339 for (size_t i = 0; i < callbacks.size(); i++) { 340 callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime); 341 } 342 } 343 344 const char* const mName; 345 346 bool mStop; 347 348 nsecs_t mPeriod; 349 nsecs_t mPhase; 350 nsecs_t mReferenceTime; 351 nsecs_t mWakeupLatency; 352 353 int64_t mFrameNumber; 354 355 Vector<EventListener> mEventListeners; 356 357 Mutex mMutex; 358 Condition mCond; 359}; 360 361#undef LOG_TAG 362#define LOG_TAG "DispSync" 363 364class ZeroPhaseTracer : public DispSync::Callback { 365public: 366 ZeroPhaseTracer() : mParity(false) {} 367 368 virtual void onDispSyncEvent(nsecs_t /*when*/) { 369 mParity = !mParity; 370 ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 
1 : 0); 371 } 372 373private: 374 bool mParity; 375}; 376 377DispSync::DispSync(const char* name) : 378 mName(name), 379 mRefreshSkipCount(0), 380 mThread(new DispSyncThread(name)), 381 mIgnorePresentFences(!SurfaceFlinger::hasSyncFramework){ 382 383 mPresentTimeOffset = SurfaceFlinger::dispSyncPresentTimeOffset; 384 mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE); 385 // set DispSync to SCHED_FIFO to minimize jitter 386 struct sched_param param = {0}; 387 param.sched_priority = 2; 388 if (sched_setscheduler(mThread->getTid(), SCHED_FIFO, ¶m) != 0) { 389 ALOGE("Couldn't set SCHED_FIFO for DispSyncThread"); 390 } 391 392 393 reset(); 394 beginResync(); 395 396 if (kTraceDetailedInfo) { 397 // If we're not getting present fences then the ZeroPhaseTracer 398 // would prevent HW vsync event from ever being turned off. 399 // Even if we're just ignoring the fences, the zero-phase tracing is 400 // not needed because any time there is an event registered we will 401 // turn on the HW vsync events. 
402 if (!mIgnorePresentFences && kEnableZeroPhaseTracer) { 403 addEventListener("ZeroPhaseTracer", 0, new ZeroPhaseTracer()); 404 } 405 } 406} 407 408DispSync::~DispSync() {} 409 410void DispSync::reset() { 411 Mutex::Autolock lock(mMutex); 412 413 mPhase = 0; 414 mReferenceTime = 0; 415 mModelUpdated = false; 416 mNumResyncSamples = 0; 417 mFirstResyncSample = 0; 418 mNumResyncSamplesSincePresent = 0; 419 resetErrorLocked(); 420} 421 422bool DispSync::addPresentFence(const std::shared_ptr<FenceTime>& fenceTime) { 423 Mutex::Autolock lock(mMutex); 424 425 mPresentFences[mPresentSampleOffset] = fenceTime; 426 mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES; 427 mNumResyncSamplesSincePresent = 0; 428 429 updateErrorLocked(); 430 431 return !mModelUpdated || mError > kErrorThreshold; 432} 433 434void DispSync::beginResync() { 435 Mutex::Autolock lock(mMutex); 436 ALOGV("[%s] beginResync", mName); 437 mModelUpdated = false; 438 mNumResyncSamples = 0; 439} 440 441bool DispSync::addResyncSample(nsecs_t timestamp) { 442 Mutex::Autolock lock(mMutex); 443 444 ALOGV("[%s] addResyncSample(%" PRId64 ")", mName, ns2us(timestamp)); 445 446 size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES; 447 mResyncSamples[idx] = timestamp; 448 if (mNumResyncSamples == 0) { 449 mPhase = 0; 450 mReferenceTime = timestamp; 451 ALOGV("[%s] First resync sample: mPeriod = %" PRId64 ", mPhase = 0, " 452 "mReferenceTime = %" PRId64, mName, ns2us(mPeriod), 453 ns2us(mReferenceTime)); 454 mThread->updateModel(mPeriod, mPhase, mReferenceTime); 455 } 456 457 if (mNumResyncSamples < MAX_RESYNC_SAMPLES) { 458 mNumResyncSamples++; 459 } else { 460 mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES; 461 } 462 463 updateModelLocked(); 464 465 if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) { 466 resetErrorLocked(); 467 } 468 469 if (mIgnorePresentFences) { 470 // If we don't have the sync framework we will never have 
471 // addPresentFence called. This means we have no way to know whether 472 // or not we're synchronized with the HW vsyncs, so we just request 473 // that the HW vsync events be turned on whenever we need to generate 474 // SW vsync events. 475 return mThread->hasAnyEventListeners(); 476 } 477 478 // Check against kErrorThreshold / 2 to add some hysteresis before having to 479 // resync again 480 bool modelLocked = mModelUpdated && mError < (kErrorThreshold / 2); 481 ALOGV("[%s] addResyncSample returning %s", mName, 482 modelLocked ? "locked" : "unlocked"); 483 return !modelLocked; 484} 485 486void DispSync::endResync() { 487} 488 489status_t DispSync::addEventListener(const char* name, nsecs_t phase, 490 const sp<Callback>& callback) { 491 Mutex::Autolock lock(mMutex); 492 return mThread->addEventListener(name, phase, callback); 493} 494 495void DispSync::setRefreshSkipCount(int count) { 496 Mutex::Autolock lock(mMutex); 497 ALOGD("setRefreshSkipCount(%d)", count); 498 mRefreshSkipCount = count; 499 updateModelLocked(); 500} 501 502status_t DispSync::removeEventListener(const sp<Callback>& callback) { 503 Mutex::Autolock lock(mMutex); 504 return mThread->removeEventListener(callback); 505} 506 507void DispSync::setPeriod(nsecs_t period) { 508 Mutex::Autolock lock(mMutex); 509 mPeriod = period; 510 mPhase = 0; 511 mReferenceTime = 0; 512 mThread->updateModel(mPeriod, mPhase, mReferenceTime); 513} 514 515nsecs_t DispSync::getPeriod() { 516 // lock mutex as mPeriod changes multiple times in updateModelLocked 517 Mutex::Autolock lock(mMutex); 518 return mPeriod; 519} 520 521void DispSync::updateModelLocked() { 522 ALOGV("[%s] updateModelLocked %zu", mName, mNumResyncSamples); 523 if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) { 524 ALOGV("[%s] Computing...", mName); 525 nsecs_t durationSum = 0; 526 nsecs_t minDuration = INT64_MAX; 527 nsecs_t maxDuration = 0; 528 for (size_t i = 1; i < mNumResyncSamples; i++) { 529 size_t idx = (mFirstResyncSample + i) % 
MAX_RESYNC_SAMPLES; 530 size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES; 531 nsecs_t duration = mResyncSamples[idx] - mResyncSamples[prev]; 532 durationSum += duration; 533 minDuration = min(minDuration, duration); 534 maxDuration = max(maxDuration, duration); 535 } 536 537 // Exclude the min and max from the average 538 durationSum -= minDuration + maxDuration; 539 mPeriod = durationSum / (mNumResyncSamples - 3); 540 541 ALOGV("[%s] mPeriod = %" PRId64, mName, ns2us(mPeriod)); 542 543 double sampleAvgX = 0; 544 double sampleAvgY = 0; 545 double scale = 2.0 * M_PI / double(mPeriod); 546 // Intentionally skip the first sample 547 for (size_t i = 1; i < mNumResyncSamples; i++) { 548 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES; 549 nsecs_t sample = mResyncSamples[idx] - mReferenceTime; 550 double samplePhase = double(sample % mPeriod) * scale; 551 sampleAvgX += cos(samplePhase); 552 sampleAvgY += sin(samplePhase); 553 } 554 555 sampleAvgX /= double(mNumResyncSamples - 1); 556 sampleAvgY /= double(mNumResyncSamples - 1); 557 558 mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale); 559 560 ALOGV("[%s] mPhase = %" PRId64, mName, ns2us(mPhase)); 561 562 if (mPhase < -(mPeriod / 2)) { 563 mPhase += mPeriod; 564 ALOGV("[%s] Adjusting mPhase -> %" PRId64, mName, ns2us(mPhase)); 565 } 566 567 if (kTraceDetailedInfo) { 568 ATRACE_INT64("DispSync:Period", mPeriod); 569 ATRACE_INT64("DispSync:Phase", mPhase + mPeriod / 2); 570 } 571 572 // Artificially inflate the period if requested. 573 mPeriod += mPeriod * mRefreshSkipCount; 574 575 mThread->updateModel(mPeriod, mPhase, mReferenceTime); 576 mModelUpdated = true; 577 } 578} 579 580void DispSync::updateErrorLocked() { 581 if (!mModelUpdated) { 582 return; 583 } 584 585 // Need to compare present fences against the un-adjusted refresh period, 586 // since they might arrive between two events. 
587 nsecs_t period = mPeriod / (1 + mRefreshSkipCount); 588 589 int numErrSamples = 0; 590 nsecs_t sqErrSum = 0; 591 592 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 593 // Only check for the cached value of signal time to avoid unecessary 594 // syscalls. It is the responsibility of the DispSync owner to 595 // call getSignalTime() periodically so the cache is updated when the 596 // fence signals. 597 nsecs_t time = mPresentFences[i]->getCachedSignalTime(); 598 if (time == Fence::SIGNAL_TIME_PENDING || 599 time == Fence::SIGNAL_TIME_INVALID) { 600 continue; 601 } 602 603 nsecs_t sample = time - mReferenceTime; 604 if (sample <= mPhase) { 605 continue; 606 } 607 608 nsecs_t sampleErr = (sample - mPhase) % period; 609 if (sampleErr > period / 2) { 610 sampleErr -= period; 611 } 612 sqErrSum += sampleErr * sampleErr; 613 numErrSamples++; 614 } 615 616 if (numErrSamples > 0) { 617 mError = sqErrSum / numErrSamples; 618 mZeroErrSamplesCount = 0; 619 } else { 620 mError = 0; 621 // Use mod ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT to avoid log spam. 622 mZeroErrSamplesCount++; 623 ALOGE_IF( 624 (mZeroErrSamplesCount % ACCEPTABLE_ZERO_ERR_SAMPLES_COUNT) == 0, 625 "No present times for model error."); 626 } 627 628 if (kTraceDetailedInfo) { 629 ATRACE_INT64("DispSync:Error", mError); 630 } 631} 632 633void DispSync::resetErrorLocked() { 634 mPresentSampleOffset = 0; 635 mError = 0; 636 mZeroErrSamplesCount = 0; 637 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 638 mPresentFences[i] = FenceTime::NO_FENCE; 639 } 640} 641 642nsecs_t DispSync::computeNextRefresh(int periodOffset) const { 643 Mutex::Autolock lock(mMutex); 644 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 645 nsecs_t phase = mReferenceTime + mPhase; 646 return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase; 647} 648 649void DispSync::dump(String8& result) const { 650 Mutex::Autolock lock(mMutex); 651 result.appendFormat("present fences are %s\n", 652 mIgnorePresentFences ? 
"ignored" : "used"); 653 result.appendFormat("mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n", 654 mPeriod, 1000000000.0 / mPeriod, mRefreshSkipCount); 655 result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase); 656 result.appendFormat("mError: %" PRId64 " ns (sqrt=%.1f)\n", 657 mError, sqrt(mError)); 658 result.appendFormat("mNumResyncSamplesSincePresent: %d (limit %d)\n", 659 mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT); 660 result.appendFormat("mNumResyncSamples: %zd (max %d)\n", 661 mNumResyncSamples, MAX_RESYNC_SAMPLES); 662 663 result.appendFormat("mResyncSamples:\n"); 664 nsecs_t previous = -1; 665 for (size_t i = 0; i < mNumResyncSamples; i++) { 666 size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES; 667 nsecs_t sampleTime = mResyncSamples[idx]; 668 if (i == 0) { 669 result.appendFormat(" %" PRId64 "\n", sampleTime); 670 } else { 671 result.appendFormat(" %" PRId64 " (+%" PRId64 ")\n", 672 sampleTime, sampleTime - previous); 673 } 674 previous = sampleTime; 675 } 676 677 result.appendFormat("mPresentFences [%d]:\n", 678 NUM_PRESENT_SAMPLES); 679 nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC); 680 previous = Fence::SIGNAL_TIME_INVALID; 681 for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) { 682 size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES; 683 nsecs_t presentTime = mPresentFences[idx]->getSignalTime(); 684 if (presentTime == Fence::SIGNAL_TIME_PENDING) { 685 result.appendFormat(" [unsignaled fence]\n"); 686 } else if(presentTime == Fence::SIGNAL_TIME_INVALID) { 687 result.appendFormat(" [invalid fence]\n"); 688 } else if (previous == Fence::SIGNAL_TIME_PENDING || 689 previous == Fence::SIGNAL_TIME_INVALID) { 690 result.appendFormat(" %" PRId64 " (%.3f ms ago)\n", presentTime, 691 (now - presentTime) / 1000000.0); 692 } else { 693 result.appendFormat(" %" PRId64 " (+%" PRId64 " / %.3f) (%.3f ms ago)\n", 694 presentTime, presentTime - previous, 695 (presentTime - previous) / (double) mPeriod, 696 (now - 
presentTime) / 1000000.0); 697 } 698 previous = presentTime; 699 } 700 701 result.appendFormat("current monotonic time: %" PRId64 "\n", now); 702} 703 704} // namespace android 705