DispSync.cpp revision 22279c44183a9c71301c770c4275545500a48345
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS

// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS

#include <math.h>

#include <cutils/log.h>

#include <ui/Fence.h>

#include <utils/String8.h>
#include <utils/Thread.h>
#include <utils/Trace.h>
#include <utils/Vector.h>

#include "DispSync.h"
#include "EventLog/EventLog.h"

namespace android {

// Setting this to true enables verbose tracing that can be used to debug
// vsync event model or phase issues.
static const bool kTraceDetailedInfo = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware.  The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync.
static const nsecs_t kErrorThreshold = 160000000000;    // 400 usec squared
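// Note: the threshold is in units of nanoseconds squared: 400 usec is
// 400,000 ns, and 400,000^2 = 160,000,000,000.  Re-synchronization with the
// hardware vsync is requested once the mean squared error (mError) exceeds
// this value, i.e. once the RMS prediction error passes roughly 400 usec.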

// This is the offset from the present fence timestamps to the corresponding
// vsync event.
static const int64_t kPresentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;

class DispSyncThread: public Thread {
public:

    DispSyncThread():
            mStop(false),
            mPeriod(0),
            mPhase(0),
            mReferenceTime(0),
            mWakeupLatency(0) {
    }

    virtual ~DispSyncThread() {}

    void updateModel(nsecs_t period, nsecs_t phase, nsecs_t referenceTime) {
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mReferenceTime = referenceTime;
        mCond.signal();
    }

    void stop() {
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        nsecs_t nextEventTime = 0;

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (mStop) {
                    return false;
                }

                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                nextEventTime = computeNextEventTimeLocked(now);
                targetTime = nextEventTime;

                bool isWakeup = false;

                if (now < targetTime) {
                    err = mCond.waitRelative(mMutex, targetTime - now);

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                if (isWakeup) {
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
                    if (mWakeupLatency > 500000) {
                        // Don't correct by more than 500 us
                        mWakeupLatency = 500000;
                    }
                    if (kTraceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - nextEventTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            if (callbackInvocations.size() > 0) {
                fireCallbackInvocations(callbackInvocations);
            }
        }

        return false;
    }

    status_t addEventListener(nsecs_t phase, const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the first future event to fire without allowing
        // any past events to fire.  Because computeListenerNextEventTimeLocked
        // filters out events within half a period of the last event time, we
        // need to initialize the last event time to half a period in the past.
        listener.mLastEventTime = systemTime(SYSTEM_TIME_MONOTONIC) - mPeriod / 2;

        mEventListeners.push(listener);

        mCond.signal();

        return NO_ERROR;
    }

    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the kIgnorePresentFences case.
    bool hasAnyEventListeners() {
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

private:

    struct EventListener {
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        sp<DispSync::Callback> mCallback;
    };

    struct CallbackInvocation {
        sp<DispSync::Callback> mCallback;
        nsecs_t mEventTime;
    };

    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        return nextEventTime;
    }

    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        Vector<CallbackInvocation> callbackInvocations;
        nsecs_t ref = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    ref);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }
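
    // For reference: the next event time for a listener is the first model
    // event strictly after 'ref'.  The integer division below counts the
    // whole periods elapsed since the listener's phase-adjusted origin, and
    // adding one period steps to the next boundary.  The half-period check
    // that follows pushes the event out by one more period if it would land
    // within half a period of the previously delivered event, so the same
    // vsync is never reported to a listener twice.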
    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
            nsecs_t ref) {

        nsecs_t lastEventTime = listener.mLastEventTime;
        if (ref < lastEventTime) {
            ref = lastEventTime;
        }

        nsecs_t phase = mReferenceTime + mPhase + listener.mPhase;
        nsecs_t t = (((ref - phase) / mPeriod) + 1) * mPeriod + phase;

        if (t - listener.mLastEventTime < mPeriod / 2) {
            t += mPeriod;
        }

        return t;
    }

    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    bool mStop;

    nsecs_t mPeriod;
    nsecs_t mPhase;
    nsecs_t mReferenceTime;
    nsecs_t mWakeupLatency;

    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};

class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity(false) {}

    virtual void onDispSyncEvent(nsecs_t /*when*/) {
        mParity = !mParity;
        ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    }

private:
    bool mParity;
};

DispSync::DispSync() :
        mRefreshSkipCount(0),
        mThread(new DispSyncThread()) {

    mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);

    reset();
    beginResync();

    if (kTraceDetailedInfo) {
        // If we're not getting present fences then the ZeroPhaseTracer
        // would prevent the HW vsync event from ever being turned off.
        // Even if we're just ignoring the fences, the zero-phase tracing is
        // not needed because any time there is an event registered we will
        // turn on the HW vsync events.
        if (!kIgnorePresentFences) {
            addEventListener(0, new ZeroPhaseTracer());
        }
    }
}

DispSync::~DispSync() {}

void DispSync::reset() {
    Mutex::Autolock lock(mMutex);

    mPhase = 0;
    mReferenceTime = 0;
    mModelUpdated = false;
    mNumResyncSamples = 0;
    mFirstResyncSample = 0;
    mNumResyncSamplesSincePresent = 0;
    resetErrorLocked();
}

bool DispSync::addPresentFence(const sp<Fence>& fence) {
    Mutex::Autolock lock(mMutex);

    mPresentFences[mPresentSampleOffset] = fence;
    mPresentTimes[mPresentSampleOffset] = 0;
    mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    mNumResyncSamplesSincePresent = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        const sp<Fence>& f(mPresentFences[i]);
        if (f != NULL) {
            nsecs_t t = f->getSignalTime();
            if (t < INT64_MAX) {
                mPresentFences[i].clear();
                mPresentTimes[i] = t + kPresentTimeOffset;
            }
        }
    }

    updateErrorLocked();

    return !mModelUpdated || mError > kErrorThreshold;
}

void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);

    mModelUpdated = false;
    mNumResyncSamples = 0;
}

bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;
    if (mNumResyncSamples == 0) {
        mPhase = 0;
        mReferenceTime = timestamp;
    }

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (kIgnorePresentFences) {
        // If we don't have the sync framework we will never have
        // addPresentFence called.  This means we have no way to know whether
        // or not we're synchronized with the HW vsyncs, so we just request
        // that the HW vsync events be turned on whenever we need to generate
        // SW vsync events.
        return mThread->hasAnyEventListeners();
    }

    return !mModelUpdated || mError > kErrorThreshold;
}

void DispSync::endResync() {
}

status_t DispSync::addEventListener(nsecs_t phase,
        const sp<Callback>& callback) {

    Mutex::Autolock lock(mMutex);
    return mThread->addEventListener(phase, callback);
}

void DispSync::setRefreshSkipCount(int count) {
    Mutex::Autolock lock(mMutex);
    ALOGD("setRefreshSkipCount(%d)", count);
    mRefreshSkipCount = count;
    updateModelLocked();
}

status_t DispSync::removeEventListener(const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->removeEventListener(callback);
}

void DispSync::setPeriod(nsecs_t period) {
    Mutex::Autolock lock(mMutex);
    mPeriod = period;
    mPhase = 0;
    mReferenceTime = 0;
    mThread->updateModel(mPeriod, mPhase, mReferenceTime);
}

nsecs_t DispSync::getPeriod() {
    // lock mutex as mPeriod changes multiple times in updateModelLocked
    Mutex::Autolock lock(mMutex);
    return mPeriod;
}
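
// A note on the model fit in updateModelLocked(): the period is the mean of
// the differences between consecutive resync samples.  The phase is a
// circular mean: each sample's offset within a period is mapped to an angle,
// the cos/sin of those angles are averaged, and atan2 of the averaged vector
// is converted back to nanoseconds.  This avoids the wrap-around bias a plain
// arithmetic mean would have when samples straddle a period boundary.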
void DispSync::updateModelLocked() {
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        nsecs_t durationSum = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            durationSum += mResyncSamples[idx] - mResyncSamples[prev];
        }

        mPeriod = durationSum / (mNumResyncSamples - 1);

        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        for (size_t i = 0; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx] - mReferenceTime;
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples);
        sampleAvgY /= double(mNumResyncSamples);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        if (mPhase < 0) {
            mPhase += mPeriod;
        }

        if (kTraceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase);
        }

        // Artificially inflate the period if requested.
        mPeriod += mPeriod * mRefreshSkipCount;

        mThread->updateModel(mPeriod, mPhase, mReferenceTime);
        mModelUpdated = true;
    }
}

void DispSync::updateErrorLocked() {
    if (!mModelUpdated) {
        return;
    }

    // Need to compare present fences against the un-adjusted refresh period,
    // since they might arrive between two events.
    nsecs_t period = mPeriod / (1 + mRefreshSkipCount);

    int numErrSamples = 0;
    nsecs_t sqErrSum = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        nsecs_t sample = mPresentTimes[i] - mReferenceTime;
        if (sample > mPhase) {
            nsecs_t sampleErr = (sample - mPhase) % period;
            if (sampleErr > period / 2) {
                sampleErr -= period;
            }
            sqErrSum += sampleErr * sampleErr;
            numErrSamples++;
        }
    }

    if (numErrSamples > 0) {
        mError = sqErrSum / numErrSamples;
    } else {
        mError = 0;
    }

    if (kTraceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
}

void DispSync::resetErrorLocked() {
    mPresentSampleOffset = 0;
    mError = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        mPresentFences[i].clear();
        mPresentTimes[i] = 0;
    }
}
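
// For reference: computeNextRefresh() returns the model's prediction of a
// future vsync.  With periodOffset == 0 it is the first predicted vsync after
// the current time; each increment of periodOffset pushes the result one
// period further out.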
nsecs_t DispSync::computeNextRefresh(int periodOffset) const {
    Mutex::Autolock lock(mMutex);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    nsecs_t phase = mReferenceTime + mPhase;
    return (((now - phase) / mPeriod) + periodOffset + 1) * mPeriod + phase;
}

void DispSync::dump(String8& result) const {
    Mutex::Autolock lock(mMutex);
    result.appendFormat("present fences are %s\n",
            kIgnorePresentFences ? "ignored" : "used");
    result.appendFormat("mPeriod: %" PRId64 " ns (%.3f fps; skipCount=%d)\n",
            mPeriod, 1000000000.0 / mPeriod, mRefreshSkipCount);
    result.appendFormat("mPhase: %" PRId64 " ns\n", mPhase);
    result.appendFormat("mError: %" PRId64 " ns (sqrt=%.1f)\n",
            mError, sqrt(mError));
    result.appendFormat("mNumResyncSamplesSincePresent: %d (limit %d)\n",
            mNumResyncSamplesSincePresent, MAX_RESYNC_SAMPLES_WITHOUT_PRESENT);
    result.appendFormat("mNumResyncSamples: %zd (max %d)\n",
            mNumResyncSamples, MAX_RESYNC_SAMPLES);

    result.appendFormat("mResyncSamples:\n");
    nsecs_t previous = -1;
    for (size_t i = 0; i < mNumResyncSamples; i++) {
        size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
        nsecs_t sampleTime = mResyncSamples[idx];
        if (i == 0) {
            result.appendFormat("  %" PRId64 "\n", sampleTime);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 ")\n",
                    sampleTime, sampleTime - previous);
        }
        previous = sampleTime;
    }

    result.appendFormat("mPresentFences / mPresentTimes [%d]:\n",
            NUM_PRESENT_SAMPLES);
    nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
    previous = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        size_t idx = (i + mPresentSampleOffset) % NUM_PRESENT_SAMPLES;
        bool signaled = mPresentFences[idx] == NULL;
        nsecs_t presentTime = mPresentTimes[idx];
        if (!signaled) {
            result.appendFormat("  [unsignaled fence]\n");
        } else if (presentTime == 0) {
            result.appendFormat("  0\n");
        } else if (previous == 0) {
            result.appendFormat("  %" PRId64 "  (%.3f ms ago)\n", presentTime,
                    (now - presentTime) / 1000000.0);
        } else {
            result.appendFormat("  %" PRId64 " (+%" PRId64 " / %.3f)  (%.3f ms ago)\n",
                    presentTime, presentTime - previous,
                    (presentTime - previous) / (double) mPeriod,
                    (now - presentTime) / 1000000.0);
        }
        previous = presentTime;
    }

    result.appendFormat("current monotonic time: %" PRId64 "\n", now);
}

} // namespace android