DispSync.cpp revision f52b3c88f18c0546526996c839fbce74172e11c7
/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define ATRACE_TAG ATRACE_TAG_GRAPHICS

// This is needed for stdint.h to define INT64_MAX in C++
#define __STDC_LIMIT_MACROS

#include <math.h>

#include <cutils/log.h>

#include <ui/Fence.h>

#include <utils/String8.h>
#include <utils/Thread.h>
#include <utils/Trace.h>
#include <utils/Vector.h>

#include "DispSync.h"
#include "EventLog/EventLog.h"

namespace android {

// Setting this to true enables verbose tracing that can be used to debug
// vsync event model or phase issues.
static const bool traceDetailedInfo = false;

// This is the threshold used to determine when hardware vsync events are
// needed to re-synchronize the software vsync model with the hardware. The
// error metric used is the mean of the squared difference between each
// present time and the nearest software-predicted vsync.
static const nsecs_t errorThreshold = 160000000000;
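// Aside (not in the original source): 160000000000 ns^2 == (400000 ns)^2,
// so the model is considered out of sync once the root-mean-square distance
// between present times and predicted vsyncs exceeds roughly 400 us.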
// This works around the lack of support for the sync framework on some
// devices.
#ifdef RUNNING_WITHOUT_SYNC_FRAMEWORK
static const bool runningWithoutSyncFramework = true;
#else
static const bool runningWithoutSyncFramework = false;
#endif

// This is the offset from the present fence timestamps to the corresponding
// vsync event.
static const int64_t presentTimeOffset = PRESENT_TIME_OFFSET_FROM_VSYNC_NS;

class DispSyncThread: public Thread {
public:

    DispSyncThread():
            mLowPowerMode(false),
            mStop(false),
            mLastVsyncSent(false),
            mLastBufferFull(false),
            mPeriod(0),
            mPhase(0),
            mWakeupLatency(0) {
    }

    virtual ~DispSyncThread() {}

    void updateModel(nsecs_t period, nsecs_t phase) {
        Mutex::Autolock lock(mMutex);
        mPeriod = period;
        mPhase = phase;
        mCond.signal();
    }

    void stop() {
        Mutex::Autolock lock(mMutex);
        mStop = true;
        mCond.signal();
    }

    virtual bool threadLoop() {
        status_t err;
        nsecs_t now = systemTime(SYSTEM_TIME_MONOTONIC);
        nsecs_t nextEventTime = 0;

        while (true) {
            Vector<CallbackInvocation> callbackInvocations;

            nsecs_t targetTime = 0;

            { // Scope for lock
                Mutex::Autolock lock(mMutex);

                if (mStop) {
                    return false;
                }

                if (mPeriod == 0) {
                    err = mCond.wait(mMutex);
                    if (err != NO_ERROR) {
                        ALOGE("error waiting for new events: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                    continue;
                }

                nextEventTime = computeNextEventTimeLocked(now);
                targetTime = nextEventTime;

                bool isWakeup = false;

                if (now < targetTime) {
                    err = mCond.waitRelative(mMutex, targetTime - now);

                    if (err == TIMED_OUT) {
                        isWakeup = true;
                    } else if (err != NO_ERROR) {
                        ALOGE("error waiting for next event: %s (%d)",
                                strerror(-err), err);
                        return false;
                    }
                }

                now = systemTime(SYSTEM_TIME_MONOTONIC);

                if (isWakeup) {
                    // Track how late wakeups tend to be with an exponential
                    // moving average (weight 1/64).
                    mWakeupLatency = ((mWakeupLatency * 63) +
                            (now - targetTime)) / 64;
                    if (mWakeupLatency > 500000) {
                        // Don't correct by more than 500 us
                        mWakeupLatency = 500000;
                    }
                    if (traceDetailedInfo) {
                        ATRACE_INT64("DispSync:WakeupLat", now - nextEventTime);
                        ATRACE_INT64("DispSync:AvgWakeupLat", mWakeupLatency);
                    }
                }

                callbackInvocations = gatherCallbackInvocationsLocked(now);
            }

            if (callbackInvocations.size() > 0) {
                if (mLowPowerMode) {
                    // In low power mode, alternate between firing and
                    // skipping callbacks for as long as events keep firing
                    // every period, halving the effective callback rate.
                    if (!mLastVsyncSent || !mLastBufferFull) {
                        fireCallbackInvocations(callbackInvocations);
                        mLastVsyncSent = true;
                    } else {
                        mLastVsyncSent = false;
                    }
                } else {
                    fireCallbackInvocations(callbackInvocations);
                }
                mLastBufferFull = true;
            } else {
                mLastBufferFull = false;
            }
        }

        return false;
    }

    status_t addEventListener(nsecs_t phase, const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                return BAD_VALUE;
            }
        }

        EventListener listener;
        listener.mPhase = phase;
        listener.mCallback = callback;

        // We want to allow the first upcoming event to fire without allowing
        // any past events to fire. Because
        // computeListenerNextEventTimeLocked filters out events within half
        // a period of the last event time, we need to initialize the last
        // event time to half a period in the past.
        listener.mLastEventTime = systemTime(SYSTEM_TIME_MONOTONIC) - mPeriod / 2;

        mEventListeners.push(listener);

        mCond.signal();

        return NO_ERROR;
    }

    status_t removeEventListener(const sp<DispSync::Callback>& callback) {
        Mutex::Autolock lock(mMutex);

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            if (mEventListeners[i].mCallback == callback) {
                mEventListeners.removeAt(i);
                mCond.signal();
                return NO_ERROR;
            }
        }

        return BAD_VALUE;
    }

    // This method is only here to handle the runningWithoutSyncFramework
    // case.
    bool hasAnyEventListeners() {
        Mutex::Autolock lock(mMutex);
        return !mEventListeners.empty();
    }

    bool mLowPowerMode;

private:

    struct EventListener {
        nsecs_t mPhase;
        nsecs_t mLastEventTime;
        sp<DispSync::Callback> mCallback;
    };

    struct CallbackInvocation {
        sp<DispSync::Callback> mCallback;
        nsecs_t mEventTime;
    };

    nsecs_t computeNextEventTimeLocked(nsecs_t now) {
        nsecs_t nextEventTime = INT64_MAX;
        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    now);

            if (t < nextEventTime) {
                nextEventTime = t;
            }
        }

        return nextEventTime;
    }

    Vector<CallbackInvocation> gatherCallbackInvocationsLocked(nsecs_t now) {
        Vector<CallbackInvocation> callbackInvocations;
        nsecs_t ref = now - mPeriod;

        for (size_t i = 0; i < mEventListeners.size(); i++) {
            nsecs_t t = computeListenerNextEventTimeLocked(mEventListeners[i],
                    ref);

            if (t < now) {
                CallbackInvocation ci;
                ci.mCallback = mEventListeners[i].mCallback;
                ci.mEventTime = t;
                callbackInvocations.push(ci);
                mEventListeners.editItemAt(i).mLastEventTime = t;
            }
        }

        return callbackInvocations;
    }

    nsecs_t computeListenerNextEventTimeLocked(const EventListener& listener,
            nsecs_t ref) {

        nsecs_t lastEventTime = listener.mLastEventTime;
        if (ref < lastEventTime) {
            ref = lastEventTime;
        }

        nsecs_t phase = mPhase + listener.mPhase;
        nsecs_t t = (((ref - phase) / mPeriod) + 1) * mPeriod + phase;

        if (t - listener.mLastEventTime < mPeriod / 2) {
            t += mPeriod;
        }

        return t;
    }
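    // Worked example (not in the original source): with mPeriod = 10 and an
    // effective phase of 3, events fall at 3, 13, 23, 33, ...  For ref = 25,
    //
    //   t = (((25 - 3) / 10) + 1) * 10 + 3 = (2 + 1) * 10 + 3 = 33
    //
    // i.e. the first event time strictly after ref. The final half-period
    // check then pushes t out by one more period if it would land too close
    // to the listener's last event time.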
    void fireCallbackInvocations(const Vector<CallbackInvocation>& callbacks) {
        for (size_t i = 0; i < callbacks.size(); i++) {
            callbacks[i].mCallback->onDispSyncEvent(callbacks[i].mEventTime);
        }
    }

    bool mStop;
    bool mLastVsyncSent;
    bool mLastBufferFull;

    nsecs_t mPeriod;
    nsecs_t mPhase;
    nsecs_t mWakeupLatency;

    Vector<EventListener> mEventListeners;

    Mutex mMutex;
    Condition mCond;
};

class ZeroPhaseTracer : public DispSync::Callback {
public:
    ZeroPhaseTracer() : mParity(false) {}

    virtual void onDispSyncEvent(nsecs_t /*when*/) {
        mParity = !mParity;
        ATRACE_INT("ZERO_PHASE_VSYNC", mParity ? 1 : 0);
    }

private:
    bool mParity;
};

DispSync::DispSync() {
    mThread = new DispSyncThread();
    mThread->run("DispSync", PRIORITY_URGENT_DISPLAY + PRIORITY_MORE_FAVORABLE);

    reset();
    beginResync();

    if (traceDetailedInfo) {
        // If runningWithoutSyncFramework is true then the ZeroPhaseTracer
        // would prevent HW vsync events from ever being turned off.
        // Furthermore, the zero-phase tracing is not needed because any time
        // there is an event registered we will turn on the HW vsync events.
        if (!runningWithoutSyncFramework) {
            addEventListener(0, new ZeroPhaseTracer());
        }
    }
}

DispSync::~DispSync() {}

void DispSync::reset() {
    Mutex::Autolock lock(mMutex);

    mNumResyncSamples = 0;
    mFirstResyncSample = 0;
    mNumResyncSamplesSincePresent = 0;
    resetErrorLocked();
}

bool DispSync::addPresentFence(const sp<Fence>& fence) {
    Mutex::Autolock lock(mMutex);

    mPresentFences[mPresentSampleOffset] = fence;
    mPresentTimes[mPresentSampleOffset] = 0;
    mPresentSampleOffset = (mPresentSampleOffset + 1) % NUM_PRESENT_SAMPLES;
    mNumResyncSamplesSincePresent = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        const sp<Fence>& f(mPresentFences[i]);
        if (f != NULL) {
            nsecs_t t = f->getSignalTime();
            if (t < INT64_MAX) {
                // The fence has signaled; record the implied vsync time and
                // drop the reference to the fence.
                mPresentFences[i].clear();
                mPresentTimes[i] = t + presentTimeOffset;
            }
        }
    }

    updateErrorLocked();

    return mPeriod == 0 || mError > errorThreshold;
}
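// Aside (not in the original source): addPresentFence above and
// addResyncSample below share a return convention: they return true while
// the software model cannot be trusted yet (no period, or mean squared
// error above errorThreshold), which the caller uses to keep hardware vsync
// events enabled until the model resynchronizes.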
void DispSync::beginResync() {
    Mutex::Autolock lock(mMutex);

    mNumResyncSamples = 0;
}

bool DispSync::addResyncSample(nsecs_t timestamp) {
    Mutex::Autolock lock(mMutex);

    // Store the sample in a ring buffer holding the most recent
    // MAX_RESYNC_SAMPLES timestamps.
    size_t idx = (mFirstResyncSample + mNumResyncSamples) % MAX_RESYNC_SAMPLES;
    mResyncSamples[idx] = timestamp;

    if (mNumResyncSamples < MAX_RESYNC_SAMPLES) {
        mNumResyncSamples++;
    } else {
        mFirstResyncSample = (mFirstResyncSample + 1) % MAX_RESYNC_SAMPLES;
    }

    updateModelLocked();

    if (mNumResyncSamplesSincePresent++ > MAX_RESYNC_SAMPLES_WITHOUT_PRESENT) {
        resetErrorLocked();
    }

    if (runningWithoutSyncFramework) {
        // If we don't have the sync framework we will never have
        // addPresentFence called. This means we have no way to know whether
        // or not we're synchronized with the HW vsyncs, so we just request
        // that the HW vsync events be turned on whenever we need to generate
        // SW vsync events.
        return mThread->hasAnyEventListeners();
    }

    return mPeriod == 0 || mError > errorThreshold;
}

void DispSync::endResync() {
}

status_t DispSync::addEventListener(nsecs_t phase,
        const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->addEventListener(phase, callback);
}

void DispSync::setLowPowerMode(bool enabled) {
    mThread->mLowPowerMode = enabled;
}

status_t DispSync::removeEventListener(const sp<Callback>& callback) {
    Mutex::Autolock lock(mMutex);
    return mThread->removeEventListener(callback);
}

void DispSync::setPeriod(nsecs_t period) {
    Mutex::Autolock lock(mMutex);
    mPeriod = period;
    mPhase = 0;
    mThread->updateModel(mPeriod, mPhase);
}

void DispSync::updateModelLocked() {
    if (mNumResyncSamples >= MIN_RESYNC_SAMPLES_FOR_UPDATE) {
        // The period is the average interval between consecutive resync
        // samples.
        nsecs_t durationSum = 0;
        for (size_t i = 1; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            size_t prev = (idx + MAX_RESYNC_SAMPLES - 1) % MAX_RESYNC_SAMPLES;
            durationSum += mResyncSamples[idx] - mResyncSamples[prev];
        }

        mPeriod = durationSum / (mNumResyncSamples - 1);

        // The phase is the circular mean of the samples' offsets within the
        // period: map each offset to a point on the unit circle, average the
        // points, and convert the resulting angle back to an offset.
        double sampleAvgX = 0;
        double sampleAvgY = 0;
        double scale = 2.0 * M_PI / double(mPeriod);
        for (size_t i = 0; i < mNumResyncSamples; i++) {
            size_t idx = (mFirstResyncSample + i) % MAX_RESYNC_SAMPLES;
            nsecs_t sample = mResyncSamples[idx];
            double samplePhase = double(sample % mPeriod) * scale;
            sampleAvgX += cos(samplePhase);
            sampleAvgY += sin(samplePhase);
        }

        sampleAvgX /= double(mNumResyncSamples);
        sampleAvgY /= double(mNumResyncSamples);

        mPhase = nsecs_t(atan2(sampleAvgY, sampleAvgX) / scale);

        if (mPhase < 0) {
            mPhase += mPeriod;
        }

        if (traceDetailedInfo) {
            ATRACE_INT64("DispSync:Period", mPeriod);
            ATRACE_INT64("DispSync:Phase", mPhase);
        }

        mThread->updateModel(mPeriod, mPhase);
    }
}
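// Aside (not in the original source): the circular mean above matters for
// samples that straddle the period boundary. With mPeriod = 10, the offsets
// {9.9, 0.1} are only 0.2 apart on the circle; a plain arithmetic mean would
// report 5.0, while averaging the unit vectors for angles 2*pi*9.9/10 and
// 2*pi*0.1/10 gives an angle of 0, i.e. a phase of 0, as expected.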
void DispSync::updateErrorLocked() {
    if (mPeriod == 0) {
        return;
    }

    int numErrSamples = 0;
    nsecs_t sqErrSum = 0;

    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        nsecs_t sample = mPresentTimes[i];
        if (sample > mPhase) {
            // Measure the distance from the sample to the nearest predicted
            // vsync by folding the offset into [-mPeriod/2, mPeriod/2].
            nsecs_t sampleErr = (sample - mPhase) % mPeriod;
            if (sampleErr > mPeriod / 2) {
                sampleErr -= mPeriod;
            }
            sqErrSum += sampleErr * sampleErr;
            numErrSamples++;
        }
    }

    if (numErrSamples > 0) {
        mError = sqErrSum / numErrSamples;
    } else {
        mError = 0;
    }

    if (traceDetailedInfo) {
        ATRACE_INT64("DispSync:Error", mError);
    }
}

void DispSync::resetErrorLocked() {
    mPresentSampleOffset = 0;
    mError = 0;
    for (size_t i = 0; i < NUM_PRESENT_SAMPLES; i++) {
        mPresentFences[i].clear();
        mPresentTimes[i] = 0;
    }
}

} // namespace android
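// Usage sketch (illustrative only, not part of the original file). A client
// such as SurfaceFlinger drives the model roughly as follows;
// hwVsyncTimestamp, appPhaseNs, and appCallback are placeholders, with
// hwVsyncTimestamp standing in for a timestamp delivered by the hardware
// vsync callback:
//
//   DispSync dispSync;
//   dispSync.setPeriod(16666667);           // 60 Hz panel
//
//   dispSync.beginResync();
//   bool needMoreHwVsync = true;
//   while (needMoreHwVsync) {
//       needMoreHwVsync = dispSync.addResyncSample(hwVsyncTimestamp);
//   }
//   dispSync.endResync();                   // HW vsync can now be disabled
//
//   // Listeners then receive model-predicted vsync times, offset by their
//   // phase, via DispSync::Callback::onDispSyncEvent().
//   dispSync.addEventListener(appPhaseNs, appCallback);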