RenderThread.cpp revision c4fbada76aa840105553b2c2bce2204e673d2983
1/* 2 * Copyright (C) 2013 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "RenderThread.h" 18 19#include "../renderstate/RenderState.h" 20#include "../pipeline/skia/SkiaOpenGLReadback.h" 21#include "CanvasContext.h" 22#include "EglManager.h" 23#include "OpenGLReadback.h" 24#include "RenderProxy.h" 25#include "VulkanManager.h" 26 27#include <gui/DisplayEventReceiver.h> 28#include <gui/ISurfaceComposer.h> 29#include <gui/SurfaceComposerClient.h> 30#include <sys/resource.h> 31#include <utils/Condition.h> 32#include <utils/Log.h> 33#include <utils/Mutex.h> 34 35namespace android { 36namespace uirenderer { 37namespace renderthread { 38 39// Number of events to read at a time from the DisplayEventReceiver pipe. 40// The value should be large enough that we can quickly drain the pipe 41// using just a few large reads. 
static const size_t EVENT_BUFFER_SIZE = 100;

// Slight delay to give the UI time to push us a new frame before we replay
static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);

TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}

// Pops and returns the head task (or nullptr if empty). The task's mNext
// link is cleared so it can later be re-queued safely.
RenderTask* TaskQueue::next() {
    RenderTask* ret = mHead;
    if (ret) {
        mHead = ret->mNext;
        if (!mHead) {
            mTail = nullptr;
        }
        ret->mNext = nullptr;
    }
    return ret;
}

// Returns the head task without removing it; nullptr if the queue is empty.
RenderTask* TaskQueue::peek() {
    return mHead;
}

// Inserts a task keeping the list sorted by mRunAt (stable for equal times:
// new tasks go after existing ones with the same mRunAt).
void TaskQueue::queue(RenderTask* task) {
    // Since the RenderTask itself forms the linked list it is not allowed
    // to have the same task queued twice
    LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
    if (mTail) {
        // Fast path if we can just append
        if (mTail->mRunAt <= task->mRunAt) {
            mTail->mNext = task;
            mTail = task;
        } else {
            // Need to find the proper insertion point
            RenderTask* previous = nullptr;
            RenderTask* next = mHead;
            while (next && next->mRunAt <= task->mRunAt) {
                previous = next;
                next = next->mNext;
            }
            if (!previous) {
                // New earliest task: becomes the head
                task->mNext = mHead;
                mHead = task;
            } else {
                previous->mNext = task;
                if (next) {
                    task->mNext = next;
                } else {
                    mTail = task;
                }
            }
        }
    } else {
        // Queue was empty
        mTail = mHead = task;
    }
}

// Pushes a task to the head of the queue, ignoring mRunAt ordering.
// NOTE(review): unlike queue(), this does not fatal on double-queuing —
// callers are presumably expected to guarantee the task is not already
// queued; confirm at call sites.
void TaskQueue::queueAtFront(RenderTask* task) {
    if (mTail) {
        task->mNext = mHead;
        mHead = task;
    } else {
        mTail = mHead = task;
    }
}

// Removes a task that must currently be in the queue; aborts otherwise.
void TaskQueue::remove(RenderTask* task) {
    // TaskQueue is strict here to enforce that users are keeping track of
    // their RenderTasks due to how their memory is managed
    LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
            "Cannot remove a task that isn't in the queue!");

    // If task is the head we can just call next() to pop it off
    // Otherwise we need to scan through to find the task before it
    if (peek() == task) {
        next();
    } else {
        RenderTask* previous = mHead;
        while (previous->mNext != task) {
            previous = previous->mNext;
        }
        previous->mNext = task->mNext;
        if (mTail == task) {
            mTail = previous;
        }
    }
}

// Task that forwards to RenderThread::dispatchFrameCallbacks(); a single
// instance is created by the RenderThread constructor and re-queued per vsync.
class DispatchFrameCallbacks : public RenderTask {
private:
    RenderThread* mRenderThread;
public:
    explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}

    virtual void run() override {
        mRenderThread->dispatchFrameCallbacks();
    }
};

static bool gHasRenderThreadInstance = false;

// True once getInstance() has been called at least once.
bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}

// Lazily creates the process-wide RenderThread singleton. Intentionally
// leaked (see comment below); the destructor aborts if ever reached.
RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}

// Starts the thread immediately; heavier per-thread state is deferred to
// initThreadLocals(), which runs on the render thread in threadLoop().
RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");
}

RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}

// Creates the DisplayEventReceiver (vsync source) and registers its fd with
// the looper so vsync events wake up threadLoop().
void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}

// Runs once on the render thread itself (from threadLoop) to set up state
// that must live on this thread: display info, frame interval, vsync
// receiver, and the GL/Vulkan managers.
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    // Frame interval derived from the display's refresh rate (ns per frame)
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mJankTracker = new JankTracker(mDisplayInfo);
    mVkManager = new VulkanManager(*this);
}

// Lazily constructs the Readback implementation matching the configured
// render pipeline. Never returns null; fatals on an unknown pipeline type.
Readback& RenderThread::readback() {

    if (!mReadback) {
        auto renderType = Properties::getRenderPipelineType();
        switch (renderType) {
            case RenderPipelineType::OpenGL:
                mReadback = new OpenGLReadbackImpl(*this);
                break;
            case RenderPipelineType::SkiaGL:
            case RenderPipelineType::SkiaVulkan:
                // It works to use the OpenGL pipeline for Vulkan but this is not
                // ideal as it causes us to create an OpenGL context in addition
                // to the Vulkan one.
                mReadback = new skiapipeline::SkiaOpenGLReadback(*this);
                break;
            default:
                LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
                break;
        }
    }

    return *mReadback;
}

// Looper fd callback for the DisplayEventReceiver pipe. Return value
// follows the Looper contract: 0 unregisters the callback, 1 keeps it.
// 'data' is the RenderThread* registered in initializeDisplayEventReceiver().
int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
    if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
        ALOGE("Display event receiver pipe was closed or an error occurred. "
                "events=0x%x", events);
        return 0; // remove the callback
    }

    if (!(events & Looper::EVENT_INPUT)) {
        ALOGW("Received spurious callback for unhandled poll event. "
                "events=0x%x", events);
        return 1; // keep the callback
    }

    reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();

    return 1; // keep the callback
}

// Drains all pending events from the receiver pipe and returns the timestamp
// of the most recent vsync seen, or 0 if none. Reading in EVENT_BUFFER_SIZE
// chunks keeps the number of reads small (see comment on the constant).
static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
    DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
    nsecs_t latest = 0;
    ssize_t n;
    while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
        for (ssize_t i = 0; i < n; i++) {
            const DisplayEventReceiver::Event& ev = buf[i];
            switch (ev.header.type) {
            case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
                latest = ev.header.timestamp;
                break;
            }
        }
    }
    if (n < 0) {
        ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
    }
    return latest;
}

// Consumes queued vsync events; if TimeLord accepts the newest one and no
// frame-callback dispatch is already scheduled, queues mFrameCallbackTask
// slightly after the vsync (DISPATCH_FRAME_CALLBACKS_DELAY) so the UI thread
// has a chance to push a new frame first.
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}

// Runs all registered frame callbacks. Swapping mFrameCallbacks into a local
// set first means callbacks that re-register during doFrame() land in the
// pending set for the NEXT vsync instead of being iterated here.
void RenderThread::dispatchFrameCallbacks() {
    ATRACE_CALL();
    mFrameCallbackTaskPending = false;

    std::set<IFrameCallback*> callbacks;
    mFrameCallbacks.swap(callbacks);

    if (callbacks.size()) {
        // Assume one of them will probably animate again so preemptively
        // request the next vsync in case it occurs mid-frame
        requestVsync();
        for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) {
            (*it)->doFrame();
        }
    }
}

// Asks the display for the next vsync, de-duplicated via mVsyncRequested
// (cleared again in drainDisplayEventQueue when a vsync arrives).
void RenderThread::requestVsync() {
    if (!mVsyncRequested) {
        mVsyncRequested = true;
        status_t status = mDisplayEventReceiver->requestNextVsync();
        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
                "requestNextVsync failed with status: %d", status);
    }
}

// Main render-thread loop: poll the looper (vsync fd + wake()s), run due
// tasks, then compute the next poll timeout from the earliest queued task.
// Loops forever; the trailing return is unreachable.
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1; // -1 == block indefinitely in pollOnce
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        // Process our queue, if we have anything
        while (RenderTask* task = nextTask(&nextWakeup)) {
            task->run();
            // task may have deleted itself, do not reference it again
        }
        if (nextWakeup == LLONG_MAX) {
            // Queue is empty: sleep until woken
            timeoutMillis = -1;
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;
            }
        }

        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            // Promote pending registrations and make sure a vsync is coming
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}

// Queues a task (any thread). Wakes the looper only if the new task runs
// earlier than the currently scheduled wakeup; mNextWakeup = 0 prevents
// redundant wake() calls until threadLoop recomputes it via nextTask().
void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}

// Queues a task and blocks the calling thread until the render thread has
// run it (via SignalingRenderTask's condition signal).
void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    condition.wait(mutex);
}

// Queues a task ahead of everything else and unconditionally wakes the looper.
void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}

// Queues a task to run at the given CLOCK_MONOTONIC time (ns).
void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}

// Removes a task that is currently queued; aborts if it is not (see
// TaskQueue::remove).
void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}

// Registers a frame callback; it is promoted to the active set by
// threadLoop() before the next vsync dispatch.
// NOTE(review): no lock taken — presumably only called on the render thread;
// confirm against callers.
void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}

// Unregisters a callback from both the active and pending sets.
// Returns true if it was present in either.
bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
    size_t erased;
    erased = mFrameCallbacks.erase(callback);
    erased |= mPendingRegistrationFrameCallbacks.erase(callback);
    return erased;
}

// Defers an already-active callback to the next vsync by moving it from the
// active set back into the pending set. No-op if it is not active.
void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
    if (mFrameCallbacks.erase(callback)) {
        mPendingRegistrationFrameCallbacks.insert(callback);
    }
}

// Pops the next due task, or returns nullptr if the earliest task is still
// in the future (in which case *nextWakeup tells the caller when to wake).
// *nextWakeup is LLONG_MAX when the queue is empty. Always sets *nextWakeup
// when the pointer is non-null.
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */