RenderThread.cpp revision 625dd56a45bfe95c5f1baa1891529503ff3374a9
1/* 2 * Copyright (C) 2013 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17#include "RenderThread.h" 18 19#include "../renderstate/RenderState.h" 20#include "../pipeline/skia/SkiaOpenGLPipeline.h" 21#include "../pipeline/skia/SkiaOpenGLReadback.h" 22#include "CanvasContext.h" 23#include "EglManager.h" 24#include "OpenGLReadback.h" 25#include "RenderProxy.h" 26#include "VulkanManager.h" 27#include "utils/FatVector.h" 28 29#include <gui/DisplayEventReceiver.h> 30#include <gui/ISurfaceComposer.h> 31#include <gui/SurfaceComposerClient.h> 32#include <sys/resource.h> 33#include <utils/Condition.h> 34#include <utils/Log.h> 35#include <utils/Mutex.h> 36 37namespace android { 38namespace uirenderer { 39namespace renderthread { 40 41// Number of events to read at a time from the DisplayEventReceiver pipe. 42// The value should be large enough that we can quickly drain the pipe 43// using just a few large reads. 
static const size_t EVENT_BUFFER_SIZE = 100;

// Slight delay to give the UI time to push us a new frame before we replay
static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);

TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}

// Pops and returns the task at the head of the queue, or nullptr if the
// queue is empty. The returned task is detached (its mNext link is cleared)
// so it can later be re-queued safely.
RenderTask* TaskQueue::next() {
    RenderTask* ret = mHead;
    if (ret) {
        mHead = ret->mNext;
        if (!mHead) {
            mTail = nullptr;
        }
        ret->mNext = nullptr;
    }
    return ret;
}

// Returns the head of the queue without removing it (nullptr if empty).
RenderTask* TaskQueue::peek() {
    return mHead;
}

// Inserts |task| in mRunAt order. Tasks with an equal mRunAt keep FIFO
// order: the scan below walks past existing entries with the same run time
// before inserting. The queue is an intrusive singly-linked list threaded
// through RenderTask::mNext, so a task may be in at most one queue at a time.
void TaskQueue::queue(RenderTask* task) {
    // Since the RenderTask itself forms the linked list it is not allowed
    // to have the same task queued twice
    LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
    if (mTail) {
        // Fast path if we can just append
        if (mTail->mRunAt <= task->mRunAt) {
            mTail->mNext = task;
            mTail = task;
        } else {
            // Need to find the proper insertion point
            RenderTask* previous = nullptr;
            RenderTask* next = mHead;
            while (next && next->mRunAt <= task->mRunAt) {
                previous = next;
                next = next->mNext;
            }
            if (!previous) {
                // Insert at the head: task runs before everything queued
                task->mNext = mHead;
                mHead = task;
            } else {
                previous->mNext = task;
                if (next) {
                    task->mNext = next;
                } else {
                    // Inserted after the old tail
                    mTail = task;
                }
            }
        }
    } else {
        // Queue was empty
        mTail = mHead = task;
    }
}

// Inserts |task| at the head of the queue unconditionally, bypassing the
// mRunAt ordering that queue() maintains.
void TaskQueue::queueAtFront(RenderTask* task) {
    LOG_ALWAYS_FATAL_IF(task->mNext || mHead == task, "Task is already in the queue!");
    if (mTail) {
        task->mNext = mHead;
        mHead = task;
    } else {
        mTail = mHead = task;
    }
}

// Unlinks |task| from the queue. Aborts if the task is not currently queued
// (a task with no mNext that is not the tail cannot be in the list).
void TaskQueue::remove(RenderTask* task) {
    // TaskQueue is strict here to enforce that users are keeping track of
    // their RenderTasks due to how their memory is managed
    LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
            "Cannot remove a task that isn't in the queue!");

    // If task is the head we can just call next() to pop it off
    // Otherwise we need to scan through to find the task before it
    if (peek() == task) {
        next();
    } else {
        RenderTask* previous = mHead;
        while (previous->mNext != task) {
            previous = previous->mNext;
        }
        previous->mNext = task->mNext;
        if (mTail == task) {
            mTail = previous;
        }
    }
}

// One-shot RenderTask that forwards to RenderThread::dispatchFrameCallbacks().
// A single instance (mFrameCallbackTask) is queued with a small delay each
// time a vsync is accepted; see drainDisplayEventQueue().
class DispatchFrameCallbacks : public RenderTask {
private:
    RenderThread* mRenderThread;
public:
    explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}

    virtual void run() override {
        mRenderThread->dispatchFrameCallbacks();
    }
};

// NOTE(review): plain bool written in getInstance() and read in hasInstance(),
// potentially from different threads, with no synchronization — confirm all
// callers tolerate the race (e.g. only use it as a best-effort hint).
static bool gHasRenderThreadInstance = false;

bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}

// Lazily creates the process-wide RenderThread. The instance is deliberately
// leaked (never deleted) — see the comment below.
RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}

// Constructs and immediately starts the thread; the heavier per-thread setup
// (display info, EGL, RenderState, …) happens on the thread itself in
// initThreadLocals(), called from threadLoop().
// Thread(true): presumably the canCallJava flag of utils Thread — TODO confirm
// against utils/Thread.h.
RenderThread::RenderThread() : Thread(true)
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    mLooper = new Looper(false);
    run("RenderThread");
}

// The RenderThread is expected to live for the life of the process; reaching
// this destructor is a fatal error by design.
RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}

// Creates the DisplayEventReceiver (vsync event source) and registers its fd
// with the looper so vsync events wake up threadLoop's pollOnce().
void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}

// Runs on the render thread (first thing in threadLoop()): queries the main
// display's refresh rate to seed the frame interval, then creates the
// per-thread rendering singletons.
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    // Frame interval in nanoseconds derived from the display's refresh rate
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mJankTracker = new JankTracker(mDisplayInfo);
    mVkManager = new VulkanManager(*this);
}

// Lazily creates the Readback implementation matching the configured render
// pipeline. The instance is cached and reused for the life of the thread.
Readback& RenderThread::readback() {

    if (!mReadback) {
        auto renderType = Properties::getRenderPipelineType();
        switch (renderType) {
            case RenderPipelineType::OpenGL:
                mReadback = new OpenGLReadbackImpl(*this);
                break;
            case RenderPipelineType::SkiaGL:
            case RenderPipelineType::SkiaVulkan:
                // It works to use the OpenGL pipeline for Vulkan but this is not
                // ideal as it causes us to create an OpenGL context in addition
                // to the Vulkan one.
                mReadback = new skiapipeline::SkiaOpenGLReadback(*this);
                break;
            default:
                LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
                break;
        }
    }

    return *mReadback;
}

// Static Looper fd callback for the DisplayEventReceiver pipe. Per the Looper
// contract, returning 0 unregisters the callback and returning 1 keeps it.
// |data| is the RenderThread* passed to addFd().
int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
    if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
        ALOGE("Display event receiver pipe was closed or an error occurred. "
                "events=0x%x", events);
        return 0; // remove the callback
    }

    if (!(events & Looper::EVENT_INPUT)) {
        ALOGW("Received spurious callback for unhandled poll event. "
                "events=0x%x", events);
        return 1; // keep the callback
    }

    reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();

    return 1; // keep the callback
}

// Drains every pending event from the receiver and returns the timestamp of
// the most recent vsync seen, or 0 if none was pending. Reading in batches of
// EVENT_BUFFER_SIZE keeps the pipe from backing up.
static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
    DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
    nsecs_t latest = 0;
    ssize_t n;
    while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
        for (ssize_t i = 0; i < n; i++) {
            const DisplayEventReceiver::Event& ev = buf[i];
            switch (ev.header.type) {
                case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
                    // Later events overwrite earlier ones; only the newest matters
                    latest = ev.header.timestamp;
                    break;
            }
        }
    }
    if (n < 0) {
        ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
    }
    return latest;
}

// Consumes pending vsyncs and, if TimeLord accepts the new vsync and no
// dispatch is already pending, schedules mFrameCallbackTask slightly after
// the vsync (DISPATCH_FRAME_CALLBACKS_DELAY) so the UI thread has a chance
// to push a new frame first.
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}

// Runs the registered frame callbacks. The set is swapped out first so that
// callbacks re-registering themselves (via postFrameCallback) land in the
// pending set for the NEXT frame rather than being run again immediately.
void RenderThread::dispatchFrameCallbacks() {
    ATRACE_CALL();
    mFrameCallbackTaskPending = false;

    std::set<IFrameCallback*> callbacks;
    mFrameCallbacks.swap(callbacks);

    if (callbacks.size()) {
        // Assume one of them will probably animate again so preemptively
        // request the next vsync in case it occurs mid-frame
        requestVsync();
        for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) {
            (*it)->doFrame();
        }
    }
}

// Asks for exactly one more vsync; mVsyncRequested de-dupes requests until
// drainDisplayEventQueue() observes the vsync and clears the flag.
void RenderThread::requestVsync() {
    if (!mVsyncRequested) {
        mVsyncRequested = true;
        status_t status = mDisplayEventReceiver->requestNextVsync();
        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
                "requestNextVsync failed with status: %d", status);
    }
}

// Main loop of the render thread: poll the looper (which also services the
// vsync fd callback), run every due task, then compute the next poll timeout
// from the queue's earliest mRunAt. Never returns under normal operation.
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1;
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        {
            FatVector<RenderTask*, 10> workQueue;
            // Process our queue, if we have anything. By first acquiring
            // all the pending events then processing them we avoid vsync
            // starvation if more tasks are queued while we are processing tasks.
            while (RenderTask* task = nextTask(&nextWakeup)) {
                workQueue.push_back(task);
            }
            for (auto task : workQueue) {
                task->run();
                // task may have deleted itself, do not reference it again
            }
        }
        if (nextWakeup == LLONG_MAX) {
            // Nothing queued: block indefinitely until the looper is woken
            timeoutMillis = -1;
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;
            }
        }

        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            // Catch up on any vsync that arrived while we were working, then
            // promote pending registrations so they run on the next frame
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}

// Thread-safe: queues |task| (ordered by mRunAt) and wakes the looper if the
// new task runs earlier than the current scheduled wakeup. mNextWakeup is
// zeroed so concurrent callers don't issue redundant wakes.
void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}

// Thread-safe: queues |task| and blocks the calling thread until the render
// thread has executed it.
void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    while (!syncTask.hasRun()) {
        condition.wait(mutex);
    }
}

// Thread-safe: pushes |task| to the head of the queue and wakes the looper
// unconditionally.
void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}

// Thread-safe: queues |task| to run no earlier than |runAtNs| (monotonic).
void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}

// Thread-safe: removes a previously queued task; fatal if it isn't queued.
void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}

// Registers |callback| to be invoked on the next frame. It is first staged in
// mPendingRegistrationFrameCallbacks and promoted by threadLoop().
// NOTE(review): no mLock held here and below — these appear to be
// render-thread-only entry points; confirm against callers.
void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}

// Removes |callback| from both the active and pending sets; returns whether
// it was found in either.
bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
    size_t erased;
    erased = mFrameCallbacks.erase(callback);
    erased |= mPendingRegistrationFrameCallbacks.erase(callback);
    return erased;
}

// If |callback| is scheduled for the current frame, defers it to the next one
// by moving it back into the pending set. No-op if it wasn't scheduled.
void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
    if (mFrameCallbacks.erase(callback)) {
        mPendingRegistrationFrameCallbacks.insert(callback);
    }
}

// Thread-safe: pops the next due task, or returns nullptr if the queue is
// empty or the head task isn't due yet. Always reports the next wakeup time
// through |nextWakeup| (LLONG_MAX when the queue is empty) so threadLoop can
// size its poll timeout; queue() compares against the stored mNextWakeup to
// decide whether a wake is needed.
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            // Head task is in the future; caller will sleep until mNextWakeup
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}

// Uploads |bitmap| to a GPU-backed SkImage using the active Skia pipeline.
// Returns nullptr for pipelines without support (SkiaVulkan is still TODO);
// fatal for non-Skia pipelines.
sk_sp<SkImage> RenderThread::makeTextureImage(Bitmap* bitmap) {
    auto renderType = Properties::getRenderPipelineType();
    sk_sp<SkImage> hardwareImage;
    switch (renderType) {
        case RenderPipelineType::SkiaGL:
            hardwareImage = skiapipeline::SkiaOpenGLPipeline::makeTextureImage(*this, bitmap);
            break;
        case RenderPipelineType::SkiaVulkan:
            //TODO: add Vulkan support
            break;
        default:
            LOG_ALWAYS_FATAL("makeTextureImage: canvas context type %d not supported",
                    (int32_t) renderType);
            break;
    }
    return hardwareImage;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */