1/*
2 * Copyright (C) 2013 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 *      http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "RenderThread.h"
18
19#include "hwui/Bitmap.h"
20#include "renderstate/RenderState.h"
21#include "renderthread/OpenGLPipeline.h"
22#include "pipeline/skia/SkiaOpenGLReadback.h"
23#include "pipeline/skia/SkiaOpenGLPipeline.h"
24#include "pipeline/skia/SkiaVulkanPipeline.h"
25#include "CanvasContext.h"
26#include "EglManager.h"
27#include "OpenGLReadback.h"
28#include "RenderProxy.h"
29#include "VulkanManager.h"
30#include "utils/FatVector.h"
31
#include <gui/DisplayEventReceiver.h>
#include <gui/ISurfaceComposer.h>
#include <gui/SurfaceComposerClient.h>
#include <stdio.h>
#include <sys/resource.h>
#include <unistd.h>
#include <utils/Condition.h>
#include <utils/Log.h>
#include <utils/Mutex.h>
39
40namespace android {
41namespace uirenderer {
42namespace renderthread {
43
// Number of events to read at a time from the DisplayEventReceiver pipe.
// The value should be large enough that we can quickly drain the pipe
// using just a few large reads.
static const size_t EVENT_BUFFER_SIZE = 100;

// Slight delay to give the UI time to push us a new frame before we replay
// the frame callbacks (see drainDisplayEventQueue / dispatchFrameCallbacks).
static const nsecs_t DISPATCH_FRAME_CALLBACKS_DELAY = milliseconds_to_nanoseconds(4);
51
52TaskQueue::TaskQueue() : mHead(nullptr), mTail(nullptr) {}
53
54RenderTask* TaskQueue::next() {
55    RenderTask* ret = mHead;
56    if (ret) {
57        mHead = ret->mNext;
58        if (!mHead) {
59            mTail = nullptr;
60        }
61        ret->mNext = nullptr;
62    }
63    return ret;
64}
65
// Returns the task at the head of the queue without removing it, or nullptr
// if the queue is empty.
RenderTask* TaskQueue::peek() {
    return mHead;
}
69
// Inserts |task| into the queue, kept sorted ascending by mRunAt. Tasks with
// equal mRunAt preserve insertion order (new task goes after existing ones).
// The queue is intrusive: the RenderTask's own mNext pointer forms the list,
// so a task may be queued at most once.
void TaskQueue::queue(RenderTask* task) {
    // Since the RenderTask itself forms the linked list it is not allowed
    // to have the same task queued twice
    LOG_ALWAYS_FATAL_IF(task->mNext || mTail == task, "Task is already in the queue!");
    if (mTail) {
        // Fast path if we can just append
        if (mTail->mRunAt <= task->mRunAt) {
            mTail->mNext = task;
            mTail = task;
        } else {
            // Need to find the proper insertion point
            // Walk forward past every task that should run at or before us.
            RenderTask* previous = nullptr;
            RenderTask* next = mHead;
            while (next && next->mRunAt <= task->mRunAt) {
                previous = next;
                next = next->mNext;
            }
            if (!previous) {
                // New earliest task: becomes the head.
                task->mNext = mHead;
                mHead = task;
            } else {
                previous->mNext = task;
                if (next) {
                    task->mNext = next;
                } else {
                    // Defensive: in this branch the tail sorts after |task|,
                    // so |next| should always be non-null, but handle it.
                    mTail = task;
                }
            }
        }
    } else {
        // Empty queue: task becomes both head and tail.
        mTail = mHead = task;
    }
}
103
104void TaskQueue::queueAtFront(RenderTask* task) {
105    LOG_ALWAYS_FATAL_IF(task->mNext || mHead == task, "Task is already in the queue!");
106    if (mTail) {
107        task->mNext = mHead;
108        mHead = task;
109    } else {
110        mTail = mHead = task;
111    }
112}
113
// Unlinks |task| from the queue. Fatals if the task is not actually queued;
// callers are required to know whether their task is pending because task
// memory is owned by the caller, not the queue.
void TaskQueue::remove(RenderTask* task) {
    // TaskQueue is strict here to enforce that users are keeping track of
    // their RenderTasks due to how their memory is managed
    // (a queued task has mNext set unless it is the tail).
    LOG_ALWAYS_FATAL_IF(!task->mNext && mTail != task,
            "Cannot remove a task that isn't in the queue!");

    // If task is the head we can just call next() to pop it off
    // Otherwise we need to scan through to find the task before it
    if (peek() == task) {
        next();
    } else {
        RenderTask* previous = mHead;
        while (previous->mNext != task) {
            previous = previous->mNext;
        }
        previous->mNext = task->mNext;
        if (mTail == task) {
            // Removed the tail; its predecessor becomes the new tail.
            mTail = previous;
        }
    }
}
135
// RenderTask that forwards to RenderThread::dispatchFrameCallbacks(). A single
// instance is owned by the RenderThread (mFrameCallbackTask) and re-queued
// with a small delay after each vsync; see drainDisplayEventQueue().
class DispatchFrameCallbacks : public RenderTask {
private:
    RenderThread* mRenderThread;
public:
    explicit DispatchFrameCallbacks(RenderThread* rt) : mRenderThread(rt) {}

    virtual void run() override {
        mRenderThread->dispatchFrameCallbacks();
    }
};
146
147static bool gHasRenderThreadInstance = false;
148
// Returns true if getInstance() has ever been called, without creating the
// RenderThread as a side effect.
bool RenderThread::hasInstance() {
    return gHasRenderThreadInstance;
}
152
// Lazily creates and returns the process-wide RenderThread singleton.
// The instance is intentionally leaked: its destructor fatals, so it must
// never be destroyed (see ~RenderThread below).
RenderThread& RenderThread::getInstance() {
    // This is a pointer because otherwise __cxa_finalize
    // will try to delete it like a Good Citizen but that causes us to crash
    // because we don't want to delete the RenderThread normally.
    static RenderThread* sInstance = new RenderThread();
    gHasRenderThreadInstance = true;
    return *sInstance;
}
161
// Constructs the render thread and immediately starts it running.
// Heavy per-thread state (EGL, RenderState, Vulkan, the display event
// receiver) is deliberately NOT created here; it is set up on the render
// thread itself in initThreadLocals(), called from threadLoop().
RenderThread::RenderThread() : Thread(true)  // NOTE(review): ctor arg presumably canCallJava — confirm against Thread.h
        , mNextWakeup(LLONG_MAX)
        , mDisplayEventReceiver(nullptr)
        , mVsyncRequested(false)
        , mFrameCallbackTaskPending(false)
        , mFrameCallbackTask(nullptr)
        , mRenderState(nullptr)
        , mEglManager(nullptr)
        , mVkManager(nullptr) {
    Properties::load();
    mFrameCallbackTask = new DispatchFrameCallbacks(this);
    // Looper created before run() so queue()/wake() are safe immediately.
    mLooper = new Looper(false);
    run("RenderThread");
}
176
// The RenderThread is a process-lifetime singleton and must never be
// destroyed; reaching this destructor is a programming error.
RenderThread::~RenderThread() {
    LOG_ALWAYS_FATAL("Can't destroy the render thread");
}
180
// Creates the DisplayEventReceiver (vsync pipe) and registers its fd with
// the looper so displayEventReceiverCallback fires on vsync input.
// Must only be called once, from the render thread (via initThreadLocals).
void RenderThread::initializeDisplayEventReceiver() {
    LOG_ALWAYS_FATAL_IF(mDisplayEventReceiver, "Initializing a second DisplayEventReceiver?");
    mDisplayEventReceiver = new DisplayEventReceiver();
    status_t status = mDisplayEventReceiver->initCheck();
    LOG_ALWAYS_FATAL_IF(status != NO_ERROR, "Initialization of DisplayEventReceiver "
            "failed with status: %d", status);

    // Register the FD
    mLooper->addFd(mDisplayEventReceiver->getFd(), 0,
            Looper::EVENT_INPUT, RenderThread::displayEventReceiverCallback, this);
}
192
// One-time setup run on the render thread itself (first thing in
// threadLoop): queries display info to derive the frame interval, hooks up
// the vsync pipe, and creates the GL/render-state/Vulkan/cache managers.
void RenderThread::initThreadLocals() {
    sp<IBinder> dtoken(SurfaceComposerClient::getBuiltInDisplay(
            ISurfaceComposer::eDisplayIdMain));
    status_t status = SurfaceComposerClient::getDisplayInfo(dtoken, &mDisplayInfo);
    LOG_ALWAYS_FATAL_IF(status, "Failed to get display info\n");
    // Frame interval derived from the display's reported refresh rate.
    nsecs_t frameIntervalNanos = static_cast<nsecs_t>(1000000000 / mDisplayInfo.fps);
    mTimeLord.setFrameInterval(frameIntervalNanos);
    initializeDisplayEventReceiver();
    mEglManager = new EglManager(*this);
    mRenderState = new RenderState(*this);
    mVkManager = new VulkanManager(*this);
    mCacheManager = new CacheManager(mDisplayInfo);
}
206
207void RenderThread::dumpGraphicsMemory(int fd) {
208    globalProfileData()->dump(fd);
209
210    String8 cachesOutput;
211    String8 pipeline;
212    auto renderType = Properties::getRenderPipelineType();
213    switch (renderType) {
214        case RenderPipelineType::OpenGL: {
215            if (Caches::hasInstance()) {
216                cachesOutput.appendFormat("Caches:\n");
217                Caches::getInstance().dumpMemoryUsage(cachesOutput);
218            } else {
219                cachesOutput.appendFormat("No caches instance.");
220            }
221            pipeline.appendFormat("FrameBuilder");
222            break;
223        }
224        case RenderPipelineType::SkiaGL: {
225            mCacheManager->dumpMemoryUsage(cachesOutput, mRenderState);
226            pipeline.appendFormat("Skia (OpenGL)");
227            break;
228        }
229        case RenderPipelineType::SkiaVulkan: {
230            mCacheManager->dumpMemoryUsage(cachesOutput, mRenderState);
231            pipeline.appendFormat("Skia (Vulkan)");
232            break;
233        }
234        default:
235            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
236            break;
237    }
238
239    FILE *file = fdopen(fd, "a");
240    fprintf(file, "\n%s\n", cachesOutput.string());
241    fprintf(file, "\nPipeline=%s\n", pipeline.string());
242    fflush(file);
243}
244
245Readback& RenderThread::readback() {
246
247    if (!mReadback) {
248        auto renderType = Properties::getRenderPipelineType();
249        switch (renderType) {
250            case RenderPipelineType::OpenGL:
251                mReadback = new OpenGLReadbackImpl(*this);
252                break;
253            case RenderPipelineType::SkiaGL:
254            case RenderPipelineType::SkiaVulkan:
255                // It works to use the OpenGL pipeline for Vulkan but this is not
256                // ideal as it causes us to create an OpenGL context in addition
257                // to the Vulkan one.
258                mReadback = new skiapipeline::SkiaOpenGLReadback(*this);
259                break;
260            default:
261                LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
262                break;
263        }
264    }
265
266    return *mReadback;
267}
268
// Swaps in a new Skia GrContext, taking ownership of |context| (may be null).
// Order matters: the cache manager is pointed at the new context first, then
// the old context releases its GPU resources before being dropped.
void RenderThread::setGrContext(GrContext* context) {
    mCacheManager->reset(context);
    if (mGrContext.get()) {
        mGrContext->releaseResourcesAndAbandonContext();
    }
    mGrContext.reset(context);
}
276
277int RenderThread::displayEventReceiverCallback(int fd, int events, void* data) {
278    if (events & (Looper::EVENT_ERROR | Looper::EVENT_HANGUP)) {
279        ALOGE("Display event receiver pipe was closed or an error occurred.  "
280                "events=0x%x", events);
281        return 0; // remove the callback
282    }
283
284    if (!(events & Looper::EVENT_INPUT)) {
285        ALOGW("Received spurious callback for unhandled poll event.  "
286                "events=0x%x", events);
287        return 1; // keep the callback
288    }
289
290    reinterpret_cast<RenderThread*>(data)->drainDisplayEventQueue();
291
292    return 1; // keep the callback
293}
294
295static nsecs_t latestVsyncEvent(DisplayEventReceiver* receiver) {
296    DisplayEventReceiver::Event buf[EVENT_BUFFER_SIZE];
297    nsecs_t latest = 0;
298    ssize_t n;
299    while ((n = receiver->getEvents(buf, EVENT_BUFFER_SIZE)) > 0) {
300        for (ssize_t i = 0; i < n; i++) {
301            const DisplayEventReceiver::Event& ev = buf[i];
302            switch (ev.header.type) {
303            case DisplayEventReceiver::DISPLAY_EVENT_VSYNC:
304                latest = ev.header.timestamp;
305                break;
306            }
307        }
308    }
309    if (n < 0) {
310        ALOGW("Failed to get events from display event receiver, status=%d", status_t(n));
311    }
312    return latest;
313}
314
// Drains the vsync pipe and, if a new vsync arrived and TimeLord accepts it,
// schedules mFrameCallbackTask to run DISPATCH_FRAME_CALLBACKS_DELAY after
// the vsync timestamp (giving the UI thread a moment to push a frame first).
void RenderThread::drainDisplayEventQueue() {
    ATRACE_CALL();
    nsecs_t vsyncEvent = latestVsyncEvent(mDisplayEventReceiver);
    if (vsyncEvent > 0) {
        // Got a vsync; a fresh request is needed for the next one.
        mVsyncRequested = false;
        if (mTimeLord.vsyncReceived(vsyncEvent) && !mFrameCallbackTaskPending) {
            ATRACE_NAME("queue mFrameCallbackTask");
            mFrameCallbackTaskPending = true;
            nsecs_t runAt = (vsyncEvent + DISPATCH_FRAME_CALLBACKS_DELAY);
            queueAt(mFrameCallbackTask, runAt);
        }
    }
}
328
329void RenderThread::dispatchFrameCallbacks() {
330    ATRACE_CALL();
331    mFrameCallbackTaskPending = false;
332
333    std::set<IFrameCallback*> callbacks;
334    mFrameCallbacks.swap(callbacks);
335
336    if (callbacks.size()) {
337        // Assume one of them will probably animate again so preemptively
338        // request the next vsync in case it occurs mid-frame
339        requestVsync();
340        for (std::set<IFrameCallback*>::iterator it = callbacks.begin(); it != callbacks.end(); it++) {
341            (*it)->doFrame();
342        }
343    }
344}
345
// Asks SurfaceFlinger for the next vsync signal, deduplicating so only one
// request is outstanding at a time (mVsyncRequested is cleared when the
// vsync arrives in drainDisplayEventQueue).
void RenderThread::requestVsync() {
    if (!mVsyncRequested) {
        mVsyncRequested = true;
        status_t status = mDisplayEventReceiver->requestNextVsync();
        LOG_ALWAYS_FATAL_IF(status != NO_ERROR,
                "requestNextVsync failed with status: %d", status);
    }
}
354
// Main loop of the render thread: poll the looper (vsync fd + wakeups),
// run all due tasks, compute the next wakeup timeout, and keep the frame
// callback / vsync bookkeeping consistent. Never returns normally.
bool RenderThread::threadLoop() {
    setpriority(PRIO_PROCESS, 0, PRIORITY_DISPLAY);
    initThreadLocals();

    int timeoutMillis = -1;  // -1 == block indefinitely until woken
    for (;;) {
        int result = mLooper->pollOnce(timeoutMillis);
        LOG_ALWAYS_FATAL_IF(result == Looper::POLL_ERROR,
                "RenderThread Looper POLL_ERROR!");

        nsecs_t nextWakeup;
        {
            FatVector<RenderTask*, 10> workQueue;
            // Process our queue, if we have anything. By first acquiring
            // all the pending events then processing them we avoid vsync
            // starvation if more tasks are queued while we are processing tasks.
            while (RenderTask* task = nextTask(&nextWakeup)) {
                workQueue.push_back(task);
            }
            for (auto task : workQueue) {
                task->run();
                // task may have deleted itself, do not reference it again
            }
        }
        // Translate the next task's run time into a poll timeout.
        if (nextWakeup == LLONG_MAX) {
            timeoutMillis = -1;
        } else {
            nsecs_t timeoutNanos = nextWakeup - systemTime(SYSTEM_TIME_MONOTONIC);
            timeoutMillis = nanoseconds_to_milliseconds(timeoutNanos);
            if (timeoutMillis < 0) {
                timeoutMillis = 0;  // already overdue; poll without blocking
            }
        }

        // Promote callbacks registered since the last frame into the active
        // set, first draining any stale vsync so they run on a fresh one.
        if (mPendingRegistrationFrameCallbacks.size() && !mFrameCallbackTaskPending) {
            drainDisplayEventQueue();
            mFrameCallbacks.insert(
                    mPendingRegistrationFrameCallbacks.begin(), mPendingRegistrationFrameCallbacks.end());
            mPendingRegistrationFrameCallbacks.clear();
            requestVsync();
        }

        if (!mFrameCallbackTaskPending && !mVsyncRequested && mFrameCallbacks.size()) {
            // TODO: Clean this up. This is working around an issue where a combination
            // of bad timing and slow drawing can result in dropping a stale vsync
            // on the floor (correct!) but fails to schedule to listen for the
            // next vsync (oops), so none of the callbacks are run.
            requestVsync();
        }
    }

    return false;
}
408
// Queues |task| (sorted by mRunAt) and wakes the looper if the task is due
// before the currently-scheduled wakeup. Safe to call from any thread.
void RenderThread::queue(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queue(task);
    // mNextWakeup == 0 means a wake is already in flight; don't wake twice.
    if (mNextWakeup && task->mRunAt < mNextWakeup) {
        mNextWakeup = 0;
        mLooper->wake();
    }
}
417
// Queues |task| and blocks the calling thread until the render thread has
// run it. Must not be called from the render thread itself (would deadlock).
void RenderThread::queueAndWait(RenderTask* task) {
    // These need to be local to the thread to avoid the Condition
    // signaling the wrong thread. The easiest way to achieve that is to just
    // make this on the stack, although that has a slight cost to it
    Mutex mutex;
    Condition condition;
    SignalingRenderTask syncTask(task, &mutex, &condition);

    AutoMutex _lock(mutex);
    queue(&syncTask);
    // Loop guards against spurious wakeups.
    while (!syncTask.hasRun()) {
        condition.wait(mutex);
    }
}
432
// Queues |task| ahead of everything else (ignoring mRunAt ordering) and
// unconditionally wakes the looper. Safe to call from any thread.
void RenderThread::queueAtFront(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.queueAtFront(task);
    mLooper->wake();
}
438
// Schedules |task| to run at absolute time |runAtNs| (CLOCK_MONOTONIC-based
// nsecs, per nextTask's systemTime comparison) by stamping it and queueing.
void RenderThread::queueAt(RenderTask* task, nsecs_t runAtNs) {
    task->mRunAt = runAtNs;
    queue(task);
}
443
// Removes a pending |task| from the queue under the queue lock. Fatals (via
// TaskQueue::remove) if the task is not actually queued.
void RenderThread::remove(RenderTask* task) {
    AutoMutex _lock(mLock);
    mQueue.remove(task);
}
448
// Registers |callback| to run on an upcoming frame. No lock is taken;
// presumably this must be called on the render thread — confirm with callers.
void RenderThread::postFrameCallback(IFrameCallback* callback) {
    mPendingRegistrationFrameCallbacks.insert(callback);
}
452
453bool RenderThread::removeFrameCallback(IFrameCallback* callback) {
454    size_t erased;
455    erased = mFrameCallbacks.erase(callback);
456    erased |= mPendingRegistrationFrameCallbacks.erase(callback);
457    return erased;
458}
459
// Defers |callback| from the current frame to the next one: if it is in the
// active set, move it back to the pending-registration set. No-op otherwise.
void RenderThread::pushBackFrameCallback(IFrameCallback* callback) {
    if (mFrameCallbacks.erase(callback)) {
        mPendingRegistrationFrameCallbacks.insert(callback);
    }
}
465
// Pops and returns the next task that is due to run now, or nullptr if the
// queue is empty or the head task is scheduled for the future. Also records
// the head task's run time (or LLONG_MAX when empty) in mNextWakeup and,
// if |nextWakeup| is non-null, writes it there for the caller's poll timeout.
RenderTask* RenderThread::nextTask(nsecs_t* nextWakeup) {
    AutoMutex _lock(mLock);
    RenderTask* next = mQueue.peek();
    if (!next) {
        mNextWakeup = LLONG_MAX;
    } else {
        mNextWakeup = next->mRunAt;
        // Most tasks won't be delayed, so avoid unnecessary systemTime() calls
        if (next->mRunAt <= 0 || next->mRunAt <= systemTime(SYSTEM_TIME_MONOTONIC)) {
            next = mQueue.next();
        } else {
            // Head task is in the future; leave it queued.
            next = nullptr;
        }
    }
    if (nextWakeup) {
        *nextWakeup = mNextWakeup;
    }
    return next;
}
485
486sk_sp<Bitmap> RenderThread::allocateHardwareBitmap(SkBitmap& skBitmap) {
487    auto renderType = Properties::getRenderPipelineType();
488    switch (renderType) {
489        case RenderPipelineType::OpenGL:
490            return OpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
491        case RenderPipelineType::SkiaGL:
492            return skiapipeline::SkiaOpenGLPipeline::allocateHardwareBitmap(*this, skBitmap);
493        case RenderPipelineType::SkiaVulkan:
494            return skiapipeline::SkiaVulkanPipeline::allocateHardwareBitmap(*this, skBitmap);
495        default:
496            LOG_ALWAYS_FATAL("canvas context type %d not supported", (int32_t) renderType);
497            break;
498    }
499    return nullptr;
500}
501
502} /* namespace renderthread */
503} /* namespace uirenderer */
504} /* namespace android */
505