/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "GrGpu.h"
#include "GrGpuFactory.h"
#include "vk/GrVkBackendContext.h"
#include "GrVkCaps.h"
#include "GrVkCopyManager.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkResourceProvider.h"
#include "GrVkSemaphore.h"
#include "GrVkVertexBuffer.h"
#include "GrVkUtil.h"
#include "vk/GrVkDefines.h"

class GrPipeline;

class GrVkBufferImpl;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

namespace SkSL {
    class Compiler;
}

class GrVkGpu : public GrGpu {
public:
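    // Factory functions for creating the Vulkan GrGpu backend. The GrBackendContext overload is
    // assumed to wrap a GrVkBackendContext; the second overload takes that context directly.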
    static sk_sp<GrGpu> Make(GrBackendContext backendContext, const GrContextOptions&, GrContext*);
    static sk_sp<GrGpu> Make(sk_sp<const GrVkBackendContext>, const GrContextOptions&, GrContext*);

    ~GrVkGpu() override;

    void disconnect(DisconnectType) override;

    const GrVkInterface* vkInterface() const { return fBackendContext->fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    VkDevice device() const { return fDevice; }
    VkQueue  queue() const { return fQueue; }
    VkCommandPool cmdPool() const { return fCmdPool; }
    VkPhysicalDeviceProperties physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() { return fCurrentCmdBuffer; }

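    // Passed to submitCommandBuffer to control whether it waits for all queued work to finish
    // before returning (kForce_SyncQueue) or returns without waiting (kSkip_SyncQueue).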
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

    GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                     GrPixelConfig config,
                                                     bool isRenderTarget,
                                                     GrMipMapped) override;
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
    void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandonTexture = false) override;

    GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                int width,
                                                                int height) override;

    void clearStencil(GrRenderTarget* target, int clearValue) override;

    GrGpuRTCommandBuffer* createCommandBuffer(
            GrRenderTarget*, GrSurfaceOrigin,
            const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
            const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) override;

    GrGpuTextureCommandBuffer* createCommandBuffer(GrTexture*, GrSurfaceOrigin) override;

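    // The three calls below record a pipeline barrier of the corresponding type into the current
    // command buffer.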
    void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                          VkPipelineStageFlags dstStageMask,
                          bool byRegion,
                          VkMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;

    SkSL::Compiler* shaderCompiler() const {
        return fCompiler;
    }

    void onResolveRenderTarget(GrRenderTarget* target) override {
        this->internalResolveRenderTarget(target, true);
    }

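    // Begins the given render pass on the primary command buffer, executes the secondary command
    // buffers within it, and ends the render pass.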
    void submitSecondaryCommandBuffer(const SkTArray<GrVkSecondaryCommandBuffer*>&,
                                      const GrVkRenderPass*,
                                      const VkClearValue* colorClear,
                                      GrVkRenderTarget*, GrSurfaceOrigin,
                                      const SkIRect& bounds);

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence, uint64_t timeout) override;
    void deleteFence(GrFence) const override;

    sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
    sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                            GrResourceProvider::SemaphoreWrapType wrapType,
                                            GrWrapOwnership ownership) override;
    void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override;
    void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;

    sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

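    // Regenerates the mipmap levels of the given texture from its base level.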
    void generateMipmap(GrVkTexture* tex, GrSurfaceOrigin texOrigin);

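    // copyBuffer records a buffer-to-buffer copy into the current command buffer; updateBuffer
    // writes 'size' bytes from 'src' into the buffer at 'offset', returning false on failure.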
    void copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
                    VkDeviceSize dstOffset, VkDeviceSize size);
    bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);

    // Heaps
    enum Heap {
        kLinearImage_Heap = 0,
        // We separate out small (i.e., <= 16K) images to reduce fragmentation
        // in the main heap.
        kOptimalImage_Heap,
        kSmallOptimalImage_Heap,
        // We have separate vertex and image heaps, because it's possible that
        // a given Vulkan driver may allocate them separately.
        kVertexBuffer_Heap,
        kIndexBuffer_Heap,
        kUniformBuffer_Heap,
        kTexelBuffer_Heap,
        kCopyReadBuffer_Heap,
        kCopyWriteBuffer_Heap,

        kLastHeap = kCopyWriteBuffer_Heap
    };
    static const int kHeapCount = kLastHeap + 1;

    GrVkHeap* getHeap(Heap heap) const { return fHeaps[heap].get(); }

private:
    GrVkGpu(GrContext*, const GrContextOptions&, sk_sp<const GrVkBackendContext> backendContext);

    void onResetContext(uint32_t resetBits) override {}

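    // Releases all Vulkan resources held by this GrVkGpu; used by the destructor and by
    // disconnect().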
    void destroyResources();

    sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                     const GrMipLevel texels[], int mipLevelCount) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                             int sampleCnt) override;

    GrBuffer* onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern,
                             const void* data) override;

    bool onGetReadPixelsInfo(GrSurface*, GrSurfaceOrigin, int width, int height, size_t rowBytes,
                             GrColorType, DrawPreference*, ReadPixelTempDrawInfo*) override;

    bool onGetWritePixelsInfo(GrSurface*, GrSurfaceOrigin, int width, int height, GrColorType,
                              DrawPreference*, WritePixelTempDrawInfo*) override;

    bool onReadPixels(GrSurface* surface, GrSurfaceOrigin, int left, int top, int width, int height,
                      GrColorType, void* buffer, size_t rowBytes) override;

    bool onWritePixels(GrSurface* surface, GrSurfaceOrigin, int left, int top, int width,
                       int height, GrColorType, const GrMipLevel texels[],
                       int mipLevelCount) override;

    bool onTransferPixels(GrTexture*, int left, int top, int width, int height, GrColorType,
                          GrBuffer* transferBuffer, size_t offset, size_t rowBytes) override;

    bool onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin, GrSurface* src,
                       GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
                       const SkIPoint& dstPoint) override;

    void onFinishFlush(bool insertedSemaphores) override;

    // Ends and submits the current command buffer to the queue, then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function waits for all
    // work in the queue to finish before returning. Any semaphores in fSemaphoresToSignal are
    // added to this submission as signal semaphores, and any in fSemaphoresToWaitOn as wait
    // semaphores.
    void submitCommandBuffer(SyncQueue sync);

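    // Resolves the render target's MSAA color buffer; if requiresSubmit is true, the current
    // command buffer is also submitted.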
    void internalResolveRenderTarget(GrRenderTarget*, bool requiresSubmit);

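    // onCopySurface implementations: these copy using vkCmdCopyImage, vkCmdBlitImage, or an
    // MSAA resolve, respectively.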
    void copySurfaceAsCopyImage(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                GrSurface* src, GrSurfaceOrigin srcOrigin,
                                GrVkImage* dstImage, GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                           GrSurface* src, GrSurfaceOrigin srcOrigin,
                           GrVkImage* dstImage, GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                              GrSurface* src, GrSurfaceOrigin srcOrigin,
                              const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and onWritePixels.
    bool uploadTexDataLinear(GrVkTexture* tex, GrSurfaceOrigin texOrigin, int left, int top,
                             int width, int height, GrColorType colorType, const void* data,
                             size_t rowBytes);
    bool uploadTexDataOptimal(GrVkTexture* tex, GrSurfaceOrigin texOrigin, int left, int top,
                              int width, int height, GrColorType colorType,
                              const GrMipLevel texels[], int mipLevelCount);

    void resolveImage(GrSurface* dst, GrVkRenderTarget* src, const SkIRect& srcRect,
                      const SkIPoint& dstPoint);

    sk_sp<const GrVkBackendContext> fBackendContext;
    sk_sp<GrVkCaps>                 fVkCaps;

    // These Vulkan objects are provided by the client, and also stored in fBackendContext.
    // They're copied here for convenient access.
    VkDevice                                     fDevice;
    VkQueue                                      fQueue;    // Must be Graphics queue
    VkCommandPool                                fCmdPool;

    // Created by GrVkGpu
    GrVkResourceProvider                         fResourceProvider;
    VkCommandPool                                fCmdPool;

    GrVkPrimaryCommandBuffer*                    fCurrentCmdBuffer;

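    // Semaphores that the next command buffer submission will wait on and signal, respectively
    // (see submitCommandBuffer).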
    SkSTArray<1, GrVkSemaphore::Resource*>       fSemaphoresToWaitOn;
    SkSTArray<1, GrVkSemaphore::Resource*>       fSemaphoresToSignal;

    VkPhysicalDeviceProperties                   fPhysDevProps;
    VkPhysicalDeviceMemoryProperties             fPhysDevMemProps;

    std::unique_ptr<GrVkHeap>                    fHeaps[kHeapCount];

    GrVkCopyManager                              fCopyManager;

#ifdef SK_ENABLE_VK_LAYERS
    // For reporting validation layer errors
    VkDebugReportCallbackEXT               fCallback;
#endif

    // Compiler used for compiling SkSL into SPIR-V. We only want to create the compiler once,
    // since there is significant overhead to the first compile of any compiler.
    SkSL::Compiler* fCompiler;

    // Tracks whether we have already disconnected all of the GPU resources from the Vulkan
    // context.
    bool fDisconnected;

    typedef GrGpu INHERITED;
};

#endif