/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrVkGpu_DEFINED
#define GrVkGpu_DEFINED

#include "GrGpu.h"
#include "GrGpuFactory.h"
#include "vk/GrVkBackendContext.h"
#include "GrVkCaps.h"
#include "GrVkCopyManager.h"
#include "GrVkIndexBuffer.h"
#include "GrVkMemory.h"
#include "GrVkResourceProvider.h"
#include "GrVkSemaphore.h"
#include "GrVkVertexBuffer.h"
#include "GrVkUtil.h"
#include "vk/GrVkDefines.h"

class GrPipeline;

class GrVkBufferImpl;
class GrVkPipeline;
class GrVkPipelineState;
class GrVkPrimaryCommandBuffer;
class GrVkRenderPass;
class GrVkSecondaryCommandBuffer;
class GrVkTexture;
struct GrVkInterface;

namespace SkSL {
    class Compiler;
}

class GrVkGpu : public GrGpu {
public:
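    // Factory functions. The first creates a GrVkGpu from a client-supplied opaque
    // GrBackendContext handle; the second takes an already-typed GrVkBackendContext.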
    static sk_sp<GrGpu> Make(GrBackendContext backendContext, const GrContextOptions&, GrContext*);
    static sk_sp<GrGpu> Make(sk_sp<const GrVkBackendContext>, const GrContextOptions&, GrContext*);

    ~GrVkGpu() override;

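    // Called when the owning GrContext is released or abandoned; releases (or, on abandon,
    // simply forgets) the Vulkan resources held by this GrVkGpu.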
    void disconnect(DisconnectType) override;

    const GrVkInterface* vkInterface() const { return fBackendContext->fInterface.get(); }
    const GrVkCaps& vkCaps() const { return *fVkCaps; }

    VkDevice device() const { return fDevice; }
    VkQueue  queue() const { return fQueue; }
    uint32_t queueIndex() const { return fBackendContext->fGraphicsQueueIndex; }
    VkCommandPool cmdPool() const { return fCmdPool; }
    VkPhysicalDeviceProperties physicalDeviceProperties() const {
        return fPhysDevProps;
    }
    VkPhysicalDeviceMemoryProperties physicalDeviceMemoryProperties() const {
        return fPhysDevMemProps;
    }

    GrVkResourceProvider& resourceProvider() { return fResourceProvider; }

    GrVkPrimaryCommandBuffer* currentCommandBuffer() { return fCurrentCmdBuffer; }

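    // Controls whether submitCommandBuffer() waits for all work on the queue to finish before
    // returning (kForce_SyncQueue) or returns without synchronizing (kSkip_SyncQueue).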
    enum SyncQueue {
        kForce_SyncQueue,
        kSkip_SyncQueue
    };

    bool onGetReadPixelsInfo(GrSurface* srcSurface, GrSurfaceOrigin srcOrigin,
                             int readWidth, int readHeight, size_t rowBytes,
                             GrPixelConfig readConfig, DrawPreference*,
                             ReadPixelTempDrawInfo*) override;

    bool onGetWritePixelsInfo(GrSurface* dstSurface, GrSurfaceOrigin dstOrigin,
                              int width, int height,
                              GrPixelConfig srcConfig, DrawPreference*,
                              WritePixelTempDrawInfo*) override;

    void onQueryMultisampleSpecs(GrRenderTarget*, GrSurfaceOrigin, const GrStencilSettings&,
                                 int* effectiveSampleCnt, SamplePattern*) override;

    void xferBarrier(GrRenderTarget*, GrXferBarrierType) override {}

    GrBackendTexture createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                     GrPixelConfig config,
                                                     bool isRenderTarget,
                                                     GrMipMapped) override;
    bool isTestingOnlyBackendTexture(const GrBackendTexture&) const override;
    void deleteTestingOnlyBackendTexture(GrBackendTexture*, bool abandonTexture = false) override;

    void testingOnly_flushGpuAndSync() override;

    GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                int width,
                                                                int height) override;

    void clearStencil(GrRenderTarget* target, int clearValue) override;

    GrGpuRTCommandBuffer* createCommandBuffer(
            GrRenderTarget*, GrSurfaceOrigin,
            const GrGpuRTCommandBuffer::LoadAndStoreInfo&,
            const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo&) override;

    GrGpuTextureCommandBuffer* createCommandBuffer(GrTexture*, GrSurfaceOrigin) override;

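    // The three barrier helpers below record a pipeline barrier of the corresponding kind
    // into the current command buffer.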
    void addMemoryBarrier(VkPipelineStageFlags srcStageMask,
                          VkPipelineStageFlags dstStageMask,
                          bool byRegion,
                          VkMemoryBarrier* barrier) const;
    void addBufferMemoryBarrier(VkPipelineStageFlags srcStageMask,
                                VkPipelineStageFlags dstStageMask,
                                bool byRegion,
                                VkBufferMemoryBarrier* barrier) const;
    void addImageMemoryBarrier(VkPipelineStageFlags srcStageMask,
                               VkPipelineStageFlags dstStageMask,
                               bool byRegion,
                               VkImageMemoryBarrier* barrier) const;

    SkSL::Compiler* shaderCompiler() const {
        return fCompiler;
    }

    void onResolveRenderTarget(GrRenderTarget* target, GrSurfaceOrigin origin) override {
        this->internalResolveRenderTarget(target, origin, true);
    }

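    // Executes the given secondary command buffers within a render pass instance recorded on
    // the primary command buffer, targeting the given render target.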
    void submitSecondaryCommandBuffer(const SkTArray<GrVkSecondaryCommandBuffer*>&,
                                      const GrVkRenderPass*,
                                      const VkClearValue* colorClear,
                                      GrVkRenderTarget*, GrSurfaceOrigin,
                                      const SkIRect& bounds);

    GrFence SK_WARN_UNUSED_RESULT insertFence() override;
    bool waitFence(GrFence, uint64_t timeout) override;
    void deleteFence(GrFence) const override;

    sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore(bool isOwned) override;
    sk_sp<GrSemaphore> wrapBackendSemaphore(const GrBackendSemaphore& semaphore,
                                            GrResourceProvider::SemaphoreWrapType wrapType,
                                            GrWrapOwnership ownership) override;
    void insertSemaphore(sk_sp<GrSemaphore> semaphore, bool flush) override;
    void waitSemaphore(sk_sp<GrSemaphore> semaphore) override;

    sk_sp<GrSemaphore> prepareTextureForCrossContextUsage(GrTexture*) override;

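    // Regenerates all mipmap levels of the given texture from its base level.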
    void generateMipmap(GrVkTexture* tex, GrSurfaceOrigin texOrigin);

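    // copyBuffer() records a GPU-side buffer-to-buffer copy into the current command buffer.
    // updateBuffer() is intended for small uploads that can be recorded directly into the
    // command buffer rather than staged through a separate transfer buffer.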
    void copyBuffer(GrVkBuffer* srcBuffer, GrVkBuffer* dstBuffer, VkDeviceSize srcOffset,
                    VkDeviceSize dstOffset, VkDeviceSize size);
    bool updateBuffer(GrVkBuffer* buffer, const void* src, VkDeviceSize offset, VkDeviceSize size);

    // Heaps
    enum Heap {
        kLinearImage_Heap = 0,
        // We separate out small (i.e., <= 16K) images to reduce fragmentation
        // in the main heap.
        kOptimalImage_Heap,
        kSmallOptimalImage_Heap,
        // We have separate vertex and image heaps, because it's possible that
        // a given Vulkan driver may allocate them separately.
        kVertexBuffer_Heap,
        kIndexBuffer_Heap,
        kUniformBuffer_Heap,
        kTexelBuffer_Heap,
        kCopyReadBuffer_Heap,
        kCopyWriteBuffer_Heap,

        kLastHeap = kCopyWriteBuffer_Heap
    };
    static const int kHeapCount = kLastHeap + 1;

    GrVkHeap* getHeap(Heap heap) const { return fHeaps[heap].get(); }

private:
    GrVkGpu(GrContext*, const GrContextOptions&, sk_sp<const GrVkBackendContext> backendContext);

    void onResetContext(uint32_t resetBits) override {}

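    // Releases all Vulkan resources owned by this GrVkGpu; shared by the destructor and
    // disconnect().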
    void destroyResources();

    sk_sp<GrTexture> onCreateTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                                     const GrMipLevel texels[], int mipLevelCount) override;

    sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTexture&, GrWrapOwnership) override;
    sk_sp<GrTexture> onWrapRenderableBackendTexture(const GrBackendTexture&,
                                                    int sampleCnt,
                                                    GrWrapOwnership) override;
    sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTarget&) override;

    sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTexture&,
                                                             int sampleCnt) override;

    GrBuffer* onCreateBuffer(size_t size, GrBufferType type, GrAccessPattern,
                             const void* data) override;

    bool onReadPixels(GrSurface* surface, GrSurfaceOrigin,
                      int left, int top, int width, int height,
                      GrPixelConfig,
                      void* buffer,
                      size_t rowBytes) override;

    bool onWritePixels(GrSurface* surface, GrSurfaceOrigin,
                       int left, int top, int width, int height,
                       GrPixelConfig config, const GrMipLevel texels[], int mipLevelCount) override;

    bool onTransferPixels(GrTexture*,
                          int left, int top, int width, int height,
                          GrPixelConfig config, GrBuffer* transferBuffer,
                          size_t offset, size_t rowBytes) override;

    bool onCopySurface(GrSurface* dst, GrSurfaceOrigin dstOrigin, GrSurface* src,
                       GrSurfaceOrigin srcOrigin, const SkIRect& srcRect,
                       const SkIPoint& dstPoint, bool canDiscardOutsideDstRect) override;

    void onFinishFlush(bool insertedSemaphores) override;

    // Ends and submits the current command buffer to the queue and then creates a new command
    // buffer and begins it. If sync is set to kForce_SyncQueue, the function will wait for all
    // work in the queue to finish before returning. If this GrVkGpu object has any semaphores in
    // fSemaphoresToSignal, we will add those signal semaphores to the submission of this command
    // buffer. If this GrVkGpu object has any semaphores in fSemaphoresToWaitOn, we will add those
    // wait semaphores to the submission of this command buffer.
    void submitCommandBuffer(SyncQueue sync);

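    // Resolves the render target's MSAA color attachment into its resolve attachment,
    // optionally submitting the current command buffer when requiresSubmit is set.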
    void internalResolveRenderTarget(GrRenderTarget*, GrSurfaceOrigin origin, bool requiresSubmit);

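    // The copySurfaceAsXxx() helpers implement onCopySurface() using vkCmdCopyImage,
    // vkCmdBlitImage, and an MSAA resolve, respectively.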
    void copySurfaceAsCopyImage(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                                GrSurface* src, GrSurfaceOrigin srcOrigin,
                                GrVkImage* dstImage, GrVkImage* srcImage,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint);

    void copySurfaceAsBlit(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                           GrSurface* src, GrSurfaceOrigin srcOrigin,
                           GrVkImage* dstImage, GrVkImage* srcImage,
                           const SkIRect& srcRect,
                           const SkIPoint& dstPoint);

    void copySurfaceAsResolve(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                              GrSurface* src, GrSurfaceOrigin srcOrigin,
                              const SkIRect& srcRect,
                              const SkIPoint& dstPoint);

    // Helpers for onCreateTexture and writeTexturePixels.
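    // uploadTexDataLinear() writes into a linear-tiled image by mapping its memory directly,
    // while uploadTexDataOptimal() stages the pixels in a transfer buffer and copies them into
    // the optimally-tiled image.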
    bool uploadTexDataLinear(GrVkTexture* tex, GrSurfaceOrigin texOrigin,
                             int left, int top, int width, int height,
                             GrPixelConfig dataConfig,
                             const void* data,
                             size_t rowBytes);
    bool uploadTexDataOptimal(GrVkTexture* tex, GrSurfaceOrigin texOrigin,
                              int left, int top, int width, int height,
                              GrPixelConfig dataConfig,
                              const GrMipLevel texels[], int mipLevelCount);

    void resolveImage(GrSurface* dst, GrSurfaceOrigin dstOrigin,
                      GrVkRenderTarget* src, GrSurfaceOrigin srcOrigin,
                      const SkIRect& srcRect, const SkIPoint& dstPoint);

    sk_sp<const GrVkBackendContext> fBackendContext;
    sk_sp<GrVkCaps>                 fVkCaps;

    // These Vulkan objects are provided by the client, and also stored in fBackendContext.
    // They're copied here for convenient access.
    VkDevice                                     fDevice;
    VkQueue                                      fQueue;    // Must be Graphics queue

    // Created by GrVkGpu
    GrVkResourceProvider                         fResourceProvider;
    VkCommandPool                                fCmdPool;

    GrVkPrimaryCommandBuffer*                    fCurrentCmdBuffer;

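    // Semaphores to be waited on and signaled, respectively, at the next command buffer
    // submission (see submitCommandBuffer()).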
    SkSTArray<1, GrVkSemaphore::Resource*>       fSemaphoresToWaitOn;
    SkSTArray<1, GrVkSemaphore::Resource*>       fSemaphoresToSignal;

    VkPhysicalDeviceProperties                   fPhysDevProps;
    VkPhysicalDeviceMemoryProperties             fPhysDevMemProps;

    std::unique_ptr<GrVkHeap>                    fHeaps[kHeapCount];

    GrVkCopyManager                              fCopyManager;

#ifdef SK_ENABLE_VK_LAYERS
    // For reporting validation layer errors
    VkDebugReportCallbackEXT                     fCallback;
#endif

    // Compiler used for compiling SkSL into SPIR-V. We only want to create the compiler once,
    // since there is significant overhead to the first compile of any compiler.
    SkSL::Compiler* fCompiler;

    // Tracks whether we have already disconnected all GPU resources from the Vulkan context.
    bool fDisconnected;

    typedef GrGpu INHERITED;
};

#endif