GrBufferAllocPool.cpp revision 7dea7b7df14f327f4af32a83af52ee695b4ab1e0

/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrBufferAllocPool.h"
#include "GrCaps.h"
#include "GrContext.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrResourceProvider.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"

#include "SkTraceEvent.h"

#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

static const size_t MIN_VERTEX_BUFFER_SIZE = 1 << 15;
static const size_t MIN_INDEX_BUFFER_SIZE = 1 << 12;

// page size
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 15)

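// Unmaps a block's buffer and records, for tracing, the fraction of the
// buffer that was never written to.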
#define UNMAP_BUFFER(block)                                                               \
do {                                                                                      \
    TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"),                           \
                         "GrBufferAllocPool Unmapping Buffer",                            \
                         TRACE_EVENT_SCOPE_THREAD,                                        \
                         "percent_unwritten",                                             \
                         (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
    (block).fBuffer->unmap();                                                             \
} while (false)

GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
                                     size_t blockSize)
    : fBlocks(8) {

    fGpu = SkRef(gpu);
    fCpuData = nullptr;
    fBufferType = bufferType;
    fBufferPtr = nullptr;
    fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fGeometryBufferMapThreshold = gpu->caps()->geometryBufferMapThreshold();
}

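// Unmaps the most recently created block if it is still mapped, then destroys
// every block in the pool.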
void GrBufferAllocPool::deleteBlocks() {
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    SkASSERT(!fBufferPtr);
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    this->deleteBlocks();
    sk_free(fCpuData);
    fGpu->unref();
}

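// Releases all blocks and shrinks the CPU-side staging buffer back to the
// minimum block size.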
void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    this->deleteBlocks();

    // We may have created a large CPU mirror of a large VB. Reset the size to match our minimum.
    this->resetCpuData(fMinBlockSize);

    VALIDATE();
}

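// Finishes off the currently open block, if any: unmaps it if it was mapped,
// otherwise flushes the CPU staging data to the GPU buffer, and then forgets
// the write pointer.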
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isMapped()) {
            UNMAP_BUFFER(block);
        } else {
            size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
            this->flushCpuData(block, flushSize);
        }
        fBufferPtr = nullptr;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
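// Checks the pool's invariants: the write pointer matches either the mapped
// pointer or the CPU staging buffer, only the last block may be mapped, and
// the per-block byte accounting sums to fBytesInUse (skipped if any
// underlying buffer was destroyed).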
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isMapped()) {
            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
            SkASSERT(buf->mapPtr() == fBufferPtr);
        } else {
            SkASSERT(fCpuData == fBufferPtr);
        }
    } else {
        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        SkASSERT(!fBlocks[i].fBuffer->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        if (fBlocks[i].fBuffer->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

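// Suballocates size bytes, padded so the allocation starts at a multiple of
// alignment within its buffer. On success, *buffer and *offset identify where
// the bytes will land on the GPU, and the returned pointer is where the
// caller should write them (a mapped pointer or CPU staging memory).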
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrGeometryBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
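        // e.g. with usedBytes == 10 and alignment == 4, pad == 2 and the new
        // allocation begins at offset 12 within the buffer.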
        if ((size + pad) <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            fBytesInUse += size + pad;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request with a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(size)) {
        return nullptr;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

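// Returns the last 'bytes' bytes obtained from makeSpace back to the pool,
// destroying any blocks that become completely unused along the way.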
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    while (bytes) {
        // caller shouldn't try to put back more than they've taken
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // if we mapped a buffer to satisfy the makeSpace and we're
            // releasing beyond it, then unmap it.
            if (block.fBuffer->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }

    VALIDATE();
}

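// Creates a new block of at least requestSize bytes (and no smaller than the
// pool's minimum block size), finishing off the previous block first. The new
// block's write pointer is a mapped pointer when the buffer is CPU-backed or
// large enough to be worth mapping; otherwise it is CPU staging memory that
// flushCpuData() uploads later.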
bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = SkTMax(requestSize, fMinBlockSize);
    SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    block.fBuffer = this->getBuffer(size);
    if (!block.fBuffer) {
        fBlocks.pop_back();
        return false;
    }

    block.fBytesFree = block.fBuffer->gpuMemorySize();
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isMapped()) {
            UNMAP_BUFFER(prev);
        } else {
            this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
        }
        fBufferPtr = nullptr;
    }

    SkASSERT(!fBufferPtr);

    // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
    // Otherwise when buffer mapping is supported we map if the buffer size is greater than the
    // threshold.
    bool attemptMap = block.fBuffer->isCPUBacked();
    if (!attemptMap && GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
        attemptMap = size > fGeometryBufferMapThreshold;
    }

    if (attemptMap) {
        fBufferPtr = block.fBuffer->map();
    }

    if (!fBufferPtr) {
        fBufferPtr = this->resetCpuData(block.fBytesFree);
    }

    VALIDATE(true);

    return true;
}

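// Pops the most recent block and unrefs its buffer; the block must already be
// unmapped. Also drops the write pointer.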
void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();

    SkASSERT(!block.fBuffer->isMapped());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = nullptr;
}

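// Replaces the CPU staging allocation with one of newSize bytes (zeroed when
// the GPU requires uploaded buffer data to be cleared), or frees it when
// newSize is 0.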
void* GrBufferAllocPool::resetCpuData(size_t newSize) {
    sk_free(fCpuData);
    if (newSize) {
        if (fGpu->caps()->mustClearUploadedBufferData()) {
            fCpuData = sk_calloc(newSize);
        } else {
            fCpuData = sk_malloc_throw(newSize);
        }
    } else {
        fCpuData = nullptr;
    }
    return fCpuData;
}

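// Copies flushSize bytes of CPU staging data into the block's GPU buffer:
// via map/memcpy/unmap when mapping is supported and the flush is large
// enough to be worth it, otherwise via updateData().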
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    GrGeometryBuffer* buffer = block.fBuffer;
    SkASSERT(buffer);
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuData == fBufferPtr);
    SkASSERT(flushSize <= buffer->gpuMemorySize());
    VALIDATE(true);

    if (GrCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > fGeometryBufferMapThreshold) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

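// Asks the resource provider for a new dynamic vertex or index buffer of the
// given size, matching the pool's buffer type.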
GrGeometryBuffer* GrBufferAllocPool::getBuffer(size_t size) {

    GrResourceProvider* rp = fGpu->getContext()->resourceProvider();

    static const GrResourceProvider::BufferUsage kUsage = GrResourceProvider::kDynamic_BufferUsage;
    // Shouldn't have to use this flag (http://skbug.com/4156)
    static const uint32_t kFlags = GrResourceProvider::kNoPendingIO_Flag;
    if (kIndex_BufferType == fBufferType) {
        return rp->createIndexBuffer(size, kUsage, kFlags);
    } else {
        SkASSERT(kVertex_BufferType == fBufferType);
        return rp->createVertexBuffer(size, kUsage, kFlags);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kVertex_BufferType, MIN_VERTEX_BUFFER_SIZE) {
}

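// Vertex-flavored wrapper around the base makeSpace: requests
// vertexCount * vertexSize bytes aligned to the vertex size, so the byte
// offset converts exactly to a starting vertex index.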
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         const GrVertexBuffer** buffer,
                                         int* startVertex) {

    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                     vertexSize,
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrVertexBuffer*) geomBuffer;
    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}
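
// A minimal sketch of a caller (names here are illustrative, not from this
// file):
//
//     const GrVertexBuffer* vb;
//     int startVertex;
//     SkPoint* positions = static_cast<SkPoint*>(
//             pool.makeSpace(sizeof(SkPoint), 4, &vb, &startVertex));
//     if (positions) {
//         // Write 4 positions, then record a draw that reads vb starting at
//         // startVertex; unmap() must run before the GPU consumes the data.
//     }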

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu)
    : GrBufferAllocPool(gpu, kIndex_BufferType, MIN_INDEX_BUFFER_SIZE) {
}

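// Index-flavored wrapper around the base makeSpace: requests indexCount
// 16-bit indices aligned to sizeof(uint16_t), so the byte offset converts
// exactly to a starting index.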
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrIndexBuffer** buffer,
                                        int* startIndex) {

    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = nullptr; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrIndexBuffer*) geomBuffer;
    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}
