/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrBufferAllocPool.h"
#include "GrDrawTargetCaps.h"
#include "GrGpu.h"
#include "GrIndexBuffer.h"
#include "GrTypes.h"
#include "GrVertexBuffer.h"

#include "SkTraceEvent.h"

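// In debug builds VALIDATE() calls through to validate(); in release builds it
// compiles to a no-op, so call sites need no #ifdef guards.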
#ifdef SK_DEBUG
    #define VALIDATE validate
#else
    static void VALIDATE(bool = false) {}
#endif

// min block size: one 4 KiB page
#define GrBufferAllocPool_MIN_BLOCK_SIZE ((size_t)1 << 12)

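// Unmaps a block's buffer, emitting a trace event that records what fraction
// of the buffer was never written.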
#define UNMAP_BUFFER(block)                                                               \
do {                                                                                      \
    TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("skia.gpu"),                           \
                         "GrBufferAllocPool Unmapping Buffer",                            \
                         TRACE_EVENT_SCOPE_THREAD,                                        \
                         "percent_unwritten",                                             \
                         (float)((block).fBytesFree) / (block).fBuffer->gpuMemorySize()); \
    (block).fBuffer->unmap();                                                             \
} while (false)

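// The pool hands out space from a list of blocks. Up to preallocBufferCnt
// buffers of fMinBlockSize bytes are created up front and reused in ring
// order; larger or additional blocks get freshly created buffers.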
GrBufferAllocPool::GrBufferAllocPool(GrGpu* gpu,
                                     BufferType bufferType,
                                     size_t blockSize,
                                     int preallocBufferCnt)
    : fBlocks(SkTMax(8, 2*preallocBufferCnt)) {

    fGpu = SkRef(gpu);

    fBufferType = bufferType;
    fBufferPtr = NULL;
    fMinBlockSize = SkTMax(GrBufferAllocPool_MIN_BLOCK_SIZE, blockSize);

    fBytesInUse = 0;

    fPreallocBuffersInUse = 0;
    fPreallocBufferStartIdx = 0;
    for (int i = 0; i < preallocBufferCnt; ++i) {
        GrGeometryBuffer* buffer = this->createBuffer(fMinBlockSize);
        if (buffer) {
            *fPreallocBuffers.append() = buffer;
        }
    }
}

GrBufferAllocPool::~GrBufferAllocPool() {
    VALIDATE();
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    fPreallocBuffers.unrefAll();
    fGpu->unref();
}

void GrBufferAllocPool::reset() {
    VALIDATE();
    fBytesInUse = 0;
    if (fBlocks.count()) {
        GrGeometryBuffer* buffer = fBlocks.back().fBuffer;
        if (buffer->isMapped()) {
            UNMAP_BUFFER(fBlocks.back());
        }
    }
    // fPreallocBuffersInUse is decremented to zero by the while loop below.
    int preallocBuffersInUse = fPreallocBuffersInUse;
    while (!fBlocks.empty()) {
        this->destroyBlock();
    }
    if (fPreallocBuffers.count()) {
        // This must be set after the loop above.
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    // We may have created a large CPU mirror of a large VB. Reset the size
    // to match our preallocated VBs.
    fCpuData.reset(fMinBlockSize);
    SkASSERT(0 == fPreallocBuffersInUse);
    VALIDATE();
}

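// Finalizes pending writes to the current block: unmaps the buffer if it was
// mapped, otherwise flushes the CPU-side staging copy into it.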
void GrBufferAllocPool::unmap() {
    VALIDATE();

    if (fBufferPtr) {
        BufferBlock& block = fBlocks.back();
        if (block.fBuffer->isMapped()) {
            UNMAP_BUFFER(block);
        } else {
            size_t flushSize = block.fBuffer->gpuMemorySize() - block.fBytesFree;
            this->flushCpuData(fBlocks.back(), flushSize);
        }
        fBufferPtr = NULL;
    }
    VALIDATE();
}

#ifdef SK_DEBUG
void GrBufferAllocPool::validate(bool unusedBlockAllowed) const {
    bool wasDestroyed = false;
    if (fBufferPtr) {
        SkASSERT(!fBlocks.empty());
        if (fBlocks.back().fBuffer->isMapped()) {
            GrGeometryBuffer* buf = fBlocks.back().fBuffer;
            SkASSERT(buf->mapPtr() == fBufferPtr);
        } else {
            SkASSERT(fCpuData.get() == fBufferPtr);
        }
    } else {
        SkASSERT(fBlocks.empty() || !fBlocks.back().fBuffer->isMapped());
    }
    size_t bytesInUse = 0;
    for (int i = 0; i < fBlocks.count() - 1; ++i) {
        SkASSERT(!fBlocks[i].fBuffer->isMapped());
    }
    for (int i = 0; !wasDestroyed && i < fBlocks.count(); ++i) {
        if (fBlocks[i].fBuffer->wasDestroyed()) {
            wasDestroyed = true;
        } else {
            size_t bytes = fBlocks[i].fBuffer->gpuMemorySize() - fBlocks[i].fBytesFree;
            bytesInUse += bytes;
            SkASSERT(bytes || unusedBlockAllowed);
        }
    }

    if (!wasDestroyed) {
        SkASSERT(bytesInUse == fBytesInUse);
        if (unusedBlockAllowed) {
            SkASSERT((fBytesInUse && !fBlocks.empty()) ||
                     (!fBytesInUse && (fBlocks.count() < 2)));
        } else {
            SkASSERT((0 == fBytesInUse) == fBlocks.empty());
        }
    }
}
#endif

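// Returns a pointer to writable space for size bytes, aligned to alignment,
// creating a new block if the request does not fit in the current one. On
// success *buffer and *offset identify where the bytes will land in GPU
// memory. Sketch of a call (in practice reached through the typed subclass
// makeSpace() overloads below; names are hypothetical):
//   const GrGeometryBuffer* buf;
//   size_t offset;
//   void* ptr = pool.makeSpace(64, 16, &buf, &offset);
//   // On success, write 64 bytes to ptr; they reach buf at offset once the
//   // pool unmaps or flushes.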
void* GrBufferAllocPool::makeSpace(size_t size,
                                   size_t alignment,
                                   const GrGeometryBuffer** buffer,
                                   size_t* offset) {
    VALIDATE();

    SkASSERT(buffer);
    SkASSERT(offset);

    if (fBufferPtr) {
        BufferBlock& back = fBlocks.back();
        size_t usedBytes = back.fBuffer->gpuMemorySize() - back.fBytesFree;
        size_t pad = GrSizeAlignUpPad(usedBytes, alignment);
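        // For example, if usedBytes is 10 and alignment is 4, pad is 2 and
        // the new allocation starts at offset 12.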
        if ((size + pad) <= back.fBytesFree) {
            memset((void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes), 0, pad);
            usedBytes += pad;
            *offset = usedBytes;
            *buffer = back.fBuffer;
            back.fBytesFree -= size + pad;
            fBytesInUse += size + pad;
            VALIDATE();
            return (void*)(reinterpret_cast<intptr_t>(fBufferPtr) + usedBytes);
        }
    }

    // We could honor the space request using a partial update of the current
    // VB (if there is room). But we don't currently use draw calls to GL that
    // allow the driver to know that previously issued draws won't read from
    // the part of the buffer we update. Also, the GL buffer implementation
    // may be cheating on the actual buffer size by shrinking the buffer on
    // updateData() if the amount of data passed is less than the full buffer
    // size.

    if (!this->createBlock(size)) {
        return NULL;
    }
    SkASSERT(fBufferPtr);

    *offset = 0;
    BufferBlock& back = fBlocks.back();
    *buffer = back.fBuffer;
    back.fBytesFree -= size;
    fBytesInUse += size;
    VALIDATE();
    return fBufferPtr;
}

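// Returns the most recently allocated bytes to the pool. Callers may only put
// back from the tail of what they took; blocks that become fully unused are
// destroyed, returning preallocated buffers to the ring.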
void GrBufferAllocPool::putBack(size_t bytes) {
    VALIDATE();

    // If the putBack unwinds all the preallocated buffers then we will
    // advance the starting index. As blocks are destroyed fPreallocBuffersInUse
    // will be decremented. It will reach zero if all blocks using preallocated
    // buffers are released.
    int preallocBuffersInUse = fPreallocBuffersInUse;

    while (bytes) {
        // Callers shouldn't try to put back more than they've taken.
        SkASSERT(!fBlocks.empty());
        BufferBlock& block = fBlocks.back();
        size_t bytesUsed = block.fBuffer->gpuMemorySize() - block.fBytesFree;
        if (bytes >= bytesUsed) {
            bytes -= bytesUsed;
            fBytesInUse -= bytesUsed;
            // If we mapped a VB to satisfy the makeSpace and we're releasing
            // beyond it, then unmap it.
            if (block.fBuffer->isMapped()) {
                UNMAP_BUFFER(block);
            }
            this->destroyBlock();
        } else {
            block.fBytesFree += bytes;
            fBytesInUse -= bytes;
            bytes = 0;
            break;
        }
    }
    if (!fPreallocBuffersInUse && fPreallocBuffers.count()) {
        fPreallocBufferStartIdx = (fPreallocBufferStartIdx +
                                   preallocBuffersInUse) %
                                  fPreallocBuffers.count();
    }
    VALIDATE();
}

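// Appends a block large enough for requestSize bytes, reusing a preallocated
// buffer when the default block size suffices. On success fBufferPtr points
// at writable memory for the new block: a mapping of the buffer when mapping
// is worthwhile, otherwise the CPU-side staging allocation.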
bool GrBufferAllocPool::createBlock(size_t requestSize) {

    size_t size = SkTMax(requestSize, fMinBlockSize);
    SkASSERT(size >= GrBufferAllocPool_MIN_BLOCK_SIZE);

    VALIDATE();

    BufferBlock& block = fBlocks.push_back();

    if (size == fMinBlockSize &&
        fPreallocBuffersInUse < fPreallocBuffers.count()) {

        uint32_t nextBuffer = (fPreallocBuffersInUse +
                               fPreallocBufferStartIdx) %
                              fPreallocBuffers.count();
        block.fBuffer = fPreallocBuffers[nextBuffer];
        block.fBuffer->ref();
        ++fPreallocBuffersInUse;
    } else {
        block.fBuffer = this->createBuffer(size);
        if (NULL == block.fBuffer) {
            fBlocks.pop_back();
            return false;
        }
    }

    block.fBytesFree = size;
    if (fBufferPtr) {
        SkASSERT(fBlocks.count() > 1);
        BufferBlock& prev = fBlocks.fromBack(1);
        if (prev.fBuffer->isMapped()) {
            UNMAP_BUFFER(prev);
        } else {
            this->flushCpuData(prev, prev.fBuffer->gpuMemorySize() - prev.fBytesFree);
        }
        fBufferPtr = NULL;
    }

    SkASSERT(NULL == fBufferPtr);

    // If the buffer is CPU-backed we map it because it is free to do so and saves a copy.
    // Otherwise, when buffer mapping is supported, we map if the buffer size is greater
    // than the threshold.
    bool attemptMap = block.fBuffer->isCPUBacked();
    if (!attemptMap && GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags()) {
        attemptMap = size > GR_GEOM_BUFFER_MAP_THRESHOLD;
    }

    if (attemptMap) {
        fBufferPtr = block.fBuffer->map();
    }

    if (NULL == fBufferPtr) {
        fBufferPtr = fCpuData.reset(size);
    }

    VALIDATE(true);

    return true;
}

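// Pops the most recent block, returning its buffer to the preallocated ring
// if it came from there.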
void GrBufferAllocPool::destroyBlock() {
    SkASSERT(!fBlocks.empty());

    BufferBlock& block = fBlocks.back();
    if (fPreallocBuffersInUse > 0) {
        uint32_t prevPreallocBuffer = (fPreallocBuffersInUse +
                                       fPreallocBufferStartIdx +
                                       (fPreallocBuffers.count() - 1)) %
                                      fPreallocBuffers.count();
        if (block.fBuffer == fPreallocBuffers[prevPreallocBuffer]) {
            --fPreallocBuffersInUse;
        }
    }
    SkASSERT(!block.fBuffer->isMapped());
    block.fBuffer->unref();
    fBlocks.pop_back();
    fBufferPtr = NULL;
}

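// Copies the CPU-side staging data into the block's buffer, via map/memcpy
// when the driver supports mapping and the flush is large enough to be worth
// it, and via updateData() otherwise.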
void GrBufferAllocPool::flushCpuData(const BufferBlock& block, size_t flushSize) {
    GrGeometryBuffer* buffer = block.fBuffer;
    SkASSERT(buffer);
    SkASSERT(!buffer->isMapped());
    SkASSERT(fCpuData.get() == fBufferPtr);
    SkASSERT(flushSize <= buffer->gpuMemorySize());
    VALIDATE(true);

    if (GrDrawTargetCaps::kNone_MapFlags != fGpu->caps()->mapBufferFlags() &&
        flushSize > GR_GEOM_BUFFER_MAP_THRESHOLD) {
        void* data = buffer->map();
        if (data) {
            memcpy(data, fBufferPtr, flushSize);
            UNMAP_BUFFER(block);
            return;
        }
    }
    buffer->updateData(fBufferPtr, flushSize);
    VALIDATE(true);
}

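// Creates a dynamic buffer of the pool's type (index or vertex).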
GrGeometryBuffer* GrBufferAllocPool::createBuffer(size_t size) {
    if (kIndex_BufferType == fBufferType) {
        return fGpu->createIndexBuffer(size, true);
    } else {
        SkASSERT(kVertex_BufferType == fBufferType);
        return fGpu->createVertexBuffer(size, true);
    }
}

////////////////////////////////////////////////////////////////////////////////

GrVertexBufferAllocPool::GrVertexBufferAllocPool(GrGpu* gpu,
                                                 size_t bufferSize,
                                                 int preallocBufferCnt)
    : GrBufferAllocPool(gpu,
                        kVertex_BufferType,
                        bufferSize,
                        preallocBufferCnt) {
}

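// Reserves vertexCount vertices of vertexSize bytes each. Aligning to
// vertexSize guarantees the returned offset is a whole number of vertices.
// Illustrative use (a hypothetical 20-byte vertex layout):
//   const GrVertexBuffer* vb;
//   int startVertex;
//   void* verts = pool.makeSpace(20, 4, &vb, &startVertex);
//   // On success, write 4 * 20 bytes of vertex data; the first vertex is
//   // startVertex within vb.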
void* GrVertexBufferAllocPool::makeSpace(size_t vertexSize,
                                         int vertexCount,
                                         const GrVertexBuffer** buffer,
                                         int* startVertex) {

    SkASSERT(vertexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startVertex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(vertexSize * vertexCount,
                                     vertexSize,
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrVertexBuffer*) geomBuffer;
    SkASSERT(0 == offset % vertexSize);
    *startVertex = static_cast<int>(offset / vertexSize);
    return ptr;
}

////////////////////////////////////////////////////////////////////////////////

GrIndexBufferAllocPool::GrIndexBufferAllocPool(GrGpu* gpu,
                                               size_t bufferSize,
                                               int preallocBufferCnt)
    : GrBufferAllocPool(gpu,
                        kIndex_BufferType,
                        bufferSize,
                        preallocBufferCnt) {
}

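// Reserves indexCount 16-bit indices.
// Illustrative use (a hypothetical quad drawn as six indices):
//   const GrIndexBuffer* ib;
//   int startIndex;
//   uint16_t* indices = (uint16_t*) pool.makeSpace(6, &ib, &startIndex);
//   // On success, write six indices; the first one is startIndex within ib.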
void* GrIndexBufferAllocPool::makeSpace(int indexCount,
                                        const GrIndexBuffer** buffer,
                                        int* startIndex) {

    SkASSERT(indexCount >= 0);
    SkASSERT(buffer);
    SkASSERT(startIndex);

    size_t offset = 0; // assign to suppress warning
    const GrGeometryBuffer* geomBuffer = NULL; // assign to suppress warning
    void* ptr = INHERITED::makeSpace(indexCount * sizeof(uint16_t),
                                     sizeof(uint16_t),
                                     &geomBuffer,
                                     &offset);

    *buffer = (const GrIndexBuffer*) geomBuffer;
    SkASSERT(0 == offset % sizeof(uint16_t));
    *startIndex = static_cast<int>(offset / sizeof(uint16_t));
    return ptr;
}