/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#include "GrGpu.h"

#include "GrBufferAllocPool.h"
#include "GrContext.h"
#include "GrDrawTargetCaps.h"
#include "GrIndexBuffer.h"
#include "GrStencilBuffer.h"
#include "GrVertexBuffer.h"

// probably makes no sense for this to be less than a page
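// (1 << 18 is 256 KiB per vertex buffer and 1 << 16 is 64 KiB per index buffer; these
// sizes and counts configure the buffer alloc pools created lazily in prepareVertexPool()
// and prepareIndexPool() below.)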
static const size_t VERTEX_POOL_VB_SIZE = 1 << 18;
static const int VERTEX_POOL_VB_COUNT = 4;
static const size_t INDEX_POOL_IB_SIZE = 1 << 16;
static const int INDEX_POOL_IB_COUNT = 4;

////////////////////////////////////////////////////////////////////////////////

#define DEBUG_INVAL_BUFFER    0xdeadcafe
#define DEBUG_INVAL_START_IDX -1

GrGpu::GrGpu(GrContext* context)
    : GrDrawTarget(context)
    , fResetTimestamp(kExpiredTimestamp+1)
    , fResetBits(kAll_GrBackendState)
    , fVertexPool(NULL)
    , fIndexPool(NULL)
    , fVertexPoolUseCnt(0)
    , fIndexPoolUseCnt(0)
    , fQuadIndexBuffer(NULL) {

    fClipMaskManager.setGpu(this);

    fGeomPoolStateStack.push_back();
#ifdef SK_DEBUG
    GeometryPoolState& poolState = fGeomPoolStateStack.back();
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
    poolState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
    poolState.fPoolStartIndex = DEBUG_INVAL_START_IDX;
#endif
}

GrGpu::~GrGpu() {
    this->releaseResources();
}

void GrGpu::abandonResources() {

    fClipMaskManager.releaseResources();

    while (NULL != fObjectList.head()) {
        fObjectList.head()->abandon();
    }

    SkASSERT(NULL == fQuadIndexBuffer || fQuadIndexBuffer->wasDestroyed());
    SkSafeSetNull(fQuadIndexBuffer);
    delete fVertexPool;
    fVertexPool = NULL;
    delete fIndexPool;
    fIndexPool = NULL;
}

void GrGpu::releaseResources() {

    fClipMaskManager.releaseResources();

    while (NULL != fObjectList.head()) {
        fObjectList.head()->release();
    }

    SkASSERT(NULL == fQuadIndexBuffer || fQuadIndexBuffer->wasDestroyed());
    SkSafeSetNull(fQuadIndexBuffer);
    delete fVertexPool;
    fVertexPool = NULL;
    delete fIndexPool;
    fIndexPool = NULL;
}

void GrGpu::insertObject(GrGpuObject* object) {
    SkASSERT(NULL != object);
    SkASSERT(this == object->getGpu());

    fObjectList.addToHead(object);
}

void GrGpu::removeObject(GrGpuObject* object) {
    SkASSERT(NULL != object);
    SkASSERT(this == object->getGpu());

    fObjectList.remove(object);
}


void GrGpu::unimpl(const char msg[]) {
#ifdef SK_DEBUG
    GrPrintf("--- GrGpu unimplemented(\"%s\")\n", msg);
#endif
}

////////////////////////////////////////////////////////////////////////////////

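// Creates a texture after validating the requested config against the backend caps.
// Compressed configs take the compressed path (and must not be render targets); uncompressed
// render targets that were not flagged kNoStencil get a stencil buffer attached up front.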
GrTexture* GrGpu::createTexture(const GrTextureDesc& desc,
                                const void* srcData, size_t rowBytes) {
    if (!this->caps()->isConfigTexturable(desc.fConfig)) {
        return NULL;
    }

    if ((desc.fFlags & kRenderTarget_GrTextureFlagBit) &&
        !this->caps()->isConfigRenderable(desc.fConfig, desc.fSampleCnt > 0)) {
        return NULL;
    }

    GrTexture* tex = NULL;
    if (GrPixelConfigIsCompressed(desc.fConfig)) {
        // We shouldn't be rendering into this
        SkASSERT((desc.fFlags & kRenderTarget_GrTextureFlagBit) == 0);

        if (!this->caps()->npotTextureTileSupport() &&
            (!SkIsPow2(desc.fWidth) || !SkIsPow2(desc.fHeight))) {
            return NULL;
        }

        this->handleDirtyContext();
        tex = this->onCreateCompressedTexture(desc, srcData);
    } else {
        this->handleDirtyContext();
        tex = this->onCreateTexture(desc, srcData, rowBytes);
        if (NULL != tex &&
            (kRenderTarget_GrTextureFlagBit & desc.fFlags) &&
            !(kNoStencil_GrTextureFlagBit & desc.fFlags)) {
            SkASSERT(NULL != tex->asRenderTarget());
            // TODO: defer this and attach dynamically
            if (!this->attachStencilBufferToRenderTarget(tex->asRenderTarget())) {
                tex->unref();
                return NULL;
            }
        }
    }
    return tex;
}

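// Attaches a stencil buffer to the render target, preferring a cached buffer of matching
// dimensions and sample count from the context. If none is found a new one is created and
// cleared once here, while it still covers the entire render target.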
bool GrGpu::attachStencilBufferToRenderTarget(GrRenderTarget* rt) {
    SkASSERT(NULL == rt->getStencilBuffer());
    GrStencilBuffer* sb =
        this->getContext()->findStencilBuffer(rt->width(),
                                              rt->height(),
                                              rt->numSamples());
    if (NULL != sb) {
        rt->setStencilBuffer(sb);
        bool attached = this->attachStencilBufferToRenderTarget(sb, rt);
        if (!attached) {
            rt->setStencilBuffer(NULL);
        }
        return attached;
    }
    if (this->createStencilBufferForRenderTarget(rt,
                                                 rt->width(), rt->height())) {
        // Right now we're clearing the stencil buffer here after it is
        // attached to an RT for the first time. When we start matching
        // stencil buffers with smaller color targets this will no longer
        // be correct because it won't be guaranteed to clear the entire
        // sb.
        // We used to clear down in the GL subclass using a special purpose
        // FBO. But iOS doesn't allow a stencil-only FBO. It reports unsupported
        // FBO status.
        GrDrawState::AutoRenderTargetRestore artr(this->drawState(), rt);
        this->clearStencil();
        return true;
    } else {
        return false;
    }
}

GrTexture* GrGpu::wrapBackendTexture(const GrBackendTextureDesc& desc) {
    this->handleDirtyContext();
    GrTexture* tex = this->onWrapBackendTexture(desc);
    if (NULL == tex) {
        return NULL;
    }
    // TODO: defer this and attach dynamically
    GrRenderTarget* tgt = tex->asRenderTarget();
    if (NULL != tgt &&
        !this->attachStencilBufferToRenderTarget(tgt)) {
        tex->unref();
        return NULL;
    } else {
        return tex;
    }
}

GrRenderTarget* GrGpu::wrapBackendRenderTarget(const GrBackendRenderTargetDesc& desc) {
    this->handleDirtyContext();
    return this->onWrapBackendRenderTarget(desc);
}

GrVertexBuffer* GrGpu::createVertexBuffer(size_t size, bool dynamic) {
    this->handleDirtyContext();
    return this->onCreateVertexBuffer(size, dynamic);
}

GrIndexBuffer* GrGpu::createIndexBuffer(size_t size, bool dynamic) {
    this->handleDirtyContext();
    return this->onCreateIndexBuffer(size, dynamic);
}

GrPath* GrGpu::createPath(const SkPath& path, const SkStrokeRec& stroke) {
    SkASSERT(this->caps()->pathRenderingSupport());
    this->handleDirtyContext();
    return this->onCreatePath(path, stroke);
}

void GrGpu::clear(const SkIRect* rect,
                  GrColor color,
                  bool canIgnoreRect,
                  GrRenderTarget* renderTarget) {
    GrDrawState::AutoRenderTargetRestore art;
    if (NULL != renderTarget) {
        art.set(this->drawState(), renderTarget);
    }
    if (NULL == this->getDrawState().getRenderTarget()) {
        SkASSERT(0);
        return;
    }
    this->handleDirtyContext();
    this->onClear(rect, color, canIgnoreRect);
}

bool GrGpu::readPixels(GrRenderTarget* target,
                       int left, int top, int width, int height,
                       GrPixelConfig config, void* buffer,
                       size_t rowBytes) {
    this->handleDirtyContext();
    return this->onReadPixels(target, left, top, width, height,
                              config, buffer, rowBytes);
}

bool GrGpu::writeTexturePixels(GrTexture* texture,
                               int left, int top, int width, int height,
                               GrPixelConfig config, const void* buffer,
                               size_t rowBytes) {
    this->handleDirtyContext();
    return this->onWriteTexturePixels(texture, left, top, width, height,
                                      config, buffer, rowBytes);
}

void GrGpu::resolveRenderTarget(GrRenderTarget* target) {
    SkASSERT(target);
    this->handleDirtyContext();
    this->onResolveRenderTarget(target);
}

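// Stencil settings used when rendering paths into the stencil buffer: winding fills use
// increment-with-clamp ops on both passes, even/odd fills use invert ops. The clip mask
// manager may further adjust these in getPathStencilSettingsForFillType() below.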
static const GrStencilSettings& winding_path_stencil_settings() {
    GR_STATIC_CONST_SAME_STENCIL_STRUCT(gSettings,
        kIncClamp_StencilOp,
        kIncClamp_StencilOp,
        kAlwaysIfInClip_StencilFunc,
        0xFFFF, 0xFFFF, 0xFFFF);
    return *GR_CONST_STENCIL_SETTINGS_PTR_FROM_STRUCT_PTR(&gSettings);
}

static const GrStencilSettings& even_odd_path_stencil_settings() {
    GR_STATIC_CONST_SAME_STENCIL_STRUCT(gSettings,
        kInvert_StencilOp,
        kInvert_StencilOp,
        kAlwaysIfInClip_StencilFunc,
        0xFFFF, 0xFFFF, 0xFFFF);
    return *GR_CONST_STENCIL_SETTINGS_PTR_FROM_STRUCT_PTR(&gSettings);
}

void GrGpu::getPathStencilSettingsForFillType(SkPath::FillType fill, GrStencilSettings* outStencilSettings) {

    switch (fill) {
        default:
            SkFAIL("Unexpected path fill.");
            /* fallthrough */
        case SkPath::kWinding_FillType:
        case SkPath::kInverseWinding_FillType:
            *outStencilSettings = winding_path_stencil_settings();
            break;
        case SkPath::kEvenOdd_FillType:
        case SkPath::kInverseEvenOdd_FillType:
            *outStencilSettings = even_odd_path_stencil_settings();
            break;
    }
    fClipMaskManager.adjustPathStencilParams(outStencilSettings);
}


////////////////////////////////////////////////////////////////////////////////

static const int MAX_QUADS = 1 << 12; // max possible: (1 << 14) - 1;

GR_STATIC_ASSERT(4 * MAX_QUADS <= 65535);

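// Writes the index pattern for quadCount quads: each quad's four vertices become two
// triangles, (0, 1, 2) and (0, 2, 3), i.e. six indices per quad.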
static inline void fill_indices(uint16_t* indices, int quadCount) {
    for (int i = 0; i < quadCount; ++i) {
        indices[6 * i + 0] = 4 * i + 0;
        indices[6 * i + 1] = 4 * i + 1;
        indices[6 * i + 2] = 4 * i + 2;
        indices[6 * i + 3] = 4 * i + 0;
        indices[6 * i + 4] = 4 * i + 2;
        indices[6 * i + 5] = 4 * i + 3;
    }
}

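// Lazily creates the shared quad index buffer. The indices are written through a mapping of
// the buffer when possible; otherwise they are filled in CPU memory and uploaded with
// updateData().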
const GrIndexBuffer* GrGpu::getQuadIndexBuffer() const {
    if (NULL == fQuadIndexBuffer) {
        static const int SIZE = sizeof(uint16_t) * 6 * MAX_QUADS;
        GrGpu* me = const_cast<GrGpu*>(this);
        fQuadIndexBuffer = me->createIndexBuffer(SIZE, false);
        if (NULL != fQuadIndexBuffer) {
            uint16_t* indices = (uint16_t*)fQuadIndexBuffer->map();
            if (NULL != indices) {
                fill_indices(indices, MAX_QUADS);
                fQuadIndexBuffer->unmap();
            } else {
                indices = (uint16_t*)sk_malloc_throw(SIZE);
                fill_indices(indices, MAX_QUADS);
                if (!fQuadIndexBuffer->updateData(indices, SIZE)) {
                    fQuadIndexBuffer->unref();
                    fQuadIndexBuffer = NULL;
                    SkFAIL("Can't get indices into buffer!");
                }
                sk_free(indices);
            }
        }
    }

    return fQuadIndexBuffer;
}

////////////////////////////////////////////////////////////////////////////////

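// Asks the clip mask manager to set up clipping for the current clip and then flushes the
// GPU state for the given draw type. Returns false if either step fails; callers below skip
// the draw in that case.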
bool GrGpu::setupClipAndFlushState(DrawType type, const GrDeviceCoordTexture* dstCopy,
                                   GrDrawState::AutoRestoreEffects* are,
                                   const SkRect* devBounds) {
    if (!fClipMaskManager.setupClipping(this->getClip(), are, devBounds)) {
        return false;
    }

    if (!this->flushGraphicsState(type, dstCopy)) {
        return false;
    }

    return true;
}

////////////////////////////////////////////////////////////////////////////////

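// Hooks called around GrDrawTarget's geometry source push/pop: any reserved or array
// geometry is finalized before a new pool state is pushed, and pops must stay balanced with
// pushes (the initial entry pushed in the constructor is never popped).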
void GrGpu::geometrySourceWillPush() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    if (kArray_GeometrySrcType == geoSrc.fVertexSrc ||
        kReserved_GeometrySrcType == geoSrc.fVertexSrc) {
        this->finalizeReservedVertices();
    }
    if (kArray_GeometrySrcType == geoSrc.fIndexSrc ||
        kReserved_GeometrySrcType == geoSrc.fIndexSrc) {
        this->finalizeReservedIndices();
    }
    GeometryPoolState& newState = fGeomPoolStateStack.push_back();
#ifdef SK_DEBUG
    newState.fPoolVertexBuffer = (GrVertexBuffer*)DEBUG_INVAL_BUFFER;
    newState.fPoolStartVertex = DEBUG_INVAL_START_IDX;
    newState.fPoolIndexBuffer = (GrIndexBuffer*)DEBUG_INVAL_BUFFER;
    newState.fPoolStartIndex = DEBUG_INVAL_START_IDX;
#else
    (void) newState; // silence compiler warning
#endif
}

void GrGpu::geometrySourceWillPop(const GeometrySrcState& restoredState) {
    // if popping last entry then pops are unbalanced with pushes
    SkASSERT(fGeomPoolStateStack.count() > 1);
    fGeomPoolStateStack.pop_back();
}

void GrGpu::onDraw(const DrawInfo& info) {
    this->handleDirtyContext();
    GrDrawState::AutoRestoreEffects are;
    if (!this->setupClipAndFlushState(PrimTypeToDrawType(info.primitiveType()),
                                      info.getDstCopy(), &are, info.getDevBounds())) {
        return;
    }
    this->onGpuDraw(info);
}

void GrGpu::onStencilPath(const GrPath* path, SkPath::FillType fill) {
    this->handleDirtyContext();

    GrDrawState::AutoRestoreEffects are;
    if (!this->setupClipAndFlushState(kStencilPath_DrawType, NULL, &are, NULL)) {
        return;
    }

    this->onGpuStencilPath(path, fill);
}


void GrGpu::onDrawPath(const GrPath* path, SkPath::FillType fill,
                       const GrDeviceCoordTexture* dstCopy) {
    this->handleDirtyContext();

    drawState()->setDefaultVertexAttribs();

    GrDrawState::AutoRestoreEffects are;
    if (!this->setupClipAndFlushState(kDrawPath_DrawType, dstCopy, &are, NULL)) {
        return;
    }

    this->onGpuDrawPath(path, fill);
}

void GrGpu::onDrawPaths(int pathCount, const GrPath** paths,
                        const SkMatrix* transforms, SkPath::FillType fill,
                        SkStrokeRec::Style style,
                        const GrDeviceCoordTexture* dstCopy) {
    this->handleDirtyContext();

    drawState()->setDefaultVertexAttribs();

    GrDrawState::AutoRestoreEffects are;
    if (!this->setupClipAndFlushState(kDrawPaths_DrawType, dstCopy, &are, NULL)) {
        return;
    }

    this->onGpuDrawPaths(pathCount, paths, transforms, fill, style);
}

void GrGpu::finalizeReservedVertices() {
    SkASSERT(NULL != fVertexPool);
    fVertexPool->unmap();
}

void GrGpu::finalizeReservedIndices() {
    SkASSERT(NULL != fIndexPool);
    fIndexPool->unmap();
}

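// The vertex and index pools are created on first use and reset when the client has no
// outstanding data in them (tracked by the use counts), so stale reserved space does not
// accumulate between uses.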
void GrGpu::prepareVertexPool() {
    if (NULL == fVertexPool) {
        SkASSERT(0 == fVertexPoolUseCnt);
        fVertexPool = SkNEW_ARGS(GrVertexBufferAllocPool, (this, true,
                                                  VERTEX_POOL_VB_SIZE,
                                                  VERTEX_POOL_VB_COUNT));
        fVertexPool->releaseGpuRef();
    } else if (!fVertexPoolUseCnt) {
        // the client doesn't have valid data in the pool
        fVertexPool->reset();
    }
}

void GrGpu::prepareIndexPool() {
    if (NULL == fIndexPool) {
        SkASSERT(0 == fIndexPoolUseCnt);
        fIndexPool = SkNEW_ARGS(GrIndexBufferAllocPool, (this, true,
                                                INDEX_POOL_IB_SIZE,
                                                INDEX_POOL_IB_COUNT));
        fIndexPool->releaseGpuRef();
    } else if (!fIndexPoolUseCnt) {
        // the client doesn't have valid data in the pool
        fIndexPool->reset();
    }
}

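// The reserve/release and set-source-to-array overrides below all route through the pools:
// makeSpace / appendVertices record which pool buffer and starting index the data landed in
// (stored in the current GeometryPoolState), and the matching release calls return the space
// with putBack().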
bool GrGpu::onReserveVertexSpace(size_t vertexSize,
                                 int vertexCount,
                                 void** vertices) {
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();

    SkASSERT(vertexCount > 0);
    SkASSERT(NULL != vertices);

    this->prepareVertexPool();

    *vertices = fVertexPool->makeSpace(vertexSize,
                                       vertexCount,
                                       &geomPoolState.fPoolVertexBuffer,
                                       &geomPoolState.fPoolStartVertex);
    if (NULL == *vertices) {
        return false;
    }
    ++fVertexPoolUseCnt;
    return true;
}

bool GrGpu::onReserveIndexSpace(int indexCount, void** indices) {
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();

    SkASSERT(indexCount > 0);
    SkASSERT(NULL != indices);

    this->prepareIndexPool();

    *indices = fIndexPool->makeSpace(indexCount,
                                     &geomPoolState.fPoolIndexBuffer,
                                     &geomPoolState.fPoolStartIndex);
    if (NULL == *indices) {
        return false;
    }
    ++fIndexPoolUseCnt;
    return true;
}

void GrGpu::releaseReservedVertexSpace() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc);
    size_t bytes = geoSrc.fVertexCount * geoSrc.fVertexSize;
    fVertexPool->putBack(bytes);
    --fVertexPoolUseCnt;
}

void GrGpu::releaseReservedIndexSpace() {
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc);
    size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t);
    fIndexPool->putBack(bytes);
    --fIndexPoolUseCnt;
}

void GrGpu::onSetVertexSourceToArray(const void* vertexArray, int vertexCount) {
    this->prepareVertexPool();
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
#ifdef SK_DEBUG
    bool success =
#endif
    fVertexPool->appendVertices(this->getVertexSize(),
                                vertexCount,
                                vertexArray,
                                &geomPoolState.fPoolVertexBuffer,
                                &geomPoolState.fPoolStartVertex);
    ++fVertexPoolUseCnt;
    GR_DEBUGASSERT(success);
}

void GrGpu::onSetIndexSourceToArray(const void* indexArray, int indexCount) {
    this->prepareIndexPool();
    GeometryPoolState& geomPoolState = fGeomPoolStateStack.back();
#ifdef SK_DEBUG
    bool success =
#endif
    fIndexPool->appendIndices(indexCount,
                              indexArray,
                              &geomPoolState.fPoolIndexBuffer,
                              &geomPoolState.fPoolStartIndex);
    ++fIndexPoolUseCnt;
    GR_DEBUGASSERT(success);
}

void GrGpu::releaseVertexArray() {
    // if vertex source was array, we stowed data in the pool
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    SkASSERT(kArray_GeometrySrcType == geoSrc.fVertexSrc);
    size_t bytes = geoSrc.fVertexCount * geoSrc.fVertexSize;
    fVertexPool->putBack(bytes);
    --fVertexPoolUseCnt;
}

void GrGpu::releaseIndexArray() {
    // if index source was array, we stowed data in the pool
    const GeometrySrcState& geoSrc = this->getGeomSrc();
    SkASSERT(kArray_GeometrySrcType == geoSrc.fIndexSrc);
    size_t bytes = geoSrc.fIndexCount * sizeof(uint16_t);
    fIndexPool->putBack(bytes);
    --fIndexPoolUseCnt;
}