1/*
2 * Copyright 2011 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8#include "GrInOrderDrawBuffer.h"
9
10#include "GrBufferAllocPool.h"
11#include "GrDrawTargetCaps.h"
12#include "GrTextStrike.h"
13#include "GrGpu.h"
14#include "GrTemplates.h"
15#include "GrTexture.h"
16
/**
 * Builds a draw buffer that records commands and later replays them on 'gpu'
 * in flush(). Vertex/index data reserved by clients is sub-allocated from the
 * provided pools. The gpu and its caps are ref'ed for the buffer's lifetime.
 */
GrInOrderDrawBuffer::GrInOrderDrawBuffer(GrGpu* gpu,
                                         GrVertexBufferAllocPool* vertexPool,
                                         GrIndexBufferAllocPool* indexPool)
    : GrDrawTarget(gpu->getContext())
    , fDstGpu(gpu)
    , fClipSet(true)
    , fClipProxyState(kUnknown_ClipProxyState)
    , fVertexPool(*vertexPool)
    , fIndexPool(*indexPool)
    , fFlushing(false)
    , fDrawID(0) {

    fDstGpu->ref();
    fCaps.reset(SkRef(fDstGpu->caps()));

    SkASSERT(vertexPool);
    SkASSERT(indexPool);

    // Seed the geometry-source stack with one entry; reset() asserts exactly
    // one entry remains, so this base entry is never popped.
    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fUsedPoolIndexBytes = 0;
#ifdef SK_DEBUG
    // Poison the debug-only pool fields so a use-before-set is obvious.
    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
    poolState.fPoolStartVertex = ~0;
    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
    poolState.fPoolStartIndex = ~0;
#endif
    this->reset();
}
46
GrInOrderDrawBuffer::~GrInOrderDrawBuffer() {
    // Drop any recorded-but-unflushed commands.
    this->reset();
    // This must be called before the GrDrawTarget destructor runs.
    this->releaseGeometry();
    fDstGpu->unref();
}
53
54////////////////////////////////////////////////////////////////////////////////
55
56namespace {
57void get_vertex_bounds(const void* vertices,
58                       size_t vertexSize,
59                       int vertexCount,
60                       SkRect* bounds) {
61    SkASSERT(vertexSize >= sizeof(SkPoint));
62    SkASSERT(vertexCount > 0);
63    const SkPoint* point = static_cast<const SkPoint*>(vertices);
64    bounds->fLeft = bounds->fRight = point->fX;
65    bounds->fTop = bounds->fBottom = point->fY;
66    for (int i = 1; i < vertexCount; ++i) {
67        point = reinterpret_cast<SkPoint*>(reinterpret_cast<intptr_t>(point) + vertexSize);
68        bounds->growToInclude(point->fX, point->fY);
69    }
70}
71}
72
73
namespace {

// Vertex layout used by onDrawRect: position, then color, then (optionally)
// local coords. 'extern' gives the const array external linkage, which is
// required for its use as a non-type template argument to setVertexAttribs.
extern const GrVertexAttrib kRectAttribs[] = {
    {kVec2f_GrVertexAttribType,  0,                               kPosition_GrVertexAttribBinding},
    {kVec4ub_GrVertexAttribType, sizeof(SkPoint),                 kColor_GrVertexAttribBinding},
    {kVec2f_GrVertexAttribType,  sizeof(SkPoint)+sizeof(GrColor), kLocalCoord_GrVertexAttribBinding},
};
}
82
83/** We always use per-vertex colors so that rects can be batched across color changes. Sometimes we
84    have explicit local coords and sometimes not. We *could* always provide explicit local coords
85    and just duplicate the positions when the caller hasn't provided a local coord rect, but we
86    haven't seen a use case which frequently switches between local rect and no local rect draws.
87
88    The color param is used to determine whether the opaque hint can be set on the draw state.
89    The caller must populate the vertex colors itself.
90
91    The vertex attrib order is always pos, color, [local coords].
92 */
93static void set_vertex_attributes(GrDrawState* drawState, bool hasLocalCoords, GrColor color) {
94    if (hasLocalCoords) {
95        drawState->setVertexAttribs<kRectAttribs>(3, 2 * sizeof(SkPoint) + sizeof(SkColor));
96    } else {
97        drawState->setVertexAttribs<kRectAttribs>(2, sizeof(SkPoint) + sizeof(SkColor));
98    }
99    if (0xFF == GrColorUnpackA(color)) {
100        drawState->setHint(GrDrawState::kVertexColorsAreOpaque_Hint, true);
101    }
102}
103
// Commands are recorded as a uint8_t per entry in fCmds. The high bit marks
// commands that have a corresponding entry in fGpuCmdMarkers; the low seven
// bits hold the command id itself.
enum {
    kTraceCmdBit = 0x80,
    kCmdMask = 0x7f,
};

// Sets the trace-marker flag on a command byte.
static inline uint8_t add_trace_bit(uint8_t cmd) { return cmd | kTraceCmdBit; }

// Clears the trace-marker flag, yielding the bare command id.
static inline uint8_t strip_trace_bit(uint8_t cmd) { return cmd & kCmdMask; }

// Returns true if the command byte carries a trace marker.
static inline bool cmd_has_trace_marker(uint8_t cmd) { return SkToBool(cmd & kTraceCmdBit); }
114
/**
 * Records a rect draw as a 4-vertex fan with per-vertex colors (see the
 * comment above set_vertex_attributes). Positions are pre-transformed to
 * device space so consecutive rect draws can be batched across view-matrix
 * changes.
 */
void GrInOrderDrawBuffer::onDrawRect(const SkRect& rect,
                                     const SkRect* localRect,
                                     const SkMatrix* localMatrix) {
    GrDrawState* drawState = this->drawState();

    GrColor color = drawState->getColor();

    set_vertex_attributes(drawState, SkToBool(localRect),  color);

    AutoReleaseGeometry geo(this, 4, 0);
    if (!geo.succeeded()) {
        GrPrintf("Failed to get space for vertices!\n");
        return;
    }

    // Go to device coords to allow batching across matrix changes
    SkMatrix matrix = drawState->getViewMatrix();

    // When the caller has provided an explicit source rect for a stage then we don't want to
    // modify that stage's matrix. Otherwise if the effect is generating its source rect from
    // the vertex positions then we have to account for the view matrix change.
    GrDrawState::AutoViewMatrixRestore avmr;
    if (!avmr.setIdentity(drawState)) {
        return;
    }

    size_t vstride = drawState->getVertexStride();

    // Write device-space positions: fan order, then map by the saved matrix.
    geo.positions()->setRectFan(rect.fLeft, rect.fTop, rect.fRight, rect.fBottom, vstride);
    matrix.mapPointsWithStride(geo.positions(), vstride, 4);

    SkRect devBounds;
    // since we already computed the dev verts, set the bounds hint. This will help us avoid
    // unnecessary clipping in our onDraw().
    get_vertex_bounds(geo.vertices(), vstride, 4, &devBounds);

    if (localRect) {
        // Local coords sit after pos+color; offsets must match kRectAttribs.
        static const int kLocalOffset = sizeof(SkPoint) + sizeof(GrColor);
        SkPoint* coords = GrTCast<SkPoint*>(GrTCast<intptr_t>(geo.vertices()) + kLocalOffset);
        coords->setRectFan(localRect->fLeft, localRect->fTop,
                           localRect->fRight, localRect->fBottom,
                           vstride);
        if (localMatrix) {
            localMatrix->mapPointsWithStride(coords, vstride, 4);
        }
    }

    // Fill in the (always-present) per-vertex color at its fixed offset.
    static const int kColorOffset = sizeof(SkPoint);
    GrColor* vertColor = GrTCast<GrColor*>(GrTCast<intptr_t>(geo.vertices()) + kColorOffset);
    for (int i = 0; i < 4; ++i) {
        *vertColor = color;
        vertColor = (GrColor*) ((intptr_t) vertColor + vstride);
    }

    this->setIndexSourceToBuffer(this->getContext()->getQuadIndexBuffer());
    this->drawIndexedInstances(kTriangles_GrPrimitiveType, 1, 4, 6, &devBounds);

    // to ensure that stashing the drawState ptr is valid
    SkASSERT(this->drawState() == drawState);
}
175
/**
 * Returns true if 'devBounds' is definitely inside the current clip, so the
 * clip can be skipped for that draw. Lazily caches a rect "proxy" for the
 * clip when the clip is a single integer rect; otherwise falls back to
 * querying the clip stack.
 */
bool GrInOrderDrawBuffer::quickInsideClip(const SkRect& devBounds) {
    if (!this->getDrawState().isClipState()) {
        // Clipping is disabled entirely; trivially inside.
        return true;
    }
    if (kUnknown_ClipProxyState == fClipProxyState) {
        SkIRect rect;
        bool iior;
        this->getClip()->getConservativeBounds(this->getDrawState().getRenderTarget(), &rect, &iior);
        if (iior) {
            // The clip is a rect. We will remember that in fProxyClip. It is common for an edge (or
            // all edges) of the clip to be at the edge of the RT. However, we get that clipping for
            // free via the viewport. We don't want to think that clipping must be enabled in this
            // case. So we extend the clip outward from the edge to avoid these false negatives.
            fClipProxyState = kValid_ClipProxyState;
            fClipProxy = SkRect::Make(rect);

            if (fClipProxy.fLeft <= 0) {
                fClipProxy.fLeft = SK_ScalarMin;
            }
            if (fClipProxy.fTop <= 0) {
                fClipProxy.fTop = SK_ScalarMin;
            }
            if (fClipProxy.fRight >= this->getDrawState().getRenderTarget()->width()) {
                fClipProxy.fRight = SK_ScalarMax;
            }
            if (fClipProxy.fBottom >= this->getDrawState().getRenderTarget()->height()) {
                fClipProxy.fBottom = SK_ScalarMax;
            }
        } else {
            fClipProxyState = kInvalid_ClipProxyState;
        }
    }
    if (kValid_ClipProxyState == fClipProxyState) {
        return fClipProxy.contains(devBounds);
    }
    // No rect proxy available: translate devBounds into clip space and ask
    // the clip stack directly.
    SkPoint originOffset = {SkIntToScalar(this->getClip()->fOrigin.fX),
                            SkIntToScalar(this->getClip()->fOrigin.fY)};
    SkRect clipSpaceBounds = devBounds;
    clipSpaceBounds.offset(originOffset);
    return this->getClip()->fClipStack->quickContains(clipSpaceBounds);
}
217
/**
 * Attempts to fold the instanced draw described by 'info' into the most
 * recently recorded draw command. Returns the number of instances that were
 * concatenated (0 if concatenation was not possible). Assumes at least one
 * command has already been recorded (reads fCmds.back()).
 */
int GrInOrderDrawBuffer::concatInstancedDraw(const DrawInfo& info) {
    SkASSERT(info.isInstanced());

    const GeometrySrcState& geomSrc = this->getGeomSrc();
    const GrDrawState& drawState = this->getDrawState();

    // we only attempt to concat the case when reserved verts are used with a client-specified index
    // buffer. To make this work with client-specified VBs we'd need to know if the VB was updated
    // between draws.
    if (kReserved_GeometrySrcType != geomSrc.fVertexSrc ||
        kBuffer_GeometrySrcType != geomSrc.fIndexSrc) {
        return 0;
    }
    // Check if there is a draw info that is compatible that uses the same VB from the pool and
    // the same IB
    if (kDraw_Cmd != strip_trace_bit(fCmds.back())) {
        return 0;
    }

    Draw* draw = &fDraws.back();
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrVertexBuffer* vertexBuffer = poolState.fPoolVertexBuffer;

    // The previous draw must have identical per-instance geometry and use the
    // same buffers for the new instances to be appended to it.
    if (!draw->isInstanced() ||
        draw->verticesPerInstance() != info.verticesPerInstance() ||
        draw->indicesPerInstance() != info.indicesPerInstance() ||
        draw->vertexBuffer() != vertexBuffer ||
        draw->indexBuffer() != geomSrc.fIndexBuffer) {
        return 0;
    }
    // info does not yet account for the offset from the start of the pool's VB while the previous
    // draw record does.
    int adjustedStartVertex = poolState.fPoolStartVertex + info.startVertex();
    if (draw->startVertex() + draw->vertexCount() != adjustedStartVertex) {
        return 0;
    }

    // NOTE(review): given the check above, this assert only holds when
    // info.startVertex() is 0 -- confirm that is the intended invariant here.
    SkASSERT(poolState.fPoolStartVertex == draw->startVertex() + draw->vertexCount());

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = this->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->instanceCount();
    instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());

    // update the amount of reserved vertex data actually referenced in draws
    size_t vertexBytes = instancesToConcat * info.verticesPerInstance() *
                         drawState.getVertexStride();
    poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, vertexBytes);

    draw->adjustInstanceCount(instancesToConcat);

    // update last fGpuCmdMarkers to include any additional trace markers that have been added
    if (this->getActiveTraceMarkers().count() > 0) {
        if (cmd_has_trace_marker(fCmds.back())) {
            fGpuCmdMarkers.back().addSet(this->getActiveTraceMarkers());
        } else {
            fGpuCmdMarkers.push_back(this->getActiveTraceMarkers());
            fCmds.back() = add_trace_bit(fCmds.back());
        }
    }

    return instancesToConcat;
}
281
282class AutoClipReenable {
283public:
284    AutoClipReenable() : fDrawState(NULL) {}
285    ~AutoClipReenable() {
286        if (fDrawState) {
287            fDrawState->enableState(GrDrawState::kClip_StateBit);
288        }
289    }
290    void set(GrDrawState* drawState) {
291        if (drawState->isClipState()) {
292            fDrawState = drawState;
293            drawState->disableState(GrDrawState::kClip_StateBit);
294        }
295    }
296private:
297    GrDrawState*    fDrawState;
298};
299
/**
 * Records a draw command. Clip and state commands are recorded first if they
 * changed; instanced draws are concatenated onto the previous draw when
 * possible. Start vertex/index values are rebased when the geometry lives in
 * a pool-owned buffer.
 */
void GrInOrderDrawBuffer::onDraw(const DrawInfo& info) {

    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GrDrawState& drawState = this->getDrawState();
    AutoClipReenable acr;

    // If the draw is provably inside the clip, disable clipping for this
    // record (acr re-enables it when this function returns).
    if (drawState.isClipState() &&
        info.getDevBounds() &&
        this->quickInsideClip(*info.getDevBounds())) {
        acr.set(this->drawState());
    }

    if (this->needsNewClip()) {
       this->recordClip();
    }
    this->recordStateIfNecessary();

    // Resolve the actual vertex buffer: client-supplied or pool-owned.
    const GrVertexBuffer* vb;
    if (kBuffer_GeometrySrcType == this->getGeomSrc().fVertexSrc) {
        vb = this->getGeomSrc().fVertexBuffer;
    } else {
        vb = poolState.fPoolVertexBuffer;
    }

    const GrIndexBuffer* ib = NULL;
    if (info.isIndexed()) {
        if (kBuffer_GeometrySrcType == this->getGeomSrc().fIndexSrc) {
            ib = this->getGeomSrc().fIndexBuffer;
        } else {
            ib = poolState.fPoolIndexBuffer;
        }
    }

    Draw* draw;
    if (info.isInstanced()) {
        int instancesConcated = this->concatInstancedDraw(info);
        if (info.instanceCount() > instancesConcated) {
            // Record the remainder as a new draw command.
            draw = this->recordDraw(info, vb, ib);
            draw->adjustInstanceCount(-instancesConcated);
        } else {
            // Everything was folded into the previous draw.
            return;
        }
    } else {
        draw = this->recordDraw(info, vb, ib);
    }

    // Adjust the starting vertex and index when we are using reserved or array sources to
    // compensate for the fact that the data was inserted into a larger vb/ib owned by the pool.
    if (kBuffer_GeometrySrcType != this->getGeomSrc().fVertexSrc) {
        size_t bytes = (info.vertexCount() + info.startVertex()) * drawState.getVertexStride();
        poolState.fUsedPoolVertexBytes = SkTMax(poolState.fUsedPoolVertexBytes, bytes);
        draw->adjustStartVertex(poolState.fPoolStartVertex);
    }

    if (info.isIndexed() && kBuffer_GeometrySrcType != this->getGeomSrc().fIndexSrc) {
        size_t bytes = (info.indexCount() + info.startIndex()) * sizeof(uint16_t);
        poolState.fUsedPoolIndexBytes = SkTMax(poolState.fUsedPoolIndexBytes, bytes);
        draw->adjustStartIndex(poolState.fPoolStartIndex);
    }
}
360
361void GrInOrderDrawBuffer::onStencilPath(const GrPath* path, SkPath::FillType fill) {
362    if (this->needsNewClip()) {
363        this->recordClip();
364    }
365    // Only compare the subset of GrDrawState relevant to path stenciling?
366    this->recordStateIfNecessary();
367    StencilPath* sp = this->recordStencilPath(path);
368    sp->fFill = fill;
369}
370
371void GrInOrderDrawBuffer::onDrawPath(const GrPath* path,
372                                     SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
373    if (this->needsNewClip()) {
374        this->recordClip();
375    }
376    // TODO: Only compare the subset of GrDrawState relevant to path covering?
377    this->recordStateIfNecessary();
378    DrawPath* cp = this->recordDrawPath(path);
379    cp->fFill = fill;
380    if (dstCopy) {
381        cp->fDstCopy = *dstCopy;
382    }
383}
384
/**
 * Records a draw-paths command. The caller's index and transform arrays are
 * deep-copied because they are not guaranteed to outlive the buffered
 * command. NOTE(review): presumably DrawPaths frees fIndices/fTransforms in
 * its destructor -- confirm, otherwise these allocations leak.
 */
void GrInOrderDrawBuffer::onDrawPaths(const GrPathRange* pathRange,
                                      const uint32_t indices[], int count,
                                      const float transforms[], PathTransformType transformsType,
                                      SkPath::FillType fill, const GrDeviceCoordTexture* dstCopy) {
    SkASSERT(pathRange);
    SkASSERT(indices);
    SkASSERT(transforms);

    if (this->needsNewClip()) {
        this->recordClip();
    }
    this->recordStateIfNecessary();
    DrawPaths* dp = this->recordDrawPaths(pathRange);
    dp->fIndices = SkNEW_ARRAY(uint32_t, count); // TODO: Accomplish this without a malloc
    memcpy(dp->fIndices, indices, sizeof(uint32_t) * count);
    dp->fCount = count;

    // The per-path transform size depends on the transform type (e.g. affine
    // vs. translate), so compute the total float count before copying.
    const int transformsLength = GrPathRendering::PathTransformSize(transformsType) * count;
    dp->fTransforms = SkNEW_ARRAY(float, transformsLength);
    memcpy(dp->fTransforms, transforms, sizeof(float) * transformsLength);
    dp->fTransformsType = transformsType;

    dp->fFill = fill;

    if (dstCopy) {
        dp->fDstCopy = *dstCopy;
    }
}
413
414void GrInOrderDrawBuffer::clear(const SkIRect* rect, GrColor color,
415                                bool canIgnoreRect, GrRenderTarget* renderTarget) {
416    SkIRect r;
417    if (NULL == renderTarget) {
418        renderTarget = this->drawState()->getRenderTarget();
419        SkASSERT(renderTarget);
420    }
421    if (NULL == rect) {
422        // We could do something smart and remove previous draws and clears to
423        // the current render target. If we get that smart we have to make sure
424        // those draws aren't read before this clear (render-to-texture).
425        r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
426        rect = &r;
427    }
428    Clear* clr = this->recordClear(renderTarget);
429    GrColorIsPMAssert(color);
430    clr->fColor = color;
431    clr->fRect = *rect;
432    clr->fCanIgnoreRect = canIgnoreRect;
433}
434
435void GrInOrderDrawBuffer::discard(GrRenderTarget* renderTarget) {
436    if (!this->caps()->discardRenderTargetSupport()) {
437        return;
438    }
439    if (NULL == renderTarget) {
440        renderTarget = this->drawState()->getRenderTarget();
441        SkASSERT(renderTarget);
442    }
443    Clear* clr = this->recordClear(renderTarget);
444    clr->fColor = GrColor_ILLEGAL;
445}
446
/**
 * Drops all recorded commands and returns the buffer to its initial state.
 * Does not play anything back; flush() calls this after replaying.
 */
void GrInOrderDrawBuffer::reset() {
    SkASSERT(1 == fGeoPoolStateStack.count());
    this->resetVertexSource();
    this->resetIndexSource();

    // Reset every per-command allocator together with the command list so
    // the lockstep iteration in flush() stays consistent.
    fCmds.reset();
    fDraws.reset();
    fStencilPaths.reset();
    fDrawPath.reset();
    fDrawPaths.reset();
    fStates.reset();
    fClears.reset();
    fVertexPool.reset();
    fIndexPool.reset();
    fClips.reset();
    fCopySurfaces.reset();
    fGpuCmdMarkers.reset();
    fClipSet = true;
}
466
/**
 * Replays every recorded command on fDstGpu, then resets the buffer.
 * Re-entrancy is blocked via fFlushing (restored by GrAutoTRestore). Each
 * command kind has its own allocator; the iterators below must advance in
 * lockstep with the entries of fCmds.
 */
void GrInOrderDrawBuffer::flush() {
    if (fFlushing) {
        return;
    }

    this->getContext()->getFontCache()->updateTextures();

    // Reserved geometry must have been released before flushing.
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fVertexSrc);
    SkASSERT(kReserved_GeometrySrcType != this->getGeomSrc().fIndexSrc);

    int numCmds = fCmds.count();
    if (0 == numCmds) {
        return;
    }

    GrAutoTRestore<bool> flushRestore(&fFlushing);
    fFlushing = true;

    // Make buffered vertex/index data visible to the GPU.
    fVertexPool.unmap();
    fIndexPool.unmap();

    GrDrawTarget::AutoClipRestore acr(fDstGpu);
    AutoGeometryAndStatePush agasp(fDstGpu, kPreserve_ASRInit);

    // Stash the gpu's current draw state; restored (and unref'ed) at the end.
    GrDrawState* prevDrawState = SkRef(fDstGpu->drawState());

    GrClipData clipData;

    // One iterator per command kind; each advances only when its command id
    // is encountered, mirroring the order commands were recorded.
    StateAllocator::Iter stateIter(&fStates);
    ClipAllocator::Iter clipIter(&fClips);
    ClearAllocator::Iter clearIter(&fClears);
    DrawAllocator::Iter drawIter(&fDraws);
    StencilPathAllocator::Iter stencilPathIter(&fStencilPaths);
    DrawPathAllocator::Iter drawPathIter(&fDrawPath);
    DrawPathsAllocator::Iter drawPathsIter(&fDrawPaths);
    CopySurfaceAllocator::Iter copySurfaceIter(&fCopySurfaces);

    int currCmdMarker   = 0;

    fDstGpu->saveActiveTraceMarkers();
    for (int c = 0; c < numCmds; ++c) {
        // Push the command's recorded trace markers (if any) around its replay.
        GrGpuTraceMarker newMarker("", -1);
        SkString traceString;
        if (cmd_has_trace_marker(fCmds[c])) {
            traceString = fGpuCmdMarkers[currCmdMarker].toString();
            newMarker.fMarker = traceString.c_str();
            fDstGpu->addGpuTraceMarker(&newMarker);
            ++currCmdMarker;
        }
        switch (strip_trace_bit(fCmds[c])) {
            case kDraw_Cmd: {
                // A SetState cmd should have replaced the stashed state
                // before any draw is replayed.
                SkASSERT(fDstGpu->drawState() != prevDrawState);
                SkAssertResult(drawIter.next());
                fDstGpu->setVertexSourceToBuffer(drawIter->vertexBuffer());
                if (drawIter->isIndexed()) {
                    fDstGpu->setIndexSourceToBuffer(drawIter->indexBuffer());
                }
                fDstGpu->executeDraw(*drawIter);
                break;
            }
            case kStencilPath_Cmd: {
                SkASSERT(fDstGpu->drawState() != prevDrawState);
                SkAssertResult(stencilPathIter.next());
                fDstGpu->stencilPath(stencilPathIter->path(), stencilPathIter->fFill);
                break;
            }
            case kDrawPath_Cmd: {
                SkASSERT(fDstGpu->drawState() != prevDrawState);
                SkAssertResult(drawPathIter.next());
                fDstGpu->executeDrawPath(drawPathIter->path(), drawPathIter->fFill,
                                         drawPathIter->fDstCopy.texture() ?
                                            &drawPathIter->fDstCopy :
                                            NULL);
                break;
            }
            case kDrawPaths_Cmd: {
                SkASSERT(fDstGpu->drawState() != prevDrawState);
                SkAssertResult(drawPathsIter.next());
                const GrDeviceCoordTexture* dstCopy =
                    drawPathsIter->fDstCopy.texture() ? &drawPathsIter->fDstCopy : NULL;
                fDstGpu->executeDrawPaths(drawPathsIter->pathRange(),
                                          drawPathsIter->fIndices,
                                          drawPathsIter->fCount,
                                          drawPathsIter->fTransforms,
                                          drawPathsIter->fTransformsType,
                                          drawPathsIter->fFill,
                                          dstCopy);
                break;
            }
            case kSetState_Cmd:
                SkAssertResult(stateIter.next());
                fDstGpu->setDrawState(stateIter.get());
                break;
            case kSetClip_Cmd:
                SkAssertResult(clipIter.next());
                clipData.fClipStack = &clipIter->fStack;
                clipData.fOrigin = clipIter->fOrigin;
                fDstGpu->setClip(&clipData);
                break;
            case kClear_Cmd:
                SkAssertResult(clearIter.next());
                // GrColor_ILLEGAL is the sentinel recorded by discard().
                if (GrColor_ILLEGAL == clearIter->fColor) {
                    fDstGpu->discard(clearIter->renderTarget());
                } else {
                    fDstGpu->clear(&clearIter->fRect,
                                   clearIter->fColor,
                                   clearIter->fCanIgnoreRect,
                                   clearIter->renderTarget());
                }
                break;
            case kCopySurface_Cmd:
                SkAssertResult(copySurfaceIter.next());
                fDstGpu->copySurface(copySurfaceIter->dst(),
                                     copySurfaceIter->src(),
                                     copySurfaceIter->fSrcRect,
                                     copySurfaceIter->fDstPoint);
                break;
        }
        if (cmd_has_trace_marker(fCmds[c])) {
            fDstGpu->removeGpuTraceMarker(&newMarker);
        }
    }
    fDstGpu->restoreActiveTraceMarkers();
    // we should have consumed all the states, clips, etc.
    SkASSERT(!stateIter.next());
    SkASSERT(!clipIter.next());
    SkASSERT(!clearIter.next());
    SkASSERT(!drawIter.next());
    SkASSERT(!copySurfaceIter.next());
    SkASSERT(!stencilPathIter.next());
    SkASSERT(!drawPathIter.next());
    SkASSERT(!drawPathsIter.next());

    SkASSERT(fGpuCmdMarkers.count() == currCmdMarker);

    fDstGpu->setDrawState(prevDrawState);
    prevDrawState->unref();
    this->reset();
    ++fDrawID;
}
607
608bool GrInOrderDrawBuffer::onCopySurface(GrSurface* dst,
609                                        GrSurface* src,
610                                        const SkIRect& srcRect,
611                                        const SkIPoint& dstPoint) {
612    if (fDstGpu->canCopySurface(dst, src, srcRect, dstPoint)) {
613        CopySurface* cs = this->recordCopySurface(dst, src);
614        cs->fSrcRect = srcRect;
615        cs->fDstPoint = dstPoint;
616        return true;
617    } else {
618        return false;
619    }
620}
621
// Defers the copy-capability query to the destination gpu.
bool GrInOrderDrawBuffer::onCanCopySurface(GrSurface* dst,
                                           GrSurface* src,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    return fDstGpu->canCopySurface(dst, src, srcRect, dstPoint);
}
628
// Defers dst-descriptor setup for surface copies to the destination gpu.
void GrInOrderDrawBuffer::initCopySurfaceDstDesc(const GrSurface* src, GrTextureDesc* desc) {
    fDstGpu->initCopySurfaceDstDesc(src, desc);
}
632
633void GrInOrderDrawBuffer::willReserveVertexAndIndexSpace(int vertexCount,
634                                                         int indexCount) {
635    // We use geometryHints() to know whether to flush the draw buffer. We
636    // can't flush if we are inside an unbalanced pushGeometrySource.
637    // Moreover, flushing blows away vertex and index data that was
638    // previously reserved. So if the vertex or index data is pulled from
639    // reserved space and won't be released by this request then we can't
640    // flush.
641    bool insideGeoPush = fGeoPoolStateStack.count() > 1;
642
643    bool unreleasedVertexSpace =
644        !vertexCount &&
645        kReserved_GeometrySrcType == this->getGeomSrc().fVertexSrc;
646
647    bool unreleasedIndexSpace =
648        !indexCount &&
649        kReserved_GeometrySrcType == this->getGeomSrc().fIndexSrc;
650
651    // we don't want to finalize any reserved geom on the target since
652    // we don't know that the client has finished writing to it.
653    bool targetHasReservedGeom = fDstGpu->hasReservedVerticesOrIndices();
654
655    int vcount = vertexCount;
656    int icount = indexCount;
657
658    if (!insideGeoPush &&
659        !unreleasedVertexSpace &&
660        !unreleasedIndexSpace &&
661        !targetHasReservedGeom &&
662        this->geometryHints(&vcount, &icount)) {
663
664        this->flush();
665    }
666}
667
668bool GrInOrderDrawBuffer::geometryHints(int* vertexCount,
669                                        int* indexCount) const {
670    // we will recommend a flush if the data could fit in a single
671    // preallocated buffer but none are left and it can't fit
672    // in the current buffer (which may not be prealloced).
673    bool flush = false;
674    if (indexCount) {
675        int32_t currIndices = fIndexPool.currentBufferIndices();
676        if (*indexCount > currIndices &&
677            (!fIndexPool.preallocatedBuffersRemaining() &&
678             *indexCount <= fIndexPool.preallocatedBufferIndices())) {
679
680            flush = true;
681        }
682        *indexCount = currIndices;
683    }
684    if (vertexCount) {
685        size_t vertexStride = this->getDrawState().getVertexStride();
686        int32_t currVertices = fVertexPool.currentBufferVertices(vertexStride);
687        if (*vertexCount > currVertices &&
688            (!fVertexPool.preallocatedBuffersRemaining() &&
689             *vertexCount <= fVertexPool.preallocatedBufferVertices(vertexStride))) {
690
691            flush = true;
692        }
693        *vertexCount = currVertices;
694    }
695    return flush;
696}
697
698bool GrInOrderDrawBuffer::onReserveVertexSpace(size_t vertexSize,
699                                               int vertexCount,
700                                               void** vertices) {
701    GeometryPoolState& poolState = fGeoPoolStateStack.back();
702    SkASSERT(vertexCount > 0);
703    SkASSERT(vertices);
704    SkASSERT(0 == poolState.fUsedPoolVertexBytes);
705
706    *vertices = fVertexPool.makeSpace(vertexSize,
707                                      vertexCount,
708                                      &poolState.fPoolVertexBuffer,
709                                      &poolState.fPoolStartVertex);
710    return SkToBool(*vertices);
711}
712
713bool GrInOrderDrawBuffer::onReserveIndexSpace(int indexCount, void** indices) {
714    GeometryPoolState& poolState = fGeoPoolStateStack.back();
715    SkASSERT(indexCount > 0);
716    SkASSERT(indices);
717    SkASSERT(0 == poolState.fUsedPoolIndexBytes);
718
719    *indices = fIndexPool.makeSpace(indexCount,
720                                    &poolState.fPoolIndexBuffer,
721                                    &poolState.fPoolStartIndex);
722    return SkToBool(*indices);
723}
724
// Returns unused reserved vertex space to the pool and clears the per-source
// pool bookkeeping.
void GrInOrderDrawBuffer::releaseReservedVertexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release vertex space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fVertexSrc ||
             kArray_GeometrySrcType == geoSrc.fVertexSrc);

    // When the caller reserved vertex buffer space we gave it back a pointer
    // provided by the vertex buffer pool. At each draw we tracked the largest
    // offset into the pool's pointer that was referenced. Now we return to the
    // pool any portion at the tail of the allocation that no draw referenced.
    size_t reservedVertexBytes = geoSrc.fVertexSize * geoSrc.fVertexCount;
    fVertexPool.putBack(reservedVertexBytes -
                        poolState.fUsedPoolVertexBytes);
    poolState.fUsedPoolVertexBytes = 0;
    poolState.fPoolVertexBuffer = NULL;
    poolState.fPoolStartVertex = 0;
}
745
// Returns unused reserved index space to the pool and clears the per-source
// pool bookkeeping.
void GrInOrderDrawBuffer::releaseReservedIndexSpace() {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    const GeometrySrcState& geoSrc = this->getGeomSrc();

    // If we get a release index space call then our current source should either be reserved
    // or array (which we copied into reserved space).
    SkASSERT(kReserved_GeometrySrcType == geoSrc.fIndexSrc ||
             kArray_GeometrySrcType == geoSrc.fIndexSrc);

    // Similar to releaseReservedVertexSpace we return any unused portion at
    // the tail
    size_t reservedIndexBytes = sizeof(uint16_t) * geoSrc.fIndexCount;
    fIndexPool.putBack(reservedIndexBytes - poolState.fUsedPoolIndexBytes);
    poolState.fUsedPoolIndexBytes = 0;
    poolState.fPoolIndexBuffer = NULL;
    poolState.fPoolStartIndex = 0;
}
763
// Copies a client-provided vertex array into pool-owned space and records
// where it landed. The #ifdef keeps the 'success' local out of release
// builds, where GR_DEBUGASSERT compiles away.
void GrInOrderDrawBuffer::onSetVertexSourceToArray(const void* vertexArray, int vertexCount) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolVertexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fVertexPool.appendVertices(this->getVertexSize(),
                               vertexCount,
                               vertexArray,
                               &poolState.fPoolVertexBuffer,
                               &poolState.fPoolStartVertex);
    GR_DEBUGASSERT(success);
}
777
// Copies a client-provided index array into pool-owned space and records
// where it landed. See onSetVertexSourceToArray for the #ifdef pattern.
void GrInOrderDrawBuffer::onSetIndexSourceToArray(const void* indexArray,
                                                  int indexCount) {
    GeometryPoolState& poolState = fGeoPoolStateStack.back();
    SkASSERT(0 == poolState.fUsedPoolIndexBytes);
#ifdef SK_DEBUG
    bool success =
#endif
    fIndexPool.appendIndices(indexCount,
                             indexArray,
                             &poolState.fPoolIndexBuffer,
                             &poolState.fPoolStartIndex);
    GR_DEBUGASSERT(success);
}
791
void GrInOrderDrawBuffer::releaseVertexArray() {
    // When the client provides an array as the vertex source we handled it
    // by copying their array into reserved space, so releasing the array is
    // identical to releasing a reservation. The explicit qualification makes
    // the call non-virtual.
    this->GrInOrderDrawBuffer::releaseReservedVertexSpace();
}
797
void GrInOrderDrawBuffer::releaseIndexArray() {
    // When the client provides an array as the index source we handled it
    // by copying their array into reserved space, so releasing the array is
    // identical to releasing a reservation. The explicit qualification makes
    // the call non-virtual.
    this->GrInOrderDrawBuffer::releaseReservedIndexSpace();
}
803
804void GrInOrderDrawBuffer::geometrySourceWillPush() {
805    GeometryPoolState& poolState = fGeoPoolStateStack.push_back();
806    poolState.fUsedPoolVertexBytes = 0;
807    poolState.fUsedPoolIndexBytes = 0;
808#ifdef SK_DEBUG
809    poolState.fPoolVertexBuffer = (GrVertexBuffer*)~0;
810    poolState.fPoolStartVertex = ~0;
811    poolState.fPoolIndexBuffer = (GrIndexBuffer*)~0;
812    poolState.fPoolStartIndex = ~0;
813#endif
814}
815
816void GrInOrderDrawBuffer::geometrySourceWillPop(const GeometrySrcState& restoredState) {
817    SkASSERT(fGeoPoolStateStack.count() > 1);
818    fGeoPoolStateStack.pop_back();
819    GeometryPoolState& poolState = fGeoPoolStateStack.back();
820    // we have to assume that any slack we had in our vertex/index data
821    // is now unreleasable because data may have been appended later in the
822    // pool.
823    if (kReserved_GeometrySrcType == restoredState.fVertexSrc ||
824        kArray_GeometrySrcType == restoredState.fVertexSrc) {
825        poolState.fUsedPoolVertexBytes = restoredState.fVertexSize * restoredState.fVertexCount;
826    }
827    if (kReserved_GeometrySrcType == restoredState.fIndexSrc ||
828        kArray_GeometrySrcType == restoredState.fIndexSrc) {
829        poolState.fUsedPoolIndexBytes = sizeof(uint16_t) *
830                                         restoredState.fIndexCount;
831    }
832}
833
834void GrInOrderDrawBuffer::recordStateIfNecessary() {
835    if (fStates.empty()) {
836        this->convertDrawStateToPendingExec(&fStates.push_back(this->getDrawState()));
837        this->addToCmdBuffer(kSetState_Cmd);
838        return;
839    }
840    const GrDrawState& curr = this->getDrawState();
841    GrDrawState& prev = fStates.back();
842    switch (GrDrawState::CombineIfPossible(prev, curr, *this->caps())) {
843        case GrDrawState::kIncompatible_CombinedState:
844            this->convertDrawStateToPendingExec(&fStates.push_back(curr));
845            this->addToCmdBuffer(kSetState_Cmd);
846            break;
847        case GrDrawState::kA_CombinedState:
848        case GrDrawState::kAOrB_CombinedState: // Treat the same as kA.
849            break;
850        case GrDrawState::kB_CombinedState:
851            // prev has already been converted to pending execution. That is a one-way ticket.
852            // So here we just delete prev and push back a new copy of curr. Note that this
853            // goes away when we move GrIODB over to taking optimized snapshots of draw states.
854            fStates.pop_back();
855            this->convertDrawStateToPendingExec(&fStates.push_back(curr));
856            break;
857    }
858}
859
860bool GrInOrderDrawBuffer::needsNewClip() const {
861    if (this->getDrawState().isClipState()) {
862       if (fClipSet &&
863           (fClips.empty() ||
864            fClips.back().fStack != *this->getClip()->fClipStack ||
865            fClips.back().fOrigin != this->getClip()->fOrigin)) {
866           return true;
867       }
868    }
869    return false;
870}
871
872void GrInOrderDrawBuffer::addToCmdBuffer(uint8_t cmd) {
873    SkASSERT(!cmd_has_trace_marker(cmd));
874    const GrTraceMarkerSet& activeTraceMarkers = this->getActiveTraceMarkers();
875    if (activeTraceMarkers.count() > 0) {
876        fCmds.push_back(add_trace_bit(cmd));
877        fGpuCmdMarkers.push_back(activeTraceMarkers);
878    } else {
879        fCmds.push_back(cmd);
880    }
881}
882
883void GrInOrderDrawBuffer::recordClip() {
884    fClips.push_back().fStack = *this->getClip()->fClipStack;
885    fClips.back().fOrigin = this->getClip()->fOrigin;
886    fClipSet = false;
887    this->addToCmdBuffer(kSetClip_Cmd);
888}
889
890GrInOrderDrawBuffer::Draw* GrInOrderDrawBuffer::recordDraw(const DrawInfo& info,
891                                                           const GrVertexBuffer* vb,
892                                                           const GrIndexBuffer* ib) {
893    this->addToCmdBuffer(kDraw_Cmd);
894    return GrNEW_APPEND_TO_ALLOCATOR(&fDraws, Draw, (info, vb, ib));
895}
896
897GrInOrderDrawBuffer::StencilPath* GrInOrderDrawBuffer::recordStencilPath(const GrPath* path) {
898    this->addToCmdBuffer(kStencilPath_Cmd);
899    return GrNEW_APPEND_TO_ALLOCATOR(&fStencilPaths, StencilPath, (path));
900}
901
902GrInOrderDrawBuffer::DrawPath* GrInOrderDrawBuffer::recordDrawPath(const GrPath* path) {
903    this->addToCmdBuffer(kDrawPath_Cmd);
904    return GrNEW_APPEND_TO_ALLOCATOR(&fDrawPath, DrawPath, (path));
905}
906
907GrInOrderDrawBuffer::DrawPaths* GrInOrderDrawBuffer::recordDrawPaths(const GrPathRange* pathRange) {
908    this->addToCmdBuffer(kDrawPaths_Cmd);
909    return GrNEW_APPEND_TO_ALLOCATOR(&fDrawPaths, DrawPaths, (pathRange));
910}
911
912GrInOrderDrawBuffer::Clear* GrInOrderDrawBuffer::recordClear(GrRenderTarget* rt) {
913    this->addToCmdBuffer(kClear_Cmd);
914    return GrNEW_APPEND_TO_ALLOCATOR(&fClears, Clear, (rt));
915}
916
917GrInOrderDrawBuffer::CopySurface* GrInOrderDrawBuffer::recordCopySurface(GrSurface* dst,
918                                                                         GrSurface* src) {
919    this->addToCmdBuffer(kCopySurface_Cmd);
920    return GrNEW_APPEND_TO_ALLOCATOR(&fCopySurfaces, CopySurface, (dst, src));
921}
922
923void GrInOrderDrawBuffer::clipWillBeSet(const GrClipData* newClipData) {
924    INHERITED::clipWillBeSet(newClipData);
925    fClipSet = true;
926    fClipProxyState = kUnknown_ClipProxyState;
927}
928