GrTargetCommands.cpp revision 385e26ef36af65bfe97567657d5b9a3207627a07
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrTargetCommands.h"

#include "GrColor.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrInOrderDrawBuffer.h"
#include "GrTemplates.h"
#include "SkPoint.h"

// Finalizes the GrBatch currently being accumulated, if any: replays it once so
// fBatchTarget can count the draws the batch will emit, records that count on the
// batch, and clears fDrawBatch so the next record starts a fresh batch.
void GrTargetCommands::closeBatch() {
    if (fDrawBatch) {
        fBatchTarget.resetNumberOfDraws();
        // NOTE(review): executed with a NULL GrGpu — DrawBatch::execute (below) ignores
        // its GrGpu argument and only generates geometry into fBatchTarget.
        fDrawBatch->execute(NULL, fPrevState);
        fDrawBatch->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
        fDrawBatch = NULL;
    }
}

// Returns true if the given path-stencil settings describe a winding fill, i.e. the
// pass op is not the invert op used by even/odd fills. The asserts double-check the
// remaining fields match the expected winding configuration.
static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) {
    // Path stenciling settings here are single-sided; only the front face is consulted.
    static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face;
    bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace);
    if (isWinding) {
        // Double check that it is in fact winding.
        SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace));
        SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace));
        SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace));
        SkASSERT(!pathStencilSettings.isTwoSided());
    }
    return isWinding;
}

// Attempts to fold an instanced draw into the most recently recorded Draw command.
// Returns the number of instances successfully concatenated onto the previous draw
// (0 if the previous command is not a compatible instanced Draw, the buffers differ,
// the vertices are not contiguous, or the index source cannot be extended).
int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb,
                                          const GrDrawTarget::DrawInfo& info) {
    SkASSERT(!fCmdBuffer.empty());
    SkASSERT(info.isInstanced());

    const GrIndexBuffer* ib;
    if (!iodb->canConcatToIndexBuffer(&ib)) {
        return 0;
    }

    // Check if there is a draw info that is compatible that uses the same VB from the pool and
    // the same IB
    if (Cmd::kDraw_CmdType != fCmdBuffer.back().type()) {
        return 0;
    }

    Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());

    if (!draw->fInfo.isInstanced() ||
        draw->fInfo.primitiveType() != info.primitiveType() ||
        draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
        draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
        draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
        draw->fInfo.indexBuffer() != ib) {
        return 0;
    }
    // The new vertices must directly follow the previous draw's vertices in the buffer.
    if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) {
        return 0;
    }

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->fInfo.instanceCount();
    instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());

    draw->fInfo.adjustInstanceCount(instancesToConcat);

    // update last fGpuCmdMarkers to include any additional trace markers that have been added
    iodb->recordTraceMarkersIfNecessary(draw);
    return instancesToConcat;
}

// Records a Draw command. Instanced draws are first offered to concatInstancedDraw();
// only the leftover instances get a new command. Returns the recorded command, or NULL
// when nothing new was recorded (pipeline skip, or full concatenation into the
// previous draw).
GrTargetCommands::Cmd* GrTargetCommands::recordDraw(
        GrInOrderDrawBuffer* iodb,
        const GrGeometryProcessor* gp,
        const GrDrawTarget::DrawInfo& info,
        const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer()));
    this->closeBatch();

    if (!this->setupPipelineAndShouldDraw(iodb, gp, pipelineInfo)) {
        return NULL;
    }

    Draw* draw;
    if (info.isInstanced()) {
        int instancesConcated = this->concatInstancedDraw(iodb, info);
        if (info.instanceCount() > instancesConcated) {
            draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
            // Instances already folded into the previous draw must not be drawn twice.
            draw->fInfo.adjustInstanceCount(-instancesConcated);
        } else {
            return NULL;
        }
    } else {
        draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
    }

    return draw;
}

// Records a GrBatch draw. If the last command is the still-open DrawBatch and the new
// batch reports combineIfPossible(), the existing command absorbs it; otherwise the
// open batch is closed and a new DrawBatch command is appended.
GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
        GrInOrderDrawBuffer* iodb,
        GrBatch* batch,
        const GrDrawTarget::PipelineInfo& pipelineInfo) {
    if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) {
        return NULL;
    }

    // Check if there is a Batch Draw we can batch with
    if (Cmd::kDrawBatch_CmdType != fCmdBuffer.back().type() || !fDrawBatch) {
        fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
        return fDrawBatch;
    }

    SkASSERT(&fCmdBuffer.back() == fDrawBatch);
    if (!fDrawBatch->fBatch->combineIfPossible(batch)) {
        this->closeBatch();
        fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
    }

    return fDrawBatch;
}

// Records a StencilPath command that will stencil 'path' into the stencil buffer of
// the pipeline builder's render target using 'stencilSettings'. No SetState is
// recorded for this command; it carries its own state snapshot.
GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
        GrInOrderDrawBuffer* iodb,
        const GrPipelineBuilder& pipelineBuilder,
        const GrPathProcessor* pathProc,
        const GrPath* path,
        const GrScissorState& scissorState,
        const GrStencilSettings& stencilSettings) {
    this->closeBatch();

    StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath,
                                               (path, pipelineBuilder.getRenderTarget()));

    sp->fScissor = scissorState;
    sp->fUseHWAA = pipelineBuilder.isHWAntialias();
    sp->fViewMatrix = pathProc->viewMatrix();
    sp->fStencil = stencilSettings;
    return sp;
}

// Records a DrawPath command (stencil-then-cover of a single path). Returns NULL if
// the pipeline says the draw can be skipped.
GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath(
        GrInOrderDrawBuffer* iodb,
        const GrPathProcessor* pathProc,
        const GrPath* path,
        const GrStencilSettings& stencilSettings,
        const GrDrawTarget::PipelineInfo& pipelineInfo) {
    this->closeBatch();

    // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
    if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
        return NULL;
    }
    DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
    dp->fStencilSettings = stencilSettings;
    return dp;
}

// Records a DrawPaths command (instanced stencil-then-cover over a GrPathRange).
// The index and transform arrays are copied into the draw buffer's storage; when the
// previous command is a compatible DrawPaths whose copied data is contiguous with
// ours, the two calls are folded into one (see the long comment below for why that
// is only done for winding fills with no dst blending). Returns NULL when folded or
// skipped.
GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
        GrInOrderDrawBuffer* iodb,
        const GrPathProcessor* pathProc,
        const GrPathRange* pathRange,
        const void* indexValues,
        GrDrawTarget::PathIndexType indexType,
        const float transformValues[],
        GrDrawTarget::PathTransformType transformType,
        int count,
        const GrStencilSettings& stencilSettings,
        const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SkASSERT(pathRange);
    SkASSERT(indexValues);
    SkASSERT(transformValues);
    this->closeBatch();

    if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
        return NULL;
    }

    char* savedIndices;
    float* savedTransforms;

    iodb->appendIndicesAndTransforms(indexValues, indexType,
                                     transformValues, transformType,
                                     count, &savedIndices, &savedTransforms);

    if (Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) {
        // The previous command was also DrawPaths. Try to collapse this call into the one
        // before. Note that stenciling all the paths at once, then covering, may not be
        // equivalent to two separate draw calls if there is overlap. Blending won't work,
        // and the combined calls may also cancel each other's winding numbers in some
        // places. For now the winding numbers are only an issue if the fill is even/odd,
        // because DrawPaths is currently only used for glyphs, and glyphs in the same
        // font tend to all wind in the same direction.
        DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back());
        if (pathRange == previous->pathRange() &&
            indexType == previous->fIndexType &&
            transformType == previous->fTransformType &&
            stencilSettings == previous->fStencilSettings &&
            path_fill_type_is_winding(stencilSettings) &&
            !pipelineInfo.willBlendWithDst(pathProc)) {
            const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
            const int xformSize = GrPathRendering::PathTransformSize(transformType);
            // Folding is only valid when our saved data starts exactly where the
            // previous command's data ends, so one contiguous run covers both calls.
            if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
                (0 == xformSize ||
                 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
                // Fold this DrawPaths call into the one previous.
                previous->fCount += count;
                return NULL;
            }
        }
    }

    DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange));
    dp->fIndices = savedIndices;
    dp->fIndexType = indexType;
    dp->fTransforms = savedTransforms;
    dp->fTransformType = transformType;
    dp->fCount = count;
    dp->fStencilSettings = stencilSettings;
    return dp;
}

// Records a Clear command. A NULL 'rect' means "clear the whole render target".
GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb,
                                                     const SkIRect* rect,
                                                     GrColor color,
                                                     bool canIgnoreRect,
                                                     GrRenderTarget* renderTarget) {
    SkASSERT(renderTarget);
    this->closeBatch();

    SkIRect r;
    if (NULL == rect) {
        // We could do something smart and remove previous draws and clears to
        // the current render target. If we get that smart we have to make sure
        // those draws aren't read before this clear (render-to-texture).
        r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
        rect = &r;
    }
    Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
    // Clear colors must already be premultiplied.
    GrColorIsPMAssert(color);
    clr->fColor = color;
    clr->fRect = *rect;
    clr->fCanIgnoreRect = canIgnoreRect;
    return clr;
}

// Records a clear of the stencil clip bit within 'rect' (or outside it, per
// 'insideClip') on the given render target.
GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb,
                                                                const SkIRect& rect,
                                                                bool insideClip,
                                                                GrRenderTarget* renderTarget) {
    SkASSERT(renderTarget);
    this->closeBatch();

    ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget));
    clr->fRect = rect;
    clr->fInsideClip = insideClip;
    return clr;
}

// Records a discard of the render target's contents. Reuses the Clear command with
// GrColor_ILLEGAL as a sentinel; Clear::execute (below) turns that into gpu->discard().
GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb,
                                                       GrRenderTarget* renderTarget) {
    SkASSERT(renderTarget);
    this->closeBatch();

    Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
    clr->fColor = GrColor_ILLEGAL;
    return clr;
}

// Drops all recorded commands and the cached state/batch pointers that refer into
// the (now reset) command buffer.
void GrTargetCommands::reset() {
    fCmdBuffer.reset();
    fPrevState = NULL;
    fDrawBatch = NULL;
}

// Plays back every recorded command against the GrGpu, tracking the current SetState
// as commands are encountered and bracketing traced commands with GPU trace markers.
void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
    if (fCmdBuffer.empty()) {
        return;
    }

    // TODO this is temporary while batch is being rolled out
    this->closeBatch();
    // Geometry pools must be unmapped before the GPU reads from them.
    iodb->getVertexAllocPool()->unmap();
    iodb->getIndexAllocPool()->unmap();
    fBatchTarget.preFlush();

    // Updated every time we find a set state cmd to reflect the current state in the playback
    // stream.
    SetState* currentState = NULL;

    CmdBuffer::Iter iter(fCmdBuffer);

    GrGpu* gpu = iodb->getGpu();

    while (iter.next()) {
        GrGpuTraceMarker newMarker("", -1);
        // traceString must outlive the marker since newMarker holds its c_str().
        SkString traceString;
        if (iter->isTraced()) {
            traceString = iodb->getCmdString(iter->markerID());
            newMarker.fMarker = traceString.c_str();
            gpu->addGpuTraceMarker(&newMarker);
        }

        // TODO temporary hack
        if (Cmd::kDrawBatch_CmdType == iter->type()) {
            DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get());
            // Batch draws were pre-generated at record time (closeBatch); here we just
            // tell the batch target to issue the next numberOfDraws() of them.
            fBatchTarget.flushNext(db->fBatch->numberOfDraws());

            if (iter->isTraced()) {
                gpu->removeGpuTraceMarker(&newMarker);
            }
            continue;
        }

        if (Cmd::kSetState_CmdType == iter->type()) {
            SetState* ss = reinterpret_cast<SetState*>(iter.get());

            ss->execute(gpu, currentState);
            currentState = ss;
        } else {
            iter->execute(gpu, currentState);
        }

        if (iter->isTraced()) {
            gpu->removeGpuTraceMarker(&newMarker);
        }
    }

    // TODO see copious notes about hack
    fBatchTarget.postFlush();
}

// Issues a recorded vertex/index draw using the pipeline and program state captured
// by the preceding SetState command.
void GrTargetCommands::Draw::execute(GrGpu* gpu, const SetState* state) {
    SkASSERT(state);
    DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
                  &state->fBatchTracker);
    gpu->draw(args, fInfo);
}

// Stencils the path using the state snapshot stored on the command itself; the
// playback SetState is intentionally unused.
void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
    GrGpu::StencilPathState state;
    state.fRenderTarget = fRenderTarget.get();
    state.fScissor = &fScissor;
    state.fStencil = &fStencil;
    state.fUseHWAA = fUseHWAA;
    state.fViewMatrix = &fViewMatrix;

    gpu->stencilPath(this->path(), state);
}

// Issues a recorded single-path draw under the current SetState.
void GrTargetCommands::DrawPath::execute(GrGpu* gpu, const SetState* state) {
    SkASSERT(state);
    DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
                  &state->fBatchTracker);
    gpu->drawPath(args, this->path(), fStencilSettings);
}

// Issues a recorded instanced path-range draw under the current SetState.
void GrTargetCommands::DrawPaths::execute(GrGpu* gpu, const SetState* state) {
    SkASSERT(state);
    DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
                  &state->fBatchTracker);
    gpu->drawPaths(args, this->pathRange(),
                   fIndices, fIndexType,
                   fTransforms, fTransformType,
                   fCount, fStencilSettings);
}

// Generates the batch's geometry into the batch target. Note this ignores the GrGpu:
// it is invoked at record time by closeBatch() (with a NULL gpu), not during flush.
void GrTargetCommands::DrawBatch::execute(GrGpu*, const SetState* state) {
    SkASSERT(state);
    fBatch->generateGeometry(fBatchTarget, state->getPipeline());
}

// Builds the program descriptor for this state during playback. Batch-based states
// have no primitive processor and need no descriptor here.
void GrTargetCommands::SetState::execute(GrGpu* gpu, const SetState*) {
    // TODO sometimes we have a prim proc, othertimes we have a GrBatch. Eventually we
    // will only have GrBatch and we can delete this
    if (fPrimitiveProcessor) {
        gpu->buildProgramDesc(&fDesc, *fPrimitiveProcessor, *getPipeline(), fBatchTracker);
    }
}

// Clears the render target — or discards it when fColor carries the GrColor_ILLEGAL
// sentinel recorded by recordDiscard().
void GrTargetCommands::Clear::execute(GrGpu* gpu, const SetState*) {
    if (GrColor_ILLEGAL == fColor) {
        gpu->discard(this->renderTarget());
    } else {
        gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
    }
}

// Clears the stencil clip bit on the recorded render target.
void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) {
    gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
}

// Performs the recorded surface-to-surface copy.
void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
    gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}

// Records a CopySurface command if the GPU reports the copy is possible; returns
// NULL (recording nothing) otherwise.
GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb,
                                                           GrSurface* dst,
                                                           GrSurface* src,
                                                           const SkIRect& srcRect,
                                                           const SkIPoint& dstPoint) {
    if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) {
        this->closeBatch();
        CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
        cs->fSrcRect = srcRect;
        cs->fDstPoint = dstPoint;
        return cs;
    }
    return NULL;
}

// Appends a SetState for a primitive-processor draw. Returns false (popping the
// state) when the pipeline says the draw must be skipped. If the new state is
// provably equal to the previous one, it is popped as redundant; otherwise it
// becomes the new fPrevState.
bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
                                                  const GrPrimitiveProcessor* primProc,
                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc));
    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

    if (ss->getPipeline()->mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
                                              ss->getPipeline()->getInitBatchTracker());

    // Deduplicate: drop this SetState if the previous one can render identically.
    if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
                                                      *ss->fPrimitiveProcessor,
                                                      ss->fBatchTracker) &&
        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
        fCmdBuffer.pop_back();
    } else {
        fPrevState = ss;
        iodb->recordTraceMarkersIfNecessary(ss);
    }
    return true;
}

// GrBatch overload of the above. A batch-based SetState has no primitive processor,
// so states dedupe on pipeline equality alone; keeping the previous state open (not
// closing the batch) is what allows consecutive batches to combine.
bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
                                                  GrBatch* batch,
                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, ());
    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

    if (ss->getPipeline()->mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker());

    if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
        fCmdBuffer.pop_back();
    } else {
        // A genuinely new state ends any batch accumulated under the old one.
        this->closeBatch();
        fPrevState = ss;
        iodb->recordTraceMarkersIfNecessary(ss);
    }
    return true;
}