// GrTargetCommands.cpp — revision db3ce12c810ead7b76faa784e7293197aca0d9f1
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrTargetCommands.h"

#include "GrColor.h"
#include "GrDefaultGeoProcFactory.h"
#include "GrInOrderDrawBuffer.h"
#include "GrTemplates.h"
#include "SkPoint.h"

// Finalizes the currently open GrBatch draw, if any: replays the batch once
// (with a NULL GrGpu, so this pass only generates geometry and counts the
// draws it will issue), stores that draw count on the batch for playback in
// flush(), then clears fDrawBatch so subsequent commands open a new batch.
void GrTargetCommands::closeBatch() {
    if (fDrawBatch) {
        fBatchTarget.resetNumberOfDraws();
        fDrawBatch->execute(NULL, fPrevState);
        fDrawBatch->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
        fDrawBatch = NULL;
    }
}

// Returns true if the path-stencil settings describe a winding fill (as
// opposed to even/odd, which uses kInvert_StencilOp). Only the front face is
// inspected; the asserts double-check that a winding config looks the way the
// rest of the code expects (inc-clamp ops, full write mask, single-sided).
static bool path_fill_type_is_winding(const GrStencilSettings& pathStencilSettings) {
    static const GrStencilSettings::Face pathFace = GrStencilSettings::kFront_Face;
    bool isWinding = kInvert_StencilOp != pathStencilSettings.passOp(pathFace);
    if (isWinding) {
        // Double check that it is in fact winding.
        SkASSERT(kIncClamp_StencilOp == pathStencilSettings.passOp(pathFace));
        SkASSERT(kIncClamp_StencilOp == pathStencilSettings.failOp(pathFace));
        // Winding needs more than one stencil bit; a 0x1 mask would indicate
        // even/odd fill.
        SkASSERT(0x1 != pathStencilSettings.writeMask(pathFace));
        SkASSERT(!pathStencilSettings.isTwoSided());
    }
    return isWinding;
}

// Tries to fold an instanced draw into the previously recorded Draw command.
// Succeeds only when the prior command is an instanced Draw with identical
// primitive/instance geometry, the same vertex buffer, a concat-compatible
// index buffer, and vertices laid out contiguously after the prior draw's.
// Returns the number of instances that were merged (0 when nothing merged);
// the caller records a fresh Draw for any remainder.
int GrTargetCommands::concatInstancedDraw(GrInOrderDrawBuffer* iodb,
                                          const GrDrawTarget::DrawInfo& info) {
    SkASSERT(!fCmdBuffer.empty());
    SkASSERT(info.isInstanced());

    const GrIndexBuffer* ib;
    if (!iodb->canConcatToIndexBuffer(&ib)) {
        return 0;
    }

    // Check if there is a draw info that is compatible that uses the same VB from the pool and
    // the same IB
    if (Cmd::kDraw_CmdType != fCmdBuffer.back().type()) {
        return 0;
    }

    Draw* draw = static_cast<Draw*>(&fCmdBuffer.back());

    if (!draw->fInfo.isInstanced() ||
        draw->fInfo.primitiveType() != info.primitiveType() ||
        draw->fInfo.verticesPerInstance() != info.verticesPerInstance() ||
        draw->fInfo.indicesPerInstance() != info.indicesPerInstance() ||
        draw->fInfo.vertexBuffer() != info.vertexBuffer() ||
        draw->fInfo.indexBuffer() != ib) {
        return 0;
    }
    // New vertices must start exactly where the previous draw's end.
    if (draw->fInfo.startVertex() + draw->fInfo.vertexCount() != info.startVertex()) {
        return 0;
    }

    // how many instances can be concat'ed onto draw given the size of the index buffer
    int instancesToConcat = iodb->indexCountInCurrentSource() / info.indicesPerInstance();
    instancesToConcat -= draw->fInfo.instanceCount();
    instancesToConcat = SkTMin(instancesToConcat, info.instanceCount());

    draw->fInfo.adjustInstanceCount(instancesToConcat);

    // update last fGpuCmdMarkers to include any additional trace markers that have been added
    iodb->recordTraceMarkersIfNecessary(draw);
    return instancesToConcat;
}

// Records a (non-batch) draw command. Closes any open batch first, records a
// SetState if needed, and for instanced draws attempts to concat onto the
// previous Draw before appending a new one. Returns the recorded command, or
// NULL when the draw was skipped or fully merged into the previous command.
GrTargetCommands::Cmd* GrTargetCommands::recordDraw(
                                          GrInOrderDrawBuffer* iodb,
                                          const GrGeometryProcessor* gp,
                                          const GrDrawTarget::DrawInfo& info,
                                          const GrDrawTarget::PipelineInfo& pipelineInfo) {
#ifdef USE_BITMAP_TEXTBLOBS
    // With bitmap textblobs everything is expected to arrive as a GrBatch.
    SkFAIL("Non-batch no longer supported\n");
#endif
    SkASSERT(info.vertexBuffer() && (!info.isIndexed() || info.indexBuffer()));
    CLOSE_BATCH

    if (!this->setupPipelineAndShouldDraw(iodb, gp, pipelineInfo)) {
        return NULL;
    }

    Draw* draw;
    if (info.isInstanced()) {
        int instancesConcated = this->concatInstancedDraw(iodb, info);
        if (info.instanceCount() > instancesConcated) {
            // Record the remainder that couldn't be merged.
            draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
            draw->fInfo.adjustInstanceCount(-instancesConcated);
        } else {
            // Everything was folded into the previous Draw.
            return NULL;
        }
    } else {
        draw = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Draw, (info));
    }

    return draw;
}

// Records a GrBatch draw. If the previous command is the currently open
// DrawBatch, tries to combine the incoming batch with it; otherwise (or when
// combining fails) closes the open batch and appends a new DrawBatch.
// Returns NULL when the pipeline says the draw should be skipped.
GrTargetCommands::Cmd* GrTargetCommands::recordDrawBatch(
                                          GrInOrderDrawBuffer* iodb,
                                          GrBatch* batch,
                                          const GrDrawTarget::PipelineInfo& pipelineInfo) {
    if (!this->setupPipelineAndShouldDraw(iodb, batch, pipelineInfo)) {
        return NULL;
    }

    // Check if there is a Batch Draw we can batch with
    if (Cmd::kDrawBatch_CmdType != fCmdBuffer.back().type() || !fDrawBatch) {
        fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
        return fDrawBatch;
    }

    SkASSERT(&fCmdBuffer.back() == fDrawBatch);
    if (!fDrawBatch->fBatch->combineIfPossible(batch)) {
        CLOSE_BATCH
        fDrawBatch = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawBatch, (batch, &fBatchTarget));
    }

    return fDrawBatch;
}

// Records a stencil-path command (fills the stencil buffer with a path's
// coverage). Captures scissor, HW AA flag, view matrix, and stencil settings
// on the command for playback.
GrTargetCommands::Cmd* GrTargetCommands::recordStencilPath(
                                                        GrInOrderDrawBuffer* iodb,
                                                        const GrPipelineBuilder& pipelineBuilder,
                                                        const GrPathProcessor* pathProc,
                                                        const GrPath* path,
                                                        const GrScissorState& scissorState,
                                                        const GrStencilSettings& stencilSettings) {
    CLOSE_BATCH

    StencilPath* sp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, StencilPath,
                                               (path, pipelineBuilder.getRenderTarget()));

    sp->fScissor = scissorState;
    sp->fUseHWAA = pipelineBuilder.isHWAntialias();
    sp->fViewMatrix = pathProc->viewMatrix();
    sp->fStencil = stencilSettings;
    return sp;
}

// Records a single-path draw (NVPR-style path rendering). Returns NULL when
// the pipeline setup decides the draw should be skipped.
GrTargetCommands::Cmd* GrTargetCommands::recordDrawPath(
                                          GrInOrderDrawBuffer* iodb,
                                          const GrPathProcessor* pathProc,
                                          const GrPath* path,
                                          const GrStencilSettings& stencilSettings,
                                          const GrDrawTarget::PipelineInfo& pipelineInfo) {
    CLOSE_BATCH

    // TODO: Only compare the subset of GrPipelineBuilder relevant to path covering?
    if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
        return NULL;
    }
    DrawPath* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPath, (path));
    dp->fStencilSettings = stencilSettings;
    return dp;
}

// Records a multi-path draw over a GrPathRange. Copies the index/transform
// arrays into the recorder's storage, then attempts to fold the call into an
// immediately preceding compatible DrawPaths command (see the long comment
// below for when that is safe). Returns the recorded command, NULL when the
// draw was skipped or fully merged.
GrTargetCommands::Cmd* GrTargetCommands::recordDrawPaths(
                                          GrInOrderDrawBuffer* iodb,
                                          const GrPathProcessor* pathProc,
                                          const GrPathRange* pathRange,
                                          const void* indexValues,
                                          GrDrawTarget::PathIndexType indexType,
                                          const float transformValues[],
                                          GrDrawTarget::PathTransformType transformType,
                                          int count,
                                          const GrStencilSettings& stencilSettings,
                                          const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SkASSERT(pathRange);
    SkASSERT(indexValues);
    SkASSERT(transformValues);
    CLOSE_BATCH

    if (!this->setupPipelineAndShouldDraw(iodb, pathProc, pipelineInfo)) {
        return NULL;
    }

    char* savedIndices;
    float* savedTransforms;

    iodb->appendIndicesAndTransforms(indexValues, indexType,
                                     transformValues, transformType,
                                     count, &savedIndices, &savedTransforms);

    if (Cmd::kDrawPaths_CmdType == fCmdBuffer.back().type()) {
        // The previous command was also DrawPaths. Try to collapse this call into the one
        // before. Note that stenciling all the paths at once, then covering, may not be
        // equivalent to two separate draw calls if there is overlap. Blending won't work,
        // and the combined calls may also cancel each other's winding numbers in some
        // places. For now the winding numbers are only an issue if the fill is even/odd,
        // because DrawPaths is currently only used for glyphs, and glyphs in the same
        // font tend to all wind in the same direction.
        DrawPaths* previous = static_cast<DrawPaths*>(&fCmdBuffer.back());
        if (pathRange == previous->pathRange() &&
            indexType == previous->fIndexType &&
            transformType == previous->fTransformType &&
            stencilSettings == previous->fStencilSettings &&
            path_fill_type_is_winding(stencilSettings) &&
            !pipelineInfo.willBlendWithDst(pathProc)) {
            // The new arrays must sit immediately after the previous command's
            // in the recorder's storage for the merge to be a simple count bump.
            const int indexBytes = GrPathRange::PathIndexSizeInBytes(indexType);
            const int xformSize = GrPathRendering::PathTransformSize(transformType);
            if (&previous->fIndices[previous->fCount*indexBytes] == savedIndices &&
                (0 == xformSize ||
                 &previous->fTransforms[previous->fCount*xformSize] == savedTransforms)) {
                // Fold this DrawPaths call into the one previous.
                previous->fCount += count;
                return NULL;
            }
        }
    }

    DrawPaths* dp = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, DrawPaths, (pathRange));
    dp->fIndices = savedIndices;
    dp->fIndexType = indexType;
    dp->fTransforms = savedTransforms;
    dp->fTransformType = transformType;
    dp->fCount = count;
    dp->fStencilSettings = stencilSettings;
    return dp;
}

// Records a clear of `renderTarget`. A NULL `rect` means "clear the whole
// target" and is normalized to the full-target rectangle. `color` must be
// premultiplied (asserted).
GrTargetCommands::Cmd* GrTargetCommands::recordClear(GrInOrderDrawBuffer* iodb,
                                                     const SkIRect* rect,
                                                     GrColor color,
                                                     bool canIgnoreRect,
                                                     GrRenderTarget* renderTarget) {
    SkASSERT(renderTarget);
    CLOSE_BATCH

    SkIRect r;
    if (NULL == rect) {
        // We could do something smart and remove previous draws and clears to
        // the current render target. If we get that smart we have to make sure
        // those draws aren't read before this clear (render-to-texture).
        r.setLTRB(0, 0, renderTarget->width(), renderTarget->height());
        rect = &r;
    }
    Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
    GrColorIsPMAssert(color);
    clr->fColor = color;
    clr->fRect = *rect;
    clr->fCanIgnoreRect = canIgnoreRect;
    return clr;
}

// Records a clear of the stencil-clip bits within `rect` on `renderTarget`.
GrTargetCommands::Cmd* GrTargetCommands::recordClearStencilClip(GrInOrderDrawBuffer* iodb,
                                                                const SkIRect& rect,
                                                                bool insideClip,
                                                                GrRenderTarget* renderTarget) {
    SkASSERT(renderTarget);
    CLOSE_BATCH

    ClearStencilClip* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, ClearStencilClip, (renderTarget));
    clr->fRect = rect;
    clr->fInsideClip = insideClip;
    return clr;
}

// Records a discard of `renderTarget`'s contents. Reuses the Clear command
// with the sentinel GrColor_ILLEGAL, which Clear::execute interprets as a
// discard rather than a clear.
GrTargetCommands::Cmd* GrTargetCommands::recordDiscard(GrInOrderDrawBuffer* iodb,
                                                       GrRenderTarget* renderTarget) {
    SkASSERT(renderTarget);
    CLOSE_BATCH

    Clear* clr = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, Clear, (renderTarget));
    clr->fColor = GrColor_ILLEGAL;
    return clr;
}

// Drops all recorded commands and resets the batching state so recording can
// start fresh.
void GrTargetCommands::reset() {
    fCmdBuffer.reset();
    fPrevState = NULL;
    fDrawBatch = NULL;
}

// Plays back every recorded command against the GPU. First closes any open
// batch and (under USE_BITMAP_TEXTBLOBS) runs a geometry-generation pre-pass
// over all batches, then unmaps the vertex/index pools and replays the
// command buffer, tracking the most recent SetState so draw commands see the
// current pipeline state. Trace markers are pushed/popped around each traced
// command.
void GrTargetCommands::flush(GrInOrderDrawBuffer* iodb) {
    if (fCmdBuffer.empty()) {
        return;
    }

    // TODO this is temporary while batch is being rolled out
    CLOSE_BATCH

    // Updated every time we find a set state cmd to reflect the current state in the playback
    // stream.
    SetState* currentState = NULL;

    GrGpu* gpu = iodb->getGpu();

#ifdef USE_BITMAP_TEXTBLOBS
    // Loop over all batches and generate geometry
    CmdBuffer::Iter genIter(fCmdBuffer);
    while (genIter.next()) {
        if (Cmd::kDrawBatch_CmdType == genIter->type()) {
            DrawBatch* db = reinterpret_cast<DrawBatch*>(genIter.get());
            fBatchTarget.resetNumberOfDraws();
            // NULL gpu: geometry-only pass, counts the draws the batch emits.
            db->execute(NULL, currentState);
            db->fBatch->setNumberOfDraws(fBatchTarget.numberOfDraws());
        } else if (Cmd::kSetState_CmdType == genIter->type()) {
            SetState* ss = reinterpret_cast<SetState*>(genIter.get());

            ss->execute(gpu, currentState);
            currentState = ss;
        }
    }
#endif

    iodb->getVertexAllocPool()->unmap();
    iodb->getIndexAllocPool()->unmap();
    fBatchTarget.preFlush();

    CmdBuffer::Iter iter(fCmdBuffer);

    while (iter.next()) {
        GrGpuTraceMarker newMarker("", -1);
        SkString traceString;
        if (iter->isTraced()) {
            traceString = iodb->getCmdString(iter->markerID());
            newMarker.fMarker = traceString.c_str();
            gpu->addGpuTraceMarker(&newMarker);
        }

        // TODO temporary hack
        if (Cmd::kDrawBatch_CmdType == iter->type()) {
            // Batches were pre-generated above; just flush their queued draws.
            DrawBatch* db = reinterpret_cast<DrawBatch*>(iter.get());
            fBatchTarget.flushNext(db->fBatch->numberOfDraws());

            if (iter->isTraced()) {
                gpu->removeGpuTraceMarker(&newMarker);
            }
            continue;
        }

        if (Cmd::kSetState_CmdType == iter->type()) {
#ifndef USE_BITMAP_TEXTBLOBS
            // Under USE_BITMAP_TEXTBLOBS SetStates were already executed in
            // the pre-pass above, so this is compiled out.
            SetState* ss = reinterpret_cast<SetState*>(iter.get());

            ss->execute(gpu, currentState);
            currentState = ss;
#endif
        } else {
            iter->execute(gpu, currentState);
        }

        if (iter->isTraced()) {
            gpu->removeGpuTraceMarker(&newMarker);
        }
    }

    // TODO see copious notes about hack
    fBatchTarget.postFlush();
}

// Issues the recorded draw using the pipeline/processor captured in `state`.
void GrTargetCommands::Draw::execute(GrGpu* gpu, const SetState* state) {
    SkASSERT(state);
    DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
                  &state->fBatchTracker);
    gpu->draw(args, fInfo);
}

// Stencils the path into the target's stencil buffer using the parameters
// captured at record time; ignores the playback SetState.
void GrTargetCommands::StencilPath::execute(GrGpu* gpu, const SetState*) {
    GrGpu::StencilPathState state;
    state.fRenderTarget = fRenderTarget.get();
    state.fScissor = &fScissor;
    state.fStencil = &fStencil;
    state.fUseHWAA = fUseHWAA;
    state.fViewMatrix = &fViewMatrix;

    gpu->stencilPath(this->path(), state);
}

// Draws the recorded path with the current playback state.
void GrTargetCommands::DrawPath::execute(GrGpu* gpu, const SetState* state) {
    SkASSERT(state);
    DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
                  &state->fBatchTracker);
    gpu->drawPath(args, this->path(), fStencilSettings);
}

// Draws the recorded range of paths with the current playback state.
void GrTargetCommands::DrawPaths::execute(GrGpu* gpu, const SetState* state) {
    SkASSERT(state);
    DrawArgs args(state->fPrimitiveProcessor.get(), state->getPipeline(), &state->fDesc,
                  &state->fBatchTracker);
    gpu->drawPaths(args, this->pathRange(),
                   fIndices, fIndexType,
                   fTransforms, fTransformType,
                   fCount, fStencilSettings);
}

// Generates the batch's geometry; no GrGpu needed at this stage (the actual
// draws are flushed later via the batch target).
void GrTargetCommands::DrawBatch::execute(GrGpu*, const SetState* state) {
    SkASSERT(state);
    fBatch->generateGeometry(fBatchTarget, state->getPipeline());
}

// Builds the program descriptor for this state when it carries a primitive
// processor.
// TODO sometimes we have a prim proc, other times we have a GrBatch.
// Eventually we will only have GrBatch and we can delete this
void GrTargetCommands::SetState::execute(GrGpu* gpu, const SetState*) {
    if (fPrimitiveProcessor) {
        gpu->buildProgramDesc(&fDesc, *fPrimitiveProcessor, *getPipeline(), fBatchTracker);
    }
}

// Clears the target — or discards it when the recorded color is the
// GrColor_ILLEGAL sentinel written by recordDiscard().
void GrTargetCommands::Clear::execute(GrGpu* gpu, const SetState*) {
    if (GrColor_ILLEGAL == fColor) {
        gpu->discard(this->renderTarget());
    } else {
        gpu->clear(&fRect, fColor, fCanIgnoreRect, this->renderTarget());
    }
}

// Clears the stencil-clip bits in the recorded rect.
void GrTargetCommands::ClearStencilClip::execute(GrGpu* gpu, const SetState*) {
    gpu->clearStencilClip(fRect, fInsideClip, this->renderTarget());
}

// Performs the recorded surface-to-surface copy.
void GrTargetCommands::CopySurface::execute(GrGpu* gpu, const SetState*) {
    gpu->copySurface(this->dst(), this->src(), fSrcRect, fDstPoint);
}

// Records a surface copy if the GPU reports it can perform it; returns the
// recorded command or NULL when the copy is unsupported.
GrTargetCommands::Cmd* GrTargetCommands::recordCopySurface(GrInOrderDrawBuffer* iodb,
                                                           GrSurface* dst,
                                                           GrSurface* src,
                                                           const SkIRect& srcRect,
                                                           const SkIPoint& dstPoint) {
    if (iodb->getGpu()->canCopySurface(dst, src, srcRect, dstPoint)) {
        CLOSE_BATCH
        CopySurface* cs = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, CopySurface, (dst, src));
        cs->fSrcRect = srcRect;
        cs->fDstPoint = dstPoint;
        return cs;
    }
    return NULL;
}

// Primitive-processor flavor: appends a SetState for `primProc`, builds the
// pipeline into it, and returns false (popping the SetState) when the
// pipeline must be skipped. When the new state is provably equal to the
// previous recorded one, the redundant SetState is popped and fPrevState is
// reused; otherwise the new state becomes fPrevState.
bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
                                                  const GrPrimitiveProcessor* primProc,
                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, (primProc));
    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

    if (ss->getPipeline()->mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    ss->fPrimitiveProcessor->initBatchTracker(&ss->fBatchTracker,
                                              ss->getPipeline()->getInitBatchTracker());

    if (fPrevState && fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->fPrimitiveProcessor->canMakeEqual(fPrevState->fBatchTracker,
                                                      *ss->fPrimitiveProcessor,
                                                      ss->fBatchTracker) &&
        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
        // Redundant with the previous state — drop it.
        fCmdBuffer.pop_back();
    } else {
        fPrevState = ss;
        iodb->recordTraceMarkersIfNecessary(ss);
    }
    return true;
}

// GrBatch flavor of the above: the SetState carries no primitive processor
// (the batch supplies its own geometry), so state dedup only requires that
// the previous state also be processor-less with an equal pipeline. Note the
// open batch is only closed when the state actually changes, so compatible
// batches can keep combining.
bool GrTargetCommands::setupPipelineAndShouldDraw(GrInOrderDrawBuffer* iodb,
                                                  GrBatch* batch,
                                                  const GrDrawTarget::PipelineInfo& pipelineInfo) {
    SetState* ss = GrNEW_APPEND_TO_RECORDER(fCmdBuffer, SetState, ());
    iodb->setupPipeline(pipelineInfo, ss->pipelineLocation());

    if (ss->getPipeline()->mustSkip()) {
        fCmdBuffer.pop_back();
        return false;
    }

    batch->initBatchTracker(ss->getPipeline()->getInitBatchTracker());

    if (fPrevState && !fPrevState->fPrimitiveProcessor.get() &&
        fPrevState->getPipeline()->isEqual(*ss->getPipeline())) {
        // Redundant with the previous state — drop it.
        fCmdBuffer.pop_back();
    } else {
        CLOSE_BATCH
        fPrevState = ss;
        iodb->recordTraceMarkersIfNecessary(ss);
    }
    return true;
}