GrGpu.h revision 42c456fd20bd45dd02f6d5eb0af7acf04341b1ee
/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "GrGpuCommandBuffer.h"
#include "GrProgramDesc.h"
#include "GrSwizzle.h"
#include "GrAllocator.h"
#include "GrTextureProducer.h"
#include "GrTypes.h"
#include "GrXferProcessor.h"
#include "SkPath.h"
#include "SkTArray.h"
#include <map>

class GrBuffer;
class GrContext;
struct GrContextOptions;
class GrGLContext;
class GrMesh;
class GrNonInstancedVertices;
class GrPath;
class GrPathRange;
class GrPathRenderer;
class GrPathRendererChain;
class GrPathRendering;
class GrPipeline;
class GrPrimitiveProcessor;
class GrRenderTarget;
class GrSemaphore;
class GrStencilAttachment;
class GrStencilSettings;
class GrSurface;
class GrTexture;

namespace gr_instanced { class InstancedRendering; }

/**
 * Abstract base class for the backend-specific (GL, Vulkan, ...) half of the GPU
 * device: resource creation, pixel transfer, copies, fences/semaphores, and stats.
 * Concrete backends implement the private onXxx() virtuals.
 */
class GrGpu : public SkRefCnt {
public:
    /**
     * Create an instance of GrGpu that matches the specified backend. If the requested backend is
     * not supported (at compile-time or run-time) this returns nullptr. The context will not be
     * fully constructed and should not be used by GrGpu until after this function returns.
     */
    static GrGpu* Create(GrBackend, GrBackendContext, const GrContextOptions&, GrContext* context);

    ////////////////////////////////////////////////////////////////////////////

    GrGpu(GrContext* context);
    ~GrGpu() override;

    GrContext* getContext() { return fContext; }
    const GrContext* getContext() const { return fContext; }

    /**
     * Gets the capabilities of the draw target.
     */
    const GrCaps* caps() const { return fCaps.get(); }

    GrPathRendering* pathRendering() { return fPathRendering.get(); }

    enum class DisconnectType {
        // No cleanup should be attempted, immediately cease making backend API calls
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by GrContext when the underlying backend context is already or will be destroyed
    // before GrContext.
    virtual void disconnect(DisconnectType);

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }

    /**
     * Creates a texture object. If kRenderTarget_GrSurfaceFlag the texture can
     * be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrCaps.
     *
     * @param desc      describes the texture to be created.
     * @param budgeted  does this texture count against the resource cache budget?
     * @param texels    array of mipmap levels containing texel data to load.
     *                  Each level begins with full-size palette data for paletted textures.
     *                  For compressed formats the level contains the compressed pixel data.
     *                  Otherwise, it contains width*height texels. If there is only one
     *                  element and it contains nullptr fPixels, texture data is
     *                  uninitialized.
     * @return  The texture object if successful, otherwise nullptr.
     */
    GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                             const SkTArray<GrMipLevel>& texels);

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted) {
        return this->createTexture(desc, budgeted, SkTArray<GrMipLevel>());
    }

    /** Simplified createTexture() interface for when there is only a base level */
    GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted, const void* level0Data,
                             size_t rowBytes) {
        SkASSERT(level0Data);
        GrMipLevel level = { level0Data, rowBytes };
        SkSTArray<1, GrMipLevel> array;
        array.push_back() = level;
        return this->createTexture(desc, budgeted, array);
    }

    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTargetDesc&,GrWrapOwnership);

    /**
     * Implements GrResourceProvider::wrapBackendTextureAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size           size of buffer to create.
     * @param intendedType   hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern  hint to the graphics subsystem about how the data will be accessed.
     * @param data           optional data with which to initialize the buffer.
     *
     * @return the buffer if successful, otherwise nullptr.
     */
    GrBuffer* createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern accessPattern,
                           const void* data = nullptr);

    /**
     * Creates an instanced rendering object if it is supported on this platform.
     */
    gr_instanced::InstancedRendering* createInstancedRendering();

    /**
     * Resolves MSAA.
     */
    void resolveRenderTarget(GrRenderTarget* target);

    /** Info struct returned by getReadPixelsInfo about performing intermediate draws before
        reading pixels for performance or correctness. */
    struct ReadPixelTempDrawInfo {
        /** If the GrGpu is requesting that the caller do a draw to an intermediate surface then
            this is descriptor for the temp surface. The draw should always be a rect with
            dst 0,0,w,h. */
        GrSurfaceDesc fTempSurfaceDesc;
        /** Indicates whether there is a performance advantage to using an exact match texture
            (in terms of width and height) for the intermediate texture instead of approximate. */
        SkBackingFit fTempSurfaceFit;
        /** Swizzle to apply during the draw. This is used to compensate for either feature or
            performance limitations in the underlying 3D API. */
        GrSwizzle fSwizzle;
        /** The config that should be used to read from the temp surface after the draw. This may be
            different than the original read config in order to compensate for swizzling. The
            read data will effectively be in the original read config. */
        GrPixelConfig fReadConfig;
    };

    /** Describes why an intermediate draw must/should be performed before readPixels. */
    enum DrawPreference {
        /** On input means that the caller would proceed without draw if the GrGpu doesn't request
            one.
            On output means that the GrGpu is not requesting a draw. */
        kNoDraw_DrawPreference,
        /** Means that the client would prefer a draw for performance of the readback but
            can satisfy a straight readPixels call on the inputs without an intermediate draw.
            getReadPixelsInfo will never set the draw preference to this value but may leave
            it set. */
        kCallerPrefersDraw_DrawPreference,
        /** On output means that GrGpu would prefer a draw for performance of the readback but
            can satisfy a straight readPixels call on the inputs without an intermediate draw. The
            caller of getReadPixelsInfo should never specify this on input. */
        kGpuPrefersDraw_DrawPreference,
        /** On input means that the caller requires a draw to do a transformation and there is no
            CPU fallback.
            On output means that GrGpu can only satisfy the readPixels request if the intermediate
            draw is performed.
          */
        kRequireDraw_DrawPreference
    };

    /**
     * Used to negotiate whether and how an intermediate draw should or must be performed before
     * a readPixels call. If this returns false then GrGpu could not deduce an intermediate draw
     * that would allow a successful readPixels call. The passed width, height, and rowBytes,
     * must be non-zero and already reflect clipping to the src bounds.
     */
    bool getReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
                           GrPixelConfig readConfig, DrawPreference*, ReadPixelTempDrawInfo*);

    /** Info struct returned by getWritePixelsInfo about performing an intermediate draw in order
        to write pixels to a GrSurface for either performance or correctness reasons. */
    struct WritePixelTempDrawInfo {
        /** If the GrGpu is requesting that the caller upload to an intermediate surface and draw
            that to the dst then this is the descriptor for the intermediate surface. The caller
            should upload the pixels such that the upper left pixel of the upload rect is at 0,0 in
            the intermediate surface.*/
        GrSurfaceDesc fTempSurfaceDesc;
        /** Swizzle to apply during the draw. This is used to compensate for either feature or
            performance limitations in the underlying 3D API.
         */
        GrSwizzle fSwizzle;
        /** The config that should be specified when uploading the *original* data to the temp
            surface before the draw. This may be different than the original src data config in
            order to compensate for swizzling that will occur when drawing. */
        GrPixelConfig fWriteConfig;
    };

    /**
     * Used to negotiate whether and how an intermediate surface should be used to write pixels to
     * a GrSurface. If this returns false then GrGpu could not deduce an intermediate draw
     * that would allow a successful transfer of the src pixels to the dst. The passed width,
     * height, and rowBytes, must be non-zero and already reflect clipping to the dst bounds.
     */
    bool getWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                            GrPixelConfig srcConfig, DrawPreference*, WritePixelTempDrawInfo*);

    /**
     * Reads a rectangle of pixels from a render target.
     *
     * @param surface       The surface to read from
     * @param left          left edge of the rectangle to read (inclusive)
     * @param top           top edge of the rectangle to read (inclusive)
     * @param width         width of rectangle to read in pixels.
     * @param height        height of rectangle to read in pixels.
     * @param config        the pixel config of the destination buffer
     * @param buffer        memory to read the rectangle into.
     * @param rowBytes      the number of bytes between consecutive rows. Zero
     *                      means rows are tightly packed.
     *
     * @return true if the read succeeded, false if not. The read can fail
     *              because of a unsupported pixel config or because no render
     *              target is currently set.
     */
    bool readPixels(GrSurface* surface,
                    int left, int top, int width, int height,
                    GrPixelConfig config, void* buffer, size_t rowBytes);

    /**
     * Updates the pixels in a rectangle of a surface.
     *
     * @param surface       The surface to write to.
     * @param left          left edge of the rectangle to write (inclusive)
     * @param top           top edge of the rectangle to write (inclusive)
     * @param width         width of rectangle to write in pixels.
     * @param height        height of rectangle to write in pixels.
     * @param config        the pixel config of the source buffer
     * @param texels        array of mipmap levels containing texture data
     */
    bool writePixels(GrSurface* surface,
                     int left, int top, int width, int height,
                     GrPixelConfig config,
                     const SkTArray<GrMipLevel>& texels);

    /**
     * This function is a shim which creates a SkTArray<GrMipLevel> of size 1.
     * It then calls writePixels with that SkTArray.
     *
     * @param buffer   memory to read pixels from.
     * @param rowBytes number of bytes between consecutive rows. Zero
     *                 means rows are tightly packed.
     */
    bool writePixels(GrSurface* surface,
                     int left, int top, int width, int height,
                     GrPixelConfig config, const void* buffer,
                     size_t rowBytes);

    /**
     * Updates the pixels in a rectangle of a surface using a buffer
     *
     * @param surface        The surface to write to.
     * @param left           left edge of the rectangle to write (inclusive)
     * @param top            top edge of the rectangle to write (inclusive)
     * @param width          width of rectangle to write in pixels.
     * @param height         height of rectangle to write in pixels.
     * @param config         the pixel config of the source buffer
     * @param transferBuffer GrBuffer to read pixels from (type must be "kCpuToGpu")
     * @param offset         offset from the start of the buffer
     * @param rowBytes       number of bytes between consecutive rows. Zero
     *                       means rows are tightly packed.
     * @param fence          backend fence handle written for the transfer.
     *                       NOTE(review): exact signaling semantics are not visible in this
     *                       header — confirm against onTransferPixels() implementations.
     */
    bool transferPixels(GrSurface* surface,
                        int left, int top, int width, int height,
                        GrPixelConfig config, GrBuffer* transferBuffer,
                        size_t offset, size_t rowBytes, GrFence* fence);

    /**
     * This can be called before allocating a texture to be a dst for copySurface. This is only
     * used for doing dst copies needed in blends, thus the src is always a GrRenderTarget. It will
     * populate the origin, config, and flags fields of the desc such that copySurface can
     * efficiently succeed.
     */
    virtual bool initDescForDstCopy(const GrRenderTarget* src, GrSurfaceDesc* desc) const = 0;

    // After the client interacts directly with the 3D context state the GrGpu
    // must resync its internal state and assumptions about 3D context state.
    // Each time this occurs the GrGpu bumps a timestamp.
    // At 10 resets / frame and 60fps a 64bit timestamp will overflow in about
    // a billion years.
    typedef uint64_t ResetTimestamp;

    // This timestamp is always older than the current timestamp
    static const ResetTimestamp kExpiredTimestamp = 0;
    // Returns a timestamp based on the number of times the context was reset.
    // This timestamp can be used to lazily detect when cached 3D context state
    // is dirty.
    ResetTimestamp getResetTimestamp() const { return fResetTimestamp; }

    // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
    // take place at the GrOpList level and this function implements faster copy paths. The rect
    // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
    // src/dst bounds and non-empty.
    bool copySurface(GrSurface* dst,
                     GrSurface* src,
                     const SkIRect& srcRect,
                     const SkIPoint& dstPoint);

    struct MultisampleSpecs {
        MultisampleSpecs(uint8_t uniqueID, int effectiveSampleCnt, const SkPoint* locations)
            : fUniqueID(uniqueID),
              fEffectiveSampleCnt(effectiveSampleCnt),
              fSampleLocations(locations) {}

        // Nonzero ID that uniquely identifies these multisample specs.
        uint8_t fUniqueID;
        // The actual number of samples the GPU will run. NOTE: this value can be greater than
        // the render target's sample count.
        int fEffectiveSampleCnt;
        // If sample locations are supported, points to the subpixel locations at which the GPU will
        // sample. Pixel center is at (.5, .5), and (0, 0) indicates the top left corner.
        const SkPoint* fSampleLocations;
    };

    // Finds a render target's multisample specs. The pipeline is only needed in case we need to
    // flush the draw state prior to querying multisample info. The pipeline is not expected to
    // affect the multisample information itself.
    const MultisampleSpecs& queryMultisampleSpecs(const GrPipeline&);

    // Finds the multisample specs with a given unique id.
    const MultisampleSpecs& getMultisampleSpecs(uint8_t uniqueID) {
        SkASSERT(uniqueID > 0 && uniqueID < fMultisampleSpecs.count());
        return fMultisampleSpecs[uniqueID];
    }

    // Creates a GrGpuCommandBuffer in which the GrOpList can send draw commands to instead of
    // directly to the Gpu object. This currently does not take a GrRenderTarget. The command buffer
    // is expected to infer the render target from the first draw, clear, or discard. This is an
    // awkward workaround that goes away after MDB is complete and the render target is known from
    // the GrRenderTargetOpList.
    virtual GrGpuCommandBuffer* createCommandBuffer(
            const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
            const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) = 0;

    // Called by GrOpList when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits).
    virtual void finishOpList() {}

    virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
    virtual bool waitFence(GrFence, uint64_t timeout = 1000) = 0;
    virtual void deleteFence(GrFence) const = 0;

    virtual sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore() = 0;
    virtual void insertSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
    virtual void waitSemaphore(sk_sp<GrSemaphore> semaphore) = 0;

    // Ensures that all queued up driver-level commands have been sent to the GPU. For example, on
    // OpenGL, this calls glFlush.
    virtual void flush() = 0;

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    // Counters for backend activity (draws, uploads, compiles, ...). Compiled to
    // no-ops when GR_GPU_STATS is disabled.
    class Stats {
    public:
#if GR_GPU_STATS
        Stats() { this->reset(); }

        void reset() {
            fRenderTargetBinds = 0;
            fShaderCompilations = 0;
            fTextureCreates = 0;
            fTextureUploads = 0;
            fTransfersToTexture = 0;
            fStencilAttachmentCreates = 0;
            fNumDraws = 0;
            fNumFailedDraws = 0;
        }

        int renderTargetBinds() const { return fRenderTargetBinds; }
        void incRenderTargetBinds() { fRenderTargetBinds++; }
        int shaderCompilations() const { return fShaderCompilations; }
        void incShaderCompilations() { fShaderCompilations++; }
        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }
        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }
        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
        void incNumDraws() { fNumDraws++; }
        void incNumFailedDraws() { ++fNumFailedDraws; }
        void dump(SkString*);
        void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
        int numDraws() const { return fNumDraws; }
        int numFailedDraws() const { return fNumFailedDraws; }
    private:
        int fRenderTargetBinds;
        int fShaderCompilations;
        int fTextureCreates;
        int fTextureUploads;
        int fTransfersToTexture;
        int fStencilAttachmentCreates;
        int fNumDraws;
        int fNumFailedDraws;
#else
        // Stats disabled: all operations are no-ops with zero storage cost.
        void dump(SkString*) {}
        void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
        void incRenderTargetBinds() {}
        void incShaderCompilations() {}
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incStencilAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
#endif
    };

    Stats* stats() { return &fStats; }

    /** Creates a texture directly in the backend API without wrapping it in a GrTexture. This is
        only to be used for testing (particularly for testing the methods that import an externally
        created texture into Skia). Must be matched with a call to deleteTestingOnlyTexture(). */
    virtual GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                            GrPixelConfig config,
                                                            bool isRenderTarget = false) = 0;
    /** Check a handle represents an actual texture in the backend API that has not been freed. */
    virtual bool isTestingOnlyBackendTexture(GrBackendObject) const = 0;
    /** If ownership of the backend texture has been transferred pass true for abandonTexture. This
        will do any necessary cleanup of the handle without freeing the texture in the backend
        API. */
    virtual void deleteTestingOnlyBackendTexture(GrBackendObject,
                                                 bool abandonTexture = false) = 0;

    // width and height may be larger than rt (if underlying API allows it).
    // Returns nullptr if compatible sb could not be created, otherwise the caller owns the ref on
    // the GrStencilAttachment.
    virtual GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                        int width,
                                                                        int height) = 0;
    // clears target's entire stencil buffer to 0
    virtual void clearStencil(GrRenderTarget* target) = 0;

    // draws an outline rectangle for debugging/visualization purposes.
    virtual void drawDebugWireRect(GrRenderTarget*, const SkIRect&, GrColor) = 0;

    // Determines whether a texture will need to be rescaled in order to be used with the
    // GrSamplerParams. This variation is called when the caller will create a new texture using the
    // resource provider from a non-texture src (cpu-backed image, ...).
    bool makeCopyForTextureParams(int width, int height, const GrSamplerParams&,
                                  GrTextureProducer::CopyParams*, SkScalar scaleAdjust[2]) const;

    // Like the above but this variation should be called when the caller is not creating the
    // original texture but rather was handed the original texture. It adds additional checks
    // relevant to original textures that were created external to Skia via
    // GrResourceProvider::wrap methods.
    bool makeCopyForTextureParams(GrTexture* texture, const GrSamplerParams& params,
                                  GrTextureProducer::CopyParams* copyParams,
                                  SkScalar scaleAdjust[2]) const {
        if (this->makeCopyForTextureParams(texture->width(), texture->height(), params,
                                           copyParams, scaleAdjust)) {
            return true;
        }
        return this->onMakeCopyForTextureParams(texture, params, copyParams, scaleAdjust);
    }

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code
    virtual void resetShaderCacheForTesting() const {}

    // Triggers a full context-state reset (see markContextDirty) if any dirty bits are set.
    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

protected:
    // Raises *preference to at least `elevation`; DrawPreference values are ordered by
    // increasing insistence on a draw (verified by the static asserts below).
    static void ElevateDrawPreference(GrGpu::DrawPreference* preference,
                                      GrGpu::DrawPreference elevation) {
        GR_STATIC_ASSERT(GrGpu::kCallerPrefersDraw_DrawPreference > GrGpu::kNoDraw_DrawPreference);
        GR_STATIC_ASSERT(GrGpu::kGpuPrefersDraw_DrawPreference >
                         GrGpu::kCallerPrefersDraw_DrawPreference);
        GR_STATIC_ASSERT(GrGpu::kRequireDraw_DrawPreference >
                         GrGpu::kGpuPrefersDraw_DrawPreference);
        *preference = SkTMax(*preference, elevation);
    }

    // Handles cases where a surface will be updated without a call to flushRenderTarget
    void didWriteToSurface(GrSurface* surface, const SkIRect* bounds, uint32_t mipLevels = 1) const;

    Stats fStats;
    std::unique_ptr<GrPathRendering> fPathRendering;
    // Subclass must initialize this in its constructor.
    sk_sp<const GrCaps> fCaps;

    typedef SkTArray<SkPoint, true> SamplePattern;

private:
    // called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) = 0;

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

    // overridden by backend-specific derived class to create objects.
    // Texture size and sample size will have already been validated in base class before
    // onCreateTexture/CompressedTexture are called.
    virtual GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
                                       SkBudgeted budgeted,
                                       const SkTArray<GrMipLevel>& texels) = 0;
    virtual GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
                                                 SkBudgeted budgeted,
                                                 const SkTArray<GrMipLevel>& texels) = 0;

    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&,
                                                            GrWrapOwnership) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&)=0;
    virtual GrBuffer* onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern,
                                     const void* data) = 0;

    virtual gr_instanced::InstancedRendering* onCreateInstancedRendering() = 0;

    virtual bool onMakeCopyForTextureParams(GrTexture* texture, const GrSamplerParams&,
                                            GrTextureProducer::CopyParams*,
                                            SkScalar scaleAdjust[2]) const { return false; }

    virtual bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight,
                                     size_t rowBytes, GrPixelConfig readConfig, DrawPreference*,
                                     ReadPixelTempDrawInfo*) = 0;
    virtual bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                      GrPixelConfig srcConfig, DrawPreference*,
                                      WritePixelTempDrawInfo*) = 0;

    // overridden by backend-specific derived class to perform the surface read
    virtual bool onReadPixels(GrSurface*,
                              int left, int top,
                              int width, int height,
                              GrPixelConfig,
                              void* buffer,
                              size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the surface write
    virtual bool onWritePixels(GrSurface*,
                               int left, int top, int width, int height,
                               GrPixelConfig config,
                               const SkTArray<GrMipLevel>& texels) = 0;

    // overridden by backend-specific derived class to perform the buffer-to-surface transfer
    virtual bool onTransferPixels(GrSurface*,
                                  int left, int top, int width, int height,
                                  GrPixelConfig config, GrBuffer* transferBuffer,
                                  size_t offset, size_t rowBytes) = 0;

    // overridden by backend-specific derived class to perform the resolve
    virtual void onResolveRenderTarget(GrRenderTarget* target) = 0;

    // overridden by backend specific derived class to perform the copy surface
    virtual bool onCopySurface(GrSurface* dst,
                               GrSurface* src,
                               const SkIRect& srcRect,
                               const SkIPoint& dstPoint) = 0;

    // overridden by backend specific derived class to perform the multisample queries
    virtual void onQueryMultisampleSpecs(GrRenderTarget*, const GrStencilSettings&,
                                         int* effectiveSampleCnt, SamplePattern*) = 0;

    // Delegates to onResetContext, clears the dirty bits, and bumps the reset timestamp.
    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
        ++fResetTimestamp;
    }

    // Strict-weak ordering over sample patterns so they can key the id map below.
    struct SamplePatternComparator {
        bool operator()(const SamplePattern&, const SamplePattern&) const;
    };

    typedef std::map<SamplePattern, uint8_t, SamplePatternComparator> MultisampleSpecsIdMap;

    ResetTimestamp fResetTimestamp;
    uint32_t fResetBits;
    MultisampleSpecsIdMap fMultisampleSpecsIdMap;
    SkSTArray<1, MultisampleSpecs, true> fMultisampleSpecs;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrContext* fContext;

    friend class GrPathRendering;
    friend class gr_instanced::InstancedRendering;
    typedef SkRefCnt INHERITED;
};

#endif