GrContext.h revision fd01ce05ef7902c49b0272b3524a389693c72b35
/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrContext_DEFINED
#define GrContext_DEFINED

#include "GrCaps.h"
#include "GrClip.h"
#include "GrColor.h"
#include "GrPaint.h"
#include "GrRenderTarget.h"
#include "GrTextureProvider.h"
#include "SkMatrix.h"
#include "SkPathEffect.h"
#include "SkTypes.h"
#include "../private/GrAuditTrail.h"
#include "../private/GrSingleOwner.h"
#include "../private/SkMutex.h"

struct GrBatchAtlasConfig;
class GrBatchFontCache;
struct GrContextOptions;
class GrContextPriv;
class GrContextThreadSafeProxy;
class GrDrawingManager;
class GrRenderTargetContext;
class GrFragmentProcessor;
class GrGpu;
class GrIndexBuffer;
class GrOvalRenderer;
class GrPath;
class GrPipelineBuilder;
class GrResourceEntry;
class GrResourceCache;
class GrResourceProvider;
class GrTestTarget;
class GrTextBlobCache;
class GrTextContext;
class GrTextureParams;
class GrVertexBuffer;
class GrSwizzle;
class SkTraceMemoryDump;

class SK_API GrContext : public SkRefCnt {
public:
    /**
     * Creates a GrContext for a backend context.
     */
    static GrContext* Create(GrBackend, GrBackendContext, const GrContextOptions& options);
    static GrContext* Create(GrBackend, GrBackendContext);
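    //
    // A minimal usage sketch (not part of the original header): creating a context for an OpenGL
    // backend and releasing it when done. The 'backendContext' handle below is a hypothetical,
    // platform-provided GrBackendContext value.
    //
    //     GrContextOptions options;
    //     GrContext* context = GrContext::Create(kOpenGL_GrBackend, backendContext, options);
    //     if (context) {
    //         // ... issue draws through Skia / GrContext ...
    //         context->unref();
    //     }
    //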

    /**
     * Only defined in test apps.
     */
    static GrContext* CreateMockContext();

    virtual ~GrContext();

    sk_sp<GrContextThreadSafeProxy> threadSafeProxy();

    /**
     * The GrContext normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and that it should resend it. For
     * good performance this shouldn't be called frequently.
     * The flag bits, state, are dependent on which backend is used by the
     * context, either GL or D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);

    /**
     * Callback function to allow classes to clean up on GrContext destruction.
     * The 'info' field is filled in with the 'info' passed to addCleanUp.
     */
    typedef void (*PFCleanUpFunc)(const GrContext* context, void* info);

    /**
     * Add a function to be called from within GrContext's destructor.
     * This gives classes a chance to free resources held on a per-context basis.
     * The 'info' parameter will be stored and passed to the callback function.
     */
    void addCleanUp(PFCleanUpFunc cleanUp, void* info) {
        CleanUpData* entry = fCleanUpData.push();

        entry->fFunc = cleanUp;
        entry->fInfo = info;
    }
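    //
    // Illustrative sketch (not from the original header): registering a per-context cleanup
    // callback. 'MyCache', 'releaseResourcesFor', and 'myCache' are hypothetical client-side
    // names.
    //
    //     static void MyCacheCleanUp(const GrContext* context, void* info) {
    //         static_cast<MyCache*>(info)->releaseResourcesFor(context);
    //     }
    //
    //     context->addCleanUp(MyCacheCleanUp, myCache);
    //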

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
     * GrContext and any of its created resource objects will not make backend 3D API calls. Content
     * rendered but not previously flushed may be lost. After this function is called all subsequent
     * calls on the GrContext will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and further
     * API calls may crash.
     */
    void abandonContext();

    /**
     * This is similar to abandonContext(), however the underlying 3D context is not yet lost and
     * the GrContext will clean up all allocated resources before returning. After returning it will
     * assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D context
     * but can't guarantee that GrContext will be destroyed first (perhaps because it may be ref'ed
     * elsewhere by either the client or Skia objects).
     */
    void releaseResourcesAndAbandonContext();

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /**
     *  Return the current GPU resource cache limits.
     *
     *  @param maxResources If non-null, returns maximum number of resources that
     *                      can be held in the cache.
     *  @param maxResourceBytes If non-null, returns maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     *  Gets the current GPU resource cache usage.
     *
     *  @param resourceCount If non-null, returns the number of resources that are held in the
     *                       cache.
     *  @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     *  Specify the GPU resource cache limits. If the current cache exceeds either
     *  of these, it will be purged (LRU) to keep the cache within these limits.
     *
     *  @param maxResources The maximum number of resources that can be held in
     *                      the cache.
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
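    //
    // Illustrative sketch (not from the original header): query the current limits and double the
    // byte budget while keeping the resource-count limit unchanged.
    //
    //     int maxResources;
    //     size_t maxResourceBytes;
    //     context->getResourceCacheLimits(&maxResources, &maxResourceBytes);
    //     context->setResourceCacheLimits(maxResources, 2 * maxResourceBytes);
    //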

    GrTextureProvider* textureProvider() { return fTextureProvider; }
    const GrTextureProvider* textureProvider() const { return fTextureProvider; }

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
     * pressure.
     */
    void freeGpuResources();

    /**
     * Purge all the unlocked resources from the cache.
     * This entry point is mainly meant for timing texture uploads
     * and is not defined in normal builds of Skia.
     */
    void purgeAllUnlockedResources();

    /** Access the context capabilities */
    const GrCaps* caps() const { return fCaps; }

    /**
     * Returns the recommended sample count for a render target when using this
     * context.
     *
     * @param  config the configuration of the render target.
     * @param  dpi the display density in dots per inch.
     *
     * @return sample count that should perform well and have good enough
     *         rendering quality for the display. Alternatively returns 0 if
     *         MSAA is not supported or not recommended by default.
     */
    int getRecommendedSampleCount(GrPixelConfig config, SkScalar dpi) const;

    /**
     * Create both a GrRenderTarget and a matching GrRenderTargetContext to wrap it.
     * We guarantee that "asTexture" will succeed for renderTargetContexts created
     * via this entry point.
     */
    sk_sp<GrRenderTargetContext> makeRenderTargetContext(
                                                 SkBackingFit fit,
                                                 int width, int height,
                                                 GrPixelConfig config,
                                                 sk_sp<SkColorSpace> colorSpace,
                                                 int sampleCnt = 0,
                                                 GrSurfaceOrigin origin = kDefault_GrSurfaceOrigin,
                                                 const SkSurfaceProps* surfaceProps = nullptr,
                                                 SkBudgeted = SkBudgeted::kYes);
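    //
    // Illustrative sketch (not from the original header): creating a 256x256, single-sampled,
    // exactly-sized 8888 render target context with no color space attached.
    //
    //     sk_sp<GrRenderTargetContext> rtc = context->makeRenderTargetContext(
    //             SkBackingFit::kExact, 256, 256, kRGBA_8888_GrPixelConfig, nullptr);
    //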

    // Create a new render target context as above but have it backed by a deferred-style
    // GrRenderTargetProxy rather than one that is backed by an actual GrRenderTarget
    sk_sp<GrRenderTargetContext> makeDeferredRenderTargetContext(
                                                 SkBackingFit fit,
                                                 int width, int height,
                                                 GrPixelConfig config,
                                                 sk_sp<SkColorSpace> colorSpace,
                                                 int sampleCnt = 0,
                                                 GrSurfaceOrigin origin = kDefault_GrSurfaceOrigin,
                                                 const SkSurfaceProps* surfaceProps = nullptr,
                                                 SkBudgeted = SkBudgeted::kYes);
    /*
     * This method will attempt to create a renderTargetContext that has, at least, the number of
     * channels and precision per channel as requested in 'config' (e.g., A8 and 888 can be
     * converted to 8888). It may also swizzle the channels (e.g., BGRA -> RGBA).
     * SRGB-ness will be preserved.
     */
    sk_sp<GrRenderTargetContext> makeRenderTargetContextWithFallback(
                                                 SkBackingFit fit,
                                                 int width, int height,
                                                 GrPixelConfig config,
                                                 sk_sp<SkColorSpace> colorSpace,
                                                 int sampleCnt = 0,
                                                 GrSurfaceOrigin origin = kDefault_GrSurfaceOrigin,
                                                 const SkSurfaceProps* surfaceProps = nullptr,
                                                 SkBudgeted budgeted = SkBudgeted::kYes);

    // Create a new render target context as above but have it backed by a deferred-style
    // GrRenderTargetProxy rather than one that is backed by an actual GrRenderTarget
    sk_sp<GrRenderTargetContext> makeDeferredRenderTargetContextWithFallback(
                                                 SkBackingFit fit,
                                                 int width, int height,
                                                 GrPixelConfig config,
                                                 sk_sp<SkColorSpace> colorSpace,
                                                 int sampleCnt = 0,
                                                 GrSurfaceOrigin origin = kDefault_GrSurfaceOrigin,
                                                 const SkSurfaceProps* surfaceProps = nullptr,
                                                 SkBudgeted budgeted = SkBudgeted::kYes);

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Call to ensure all drawing to the context has been issued to the
     * underlying 3D API.
     */
    void flush();

    /**
     * These flags can be used with the read/write pixels functions below.
     */
    enum PixelOpsFlags {
        /** The GrContext will not be flushed before the surface read or write. This means that
            the read or write may occur before previous draws have executed. */
        kDontFlush_PixelOpsFlag = 0x1,
        /** Any surface writes should be flushed to the backend 3D API after the surface operation
            is complete */
        kFlushWrites_PixelOp = 0x2,
        /** The src for write or dst for read is unpremultiplied. This is only respected if both
            the src and dst configs are an RGBA/BGRA 8888 format. */
        kUnpremul_PixelOpsFlag  = 0x4,
    };

    /**
     * Reads a rectangle of pixels from a surface.
     * @param surface       the surface to read from.
     * @param left          left edge of the rectangle to read (inclusive)
     * @param top           top edge of the rectangle to read (inclusive)
     * @param width         width of rectangle to read in pixels.
     * @param height        height of rectangle to read in pixels.
     * @param config        the pixel config of the destination buffer
     * @param buffer        memory to read the rectangle into.
     * @param rowBytes      number of bytes between consecutive rows. Zero means rows are tightly
     *                      packed.
     * @param pixelOpsFlags see PixelOpsFlags enum above.
     *
     * @return true if the read succeeded, false if not. The read can fail because of an
     *         unsupported combination of surface and dst configs.
     */
    bool readSurfacePixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config, void* buffer,
                           size_t rowBytes = 0,
                           uint32_t pixelOpsFlags = 0);
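    //
    // Illustrative sketch (not from the original header): reading back a 100x100 block of
    // unpremultiplied RGBA pixels from a hypothetical 8888 'surface' into a tightly packed buffer.
    //
    //     SkAutoTMalloc<uint32_t> pixels(100 * 100);
    //     bool ok = context->readSurfacePixels(surface, 0, 0, 100, 100,
    //                                          kRGBA_8888_GrPixelConfig, pixels.get(),
    //                                          0, GrContext::kUnpremul_PixelOpsFlag);
    //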

    /**
     * Writes a rectangle of pixels to a surface.
     * @param surface       the surface to write to.
     * @param left          left edge of the rectangle to write (inclusive)
     * @param top           top edge of the rectangle to write (inclusive)
     * @param width         width of rectangle to write in pixels.
     * @param height        height of rectangle to write in pixels.
     * @param config        the pixel config of the source buffer
     * @param buffer        memory to read pixels from
     * @param rowBytes      number of bytes between consecutive rows. Zero
     *                      means rows are tightly packed.
     * @param pixelOpsFlags see PixelOpsFlags enum above.
     * @return true if the write succeeded, false if not. The write can fail because of an
     *         unsupported combination of surface and src configs.
     */
    bool writeSurfacePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config, const void* buffer,
                            size_t rowBytes,
                            uint32_t pixelOpsFlags = 0);
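    //
    // Illustrative sketch (not from the original header): uploading a tightly packed 64x64 RGBA
    // buffer to the top-left corner of a hypothetical 'surface'. Note that rowBytes has no default
    // here; pass 0 for tightly packed rows.
    //
    //     std::vector<uint32_t> src(64 * 64, 0xFF00FF00);
    //     bool ok = context->writeSurfacePixels(surface, 0, 0, 64, 64,
    //                                           kRGBA_8888_GrPixelConfig, src.data(), 0);
    //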

    /**
     * Copies a rectangle of texels from src to dst.
     * @param dst           the surface to copy to.
     * @param src           the surface to copy from.
     * @param srcRect       the rectangle of the src that should be copied.
     * @param dstPoint      the translation applied when writing the srcRect's pixels to the dst.
     */
    bool copySurface(GrSurface* dst,
                     GrSurface* src,
                     const SkIRect& srcRect,
                     const SkIPoint& dstPoint);

    /** Helper that copies the whole surface but fails when the two surfaces are not identically
        sized. */
    bool copySurface(GrSurface* dst, GrSurface* src) {
        return this->copySurface(dst, src, SkIRect::MakeWH(dst->width(), dst->height()),
                                 SkIPoint::Make(0,0));
    }
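    //
    // Illustrative sketch (not from the original header): copying the top-left 32x32 texels of a
    // hypothetical 'srcSurface' into 'dstSurface' at offset (8, 8).
    //
    //     bool copied = context->copySurface(dstSurface, srcSurface,
    //                                        SkIRect::MakeWH(32, 32), SkIPoint::Make(8, 8));
    //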

    /**
     * After this returns, any pending writes to the surface will have been issued to the
     * backend 3D API.
     */
    void flushSurfaceWrites(GrSurface* surface);

    /**
     * After this returns, any pending reads or writes to the surface will have been issued to the
     * backend 3D API.
     */
    void flushSurfaceIO(GrSurface* surface);

    /**
     * Finalizes all pending reads and writes to the surface and also performs an MSAA resolve
     * if necessary.
     *
     * It is not necessary to call this before reading the render target via Skia/GrContext.
     * GrContext will detect when it must perform a resolve before reading pixels back from the
     * surface or using it as a texture.
     */
    void prepareSurfaceForExternalIO(GrSurface*);

    /**
     * An ID associated with this context, guaranteed to be unique.
     */
    uint32_t uniqueID() { return fUniqueID; }

    ///////////////////////////////////////////////////////////////////////////
    // Functions intended for internal use only.
    GrGpu* getGpu() { return fGpu; }
    const GrGpu* getGpu() const { return fGpu; }
    GrBatchFontCache* getBatchFontCache() { return fBatchFontCache; }
    GrTextBlobCache* getTextBlobCache() { return fTextBlobCache.get(); }
    bool abandoned() const;
    GrResourceProvider* resourceProvider() { return fResourceProvider; }
    const GrResourceProvider* resourceProvider() const { return fResourceProvider; }
    GrResourceCache* getResourceCache() { return fResourceCache; }

    // Called by tests that draw directly to the context via GrRenderTargetContext
    void getTestTarget(GrTestTarget*, sk_sp<GrRenderTargetContext>);

    /** Reset GPU stats */
    void resetGpuStats() const;

    /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
    void dumpCacheStats(SkString*) const;
    void dumpCacheStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
    void printCacheStats() const;

    /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
    void dumpGpuStats(SkString*) const;
    void dumpGpuStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
    void printGpuStats() const;

    /** Specify the TextBlob cache limit. If the current cache exceeds this limit it will be
        purged. This is for testing only. */
    void setTextBlobCacheLimit_ForTesting(size_t bytes);

    /** Specify the sizes of the GrAtlasTextContext atlases. The configs pointer below should
        point to an array of 3 entries. */
    void setTextContextAtlasSizes_ForTesting(const GrBatchAtlasConfig* configs);

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /** Get a pointer to the atlas texture for the given mask format */
    GrTexture* getFontAtlasTexture(GrMaskFormat format);

    GrAuditTrail* getAuditTrail() { return &fAuditTrail; }

    /** This is only useful for debug purposes */
    SkDEBUGCODE(GrSingleOwner* debugSingleOwner() const { return &fSingleOwner; } )

    // Provides access to functions that aren't part of the public API.
    GrContextPriv contextPriv();
    const GrContextPriv contextPriv() const;
private:
    GrGpu*                                  fGpu;
    const GrCaps*                           fCaps;
    GrResourceCache*                        fResourceCache;
    // this union exists because the inheritance of GrTextureProvider->GrResourceProvider
    // is in a private header.
    union {
        GrResourceProvider*                 fResourceProvider;
        GrTextureProvider*                  fTextureProvider;
    };

    sk_sp<GrContextThreadSafeProxy>         fThreadSafeProxy;

    GrBatchFontCache*                       fBatchFontCache;
    std::unique_ptr<GrTextBlobCache>        fTextBlobCache;

    bool                                    fDidTestPMConversions;
    int                                     fPMToUPMConversion;
    int                                     fUPMToPMConversion;
    // The sw backend may call GrContext::readSurfacePixels on multiple threads.
    // We may transfer the responsibility for using a mutex to the sw backend
    // when there are fewer code paths that lead to a readSurfacePixels call
    // from the sw backend. readSurfacePixels is reentrant in one case - when performing
    // the PM conversions test. To handle this we do the PM conversions test outside
    // of fReadPixelsMutex and use a separate mutex to guard it. When it re-enters
    // readSurfacePixels it will grab fReadPixelsMutex and release it before the outer
    // readSurfacePixels proceeds to grab it.
    // TODO: Stop pretending to make GrContext thread-safe for sw rasterization and provide
    // a mechanism to make a SkPicture safe for multithreaded sw rasterization.
    SkMutex                                 fReadPixelsMutex;
    SkMutex                                 fTestPMConversionsMutex;

    // In debug builds we guard against improper thread handling.
    // This guard is passed to the GrDrawingManager and, from there, to all the
    // GrRenderTargetContexts. It is also passed to the GrTextureProvider and SkGpuDevice.
    mutable GrSingleOwner                   fSingleOwner;

    struct CleanUpData {
        PFCleanUpFunc fFunc;
        void*         fInfo;
    };

    SkTDArray<CleanUpData>                  fCleanUpData;

    const uint32_t                          fUniqueID;

    std::unique_ptr<GrDrawingManager>       fDrawingManager;

    GrAuditTrail                            fAuditTrail;

    // TODO: have the GrClipStackClip use renderTargetContexts and rm this friending
    friend class GrContextPriv;

    GrContext(); // init must be called after the constructor.
    bool init(GrBackend, GrBackendContext, const GrContextOptions& options);

    void initMockContext();
    void initCommon(const GrContextOptions&);

    /**
     * These functions create premul <-> unpremul effects if it is possible to generate a pair
     * of effects that make a readToUPM->writeToPM->readToUPM cycle invariant. Otherwise, they
     * return NULL. They also can perform a swizzle as part of the draw.
     */
    sk_sp<GrFragmentProcessor> createPMToUPMEffect(GrTexture*, const GrSwizzle&,
                                                   const SkMatrix&) const;
    sk_sp<GrFragmentProcessor> createUPMToPMEffect(GrTexture*, const GrSwizzle&,
                                                   const SkMatrix&) const;
    /** Called before either of the above two functions to determine the appropriate fragment
        processors for conversions. This must be called by readSurfacePixels before a mutex is
        taken, since testing PM conversions itself will call readSurfacePixels. */
    void testPMConversionsIfNecessary(uint32_t flags);
    /** Returns true if we've already determined that createPMToUPMEffect and createUPMToPMEffect
        will fail. In such cases fall back to SW conversion. */
    bool didFailPMUPMConversionTest() const;

    /**
     * A callback similar to the above for use by the TextBlobCache.
     * TODO: move textblob draw calls below context so we can use the call above.
     */
    static void TextBlobCacheOverBudgetCB(void* data);

    typedef SkRefCnt INHERITED;
};

/**
 * Can be used to perform actions related to the generating GrContext in a thread-safe manner. The
 * proxy does not access the 3D API (e.g. OpenGL) that backs the generating GrContext.
 */
class GrContextThreadSafeProxy : public SkRefCnt {
private:
    GrContextThreadSafeProxy(sk_sp<const GrCaps> caps, uint32_t uniqueID)
        : fCaps(std::move(caps))
        , fContextUniqueID(uniqueID) {}

    sk_sp<const GrCaps> fCaps;
    uint32_t            fContextUniqueID;

    friend class GrContext;
    friend class SkImage;

    typedef SkRefCnt INHERITED;
};

#endif
