GrContext.h revision d4c741e3d0e0fa633399691c47f76b6c7841ee83
/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrContext_DEFINED
#define GrContext_DEFINED

#include "GrCaps.h"
#include "GrClip.h"
#include "GrColor.h"
#include "GrPaint.h"
#include "GrRenderTarget.h"
#include "GrTextureProvider.h"
#include "SkMatrix.h"
#include "SkPathEffect.h"
#include "SkTypes.h"
#include "../private/GrAuditTrail.h"
#include "../private/GrSingleOwner.h"
#include "../private/SkMutex.h"

struct GrBatchAtlasConfig;
class GrBatchFontCache;
struct GrContextOptions;
class GrContextThreadSafeProxy;
class GrDrawingManager;
class GrDrawContext;
class GrDrawTarget;
class GrFragmentProcessor;
class GrGpu;
class GrIndexBuffer;
class GrLayerCache;
class GrOvalRenderer;
class GrPath;
class GrPipelineBuilder;
class GrResourceEntry;
class GrResourceCache;
class GrResourceProvider;
class GrTestTarget;
class GrTextBlobCache;
class GrTextContext;
class GrTextureParams;
class GrVertexBuffer;
class GrStrokeInfo;
class GrSwizzle;
class SkTraceMemoryDump;

class SK_API GrContext : public SkRefCnt {
public:
    /**
     * Creates a GrContext for a backend context.
     */
    static GrContext* Create(GrBackend, GrBackendContext, const GrContextOptions& options);
    static GrContext* Create(GrBackend, GrBackendContext);

    /**
     * Only defined in test apps.
     */
    static GrContext* CreateMockContext();

    virtual ~GrContext();

    GrContextThreadSafeProxy* threadSafeProxy();

    /**
     * The GrContext normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and it should resend. This call
     * shouldn't be made frequently, for performance reasons.
     * The flag bits in 'state' depend on which backend is used by the
     * context, either GL or D3D (possible in the future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);
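    /**
     * Illustrative sketch (not part of the API): if client code touches the backend 3D API
     * directly (e.g. issues raw GL calls), tell Skia before drawing through it again.
     * 'drawWithRawGL' is a hypothetical client function.
     *
     *     drawWithRawGL();                              // modifies GL state behind Skia's back
     *     context->resetContext(kAll_GrBackendState);   // ask Skia to resend its state
     */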

    /**
     * Callback function to allow classes to clean up on GrContext destruction.
     * The 'info' field is filled in with the 'info' passed to addCleanUp.
     */
    typedef void (*PFCleanUpFunc)(const GrContext* context, void* info);

    /**
     * Add a function to be called from within GrContext's destructor.
     * This gives classes a chance to free resources held on a per-context basis.
     * The 'info' parameter will be stored and passed to the callback function.
     */
    void addCleanUp(PFCleanUpFunc cleanUp, void* info) {
        CleanUpData* entry = fCleanUpData.push();

        entry->fFunc = cleanUp;
        entry->fInfo = info;
    }
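    /**
     * Illustrative sketch (not part of the API): registering a per-context cleanup hook.
     * 'MyPerContextData' is a hypothetical client type.
     *
     *     static void CleanUpMyData(const GrContext* context, void* info) {
     *         delete static_cast<MyPerContextData*>(info);  // 'info' is the pointer passed below
     *     }
     *
     *     context->addCleanUp(CleanUpMyData, new MyPerContextData);
     */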

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer
     * usable. Call this if you have lost the associated GPU context, and thus internal texture,
     * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the
     * GrContext and any of its created resource objects will not make backend 3D API calls. Content
     * rendered but not previously flushed may be lost. After this function is called, all
     * subsequent calls on the GrContext will fail or be no-ops.
     *
     * The typical use case for this function is that the underlying 3D context was lost and further
     * API calls may crash.
     */
    void abandonContext();

    /**
     * This is similar to abandonContext(); however, the underlying 3D context is not yet lost and
     * the GrContext will clean up all allocated resources before returning. After returning it
     * will assume that the underlying context may no longer be valid.
     *
     * The typical use case for this function is that the client is going to destroy the 3D context
     * but can't guarantee that GrContext will be destroyed first (perhaps because it may be ref'ed
     * elsewhere by either the client or Skia objects).
     */
    void releaseResourcesAndAbandonContext();
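    /**
     * Illustrative sketch (not part of the API): teardown when the client owns the backend
     * 3D context and Skia objects may outlive it. 'destroyMyGLContext' is hypothetical.
     *
     *     context->releaseResourcesAndAbandonContext(); // free GPU objects while the API is live
     *     context->unref();                             // a lingering ref elsewhere is now harmless
     *     destroyMyGLContext();
     */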

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /**
     *  Return the current GPU resource cache limits.
     *
     *  @param maxResources If non-null, returns maximum number of resources that
     *                      can be held in the cache.
     *  @param maxResourceBytes If non-null, returns maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     *  Gets the current GPU resource cache usage.
     *
     *  @param resourceCount If non-null, returns the number of resources that are held in the
     *                       cache.
     *  @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     *  Specify the GPU resource cache limits. If the current cache exceeds either
     *  of these, it will be purged (LRU) to keep the cache within these limits.
     *
     *  @param maxResources The maximum number of resources that can be held in
     *                      the cache.
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
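    /**
     * Illustrative sketch (not part of the API): halving the cache's byte budget while keeping
     * the current resource-count limit.
     *
     *     int maxResources;
     *     size_t maxResourceBytes;
     *     context->getResourceCacheLimits(&maxResources, &maxResourceBytes);
     *     context->setResourceCacheLimits(maxResources, maxResourceBytes / 2);
     */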

    GrTextureProvider* textureProvider() { return fTextureProvider; }
    const GrTextureProvider* textureProvider() const { return fTextureProvider; }

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
     * pressure.
     */
    void freeGpuResources();

    /**
     * Purge all the unlocked resources from the cache.
     * This entry point is mainly meant for timing texture uploads
     * and is not defined in normal builds of Skia.
     */
    void purgeAllUnlockedResources();

    /** Access the context capabilities */
    const GrCaps* caps() const { return fCaps; }

    /**
     * Returns the recommended sample count for a render target when using this
     * context.
     *
     * @param  config the configuration of the render target.
     * @param  dpi the display density in dots per inch.
     *
     * @return a sample count that should perform well and have good enough
     *         rendering quality for the display. Alternatively, returns 0 if
     *         MSAA is not supported or is not recommended to be used by default.
     */
    int getRecommendedSampleCount(GrPixelConfig config, SkScalar dpi) const;
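    /**
     * Illustrative sketch (not part of the API): picking a sample count for a new render target
     * on a ~220 dpi display. Assumes kRGBA_8888_GrPixelConfig is the config in use.
     *
     *     int sampleCnt = context->getRecommendedSampleCount(kRGBA_8888_GrPixelConfig,
     *                                                        SkIntToScalar(220));
     */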

    /**
     * Returns a helper object to orchestrate draws.
     * Callers assume the creation ref of the drawContext.
     * NULL will be returned if the context has been abandoned.
     *
     * @param  rt           the render target receiving the draws
     * @param  surfaceProps the surface properties (mainly defines text drawing)
     *
     * @return a draw context
     */
    sk_sp<GrDrawContext> drawContext(sk_sp<GrRenderTarget> rt, const SkSurfaceProps* = nullptr);

    enum BackingFit {
        kTight_BackingFit,
        kLoose_BackingFit
    };

    /**
     * Create both a GrRenderTarget and a matching GrDrawContext to wrap it.
     * The created GrRenderTarget will always be budgeted.
     */
    sk_sp<GrDrawContext> newDrawContext(BackingFit fit,
                                        int width, int height,
                                        GrPixelConfig config,
                                        int sampleCnt = 0,
                                        GrSurfaceOrigin origin = kDefault_GrSurfaceOrigin);
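    /**
     * Illustrative sketch (not part of the API): creating a budgeted 256x256 draw context and
     * wrapping an existing render target. Assumes kRGBA_8888_GrPixelConfig and a valid
     * 'renderTarget'.
     *
     *     sk_sp<GrDrawContext> dc = context->newDrawContext(GrContext::kTight_BackingFit,
     *                                                       256, 256, kRGBA_8888_GrPixelConfig);
     *     sk_sp<GrDrawContext> wrapped = context->drawContext(std::move(renderTarget));
     */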

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Flags that affect flush() behavior.
     */
    enum FlushBits {
        /**
         * A client may reach a point where it has partially rendered a frame
         * through a GrContext that it knows the user will never see. This flag
         * causes the flush to skip submission of deferred content to the 3D API
         * during the flush.
         */
        kDiscard_FlushBit                    = 0x2,
    };

    /**
     * Call to ensure all drawing to the context has been issued to the
     * underlying 3D API.
     * @param flagsBitfield     flags that control the flushing behavior. See
     *                          FlushBits.
     */
    void flush(int flagsBitfield = 0);
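    /**
     * Illustrative sketch (not part of the API): a normal flush versus discarding deferred
     * content the user will never see.
     *
     *     context->flush();                             // issue all pending drawing to the 3D API
     *     context->flush(GrContext::kDiscard_FlushBit); // drop deferred, never-visible content
     */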

    void flushIfNecessary() {
        if (fFlushToReduceCacheSize || this->caps()->immediateFlush()) {
            this->flush();
        }
    }

    /**
     * These flags can be used with the read/write pixels functions below.
     */
    enum PixelOpsFlags {
        /** The GrContext will not be flushed before the surface read or write. This means that
            the read or write may occur before previous draws have executed. */
        kDontFlush_PixelOpsFlag = 0x1,
        /** Any surface writes should be flushed to the backend 3D API after the surface operation
            is complete. */
        kFlushWrites_PixelOp = 0x2,
        /** The src for write or dst read is unpremultiplied. This is only respected if both the
            src and dst configs are an RGBA/BGRA 8888 format. */
        kUnpremul_PixelOpsFlag  = 0x4,
    };

    /**
     * Reads a rectangle of pixels from a surface.
     * @param surface       the surface to read from.
     * @param left          left edge of the rectangle to read (inclusive)
     * @param top           top edge of the rectangle to read (inclusive)
     * @param width         width of rectangle to read in pixels.
     * @param height        height of rectangle to read in pixels.
     * @param config        the pixel config of the destination buffer
     * @param buffer        memory to read the rectangle into.
     * @param rowBytes      number of bytes between consecutive rows. Zero means rows are tightly
     *                      packed.
     * @param pixelOpsFlags see PixelOpsFlags enum above.
     *
     * @return true if the read succeeded, false if not. The read can fail because of an
     *         unsupported pixel config.
     */
    bool readSurfacePixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config, void* buffer,
                           size_t rowBytes = 0,
                           uint32_t pixelOpsFlags = 0);
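    /**
     * Illustrative sketch (not part of the API): reading back a 32x32 block of RGBA pixels from
     * 'surface'. Assumes kRGBA_8888_GrPixelConfig is a supported read config here.
     *
     *     uint32_t pixels[32 * 32];
     *     bool ok = context->readSurfacePixels(surface, 0, 0, 32, 32,
     *                                          kRGBA_8888_GrPixelConfig, pixels,
     *                                          0);  // rowBytes == 0: rows are tightly packed
     */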

    /**
     * Writes a rectangle of pixels to a surface.
     * @param surface       the surface to write to.
     * @param left          left edge of the rectangle to write (inclusive)
     * @param top           top edge of the rectangle to write (inclusive)
     * @param width         width of rectangle to write in pixels.
     * @param height        height of rectangle to write in pixels.
     * @param config        the pixel config of the source buffer
     * @param buffer        memory to read pixels from
     * @param rowBytes      number of bytes between consecutive rows. Zero
     *                      means rows are tightly packed.
     * @param pixelOpsFlags see PixelOpsFlags enum above.
     * @return true if the write succeeded, false if not. The write can fail because of an
     *         unsupported combination of surface and src configs.
     */
    bool writeSurfacePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config, const void* buffer,
                            size_t rowBytes,
                            uint32_t pixelOpsFlags = 0);
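    /**
     * Illustrative sketch (not part of the API): uploading unpremultiplied RGBA data and flushing
     * the write so an external user of 'surface' sees it.
     *
     *     context->writeSurfacePixels(surface, 0, 0, 32, 32,
     *                                 kRGBA_8888_GrPixelConfig, pixels, 32 * 4,
     *                                 GrContext::kUnpremul_PixelOpsFlag |
     *                                 GrContext::kFlushWrites_PixelOp);
     */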

    /**
     * Copies a rectangle of texels from src to dst.
     * @param dst           the surface to copy to.
     * @param src           the surface to copy from.
     * @param srcRect       the rectangle of the src that should be copied.
     * @param dstPoint      the translation applied when writing the srcRect's pixels to the dst.
     */
    bool copySurface(GrSurface* dst,
                     GrSurface* src,
                     const SkIRect& srcRect,
                     const SkIPoint& dstPoint);

    /** Helper that copies the whole surface but fails when the two surfaces are not identically
        sized. */
    bool copySurface(GrSurface* dst, GrSurface* src) {
        return this->copySurface(dst, src, SkIRect::MakeWH(dst->width(), dst->height()),
                                 SkIPoint::Make(0,0));
    }
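    /**
     * Illustrative sketch (not part of the API): copying the top-left 16x16 texels of 'src' into
     * 'dst' with its top-left corner placed at (8, 8).
     *
     *     bool copied = context->copySurface(dst, src,
     *                                        SkIRect::MakeWH(16, 16),
     *                                        SkIPoint::Make(8, 8));
     */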

    /**
     * After this returns, any pending writes to the surface will have been issued to the
     * backend 3D API.
     */
    void flushSurfaceWrites(GrSurface* surface);

    /**
     * Finalizes all pending reads and writes to the surface and also performs an MSAA resolve
     * if necessary.
     *
     * It is not necessary to call this before reading the render target via Skia/GrContext.
     * GrContext will detect when it must perform a resolve before reading pixels back from the
     * surface or using it as a texture.
     */
    void prepareSurfaceForExternalIO(GrSurface*);

    /**
     * An ID associated with this context, guaranteed to be unique.
     */
    uint32_t uniqueID() { return fUniqueID; }

    ///////////////////////////////////////////////////////////////////////////
    // Functions intended for internal use only.
    GrGpu* getGpu() { return fGpu; }
    const GrGpu* getGpu() const { return fGpu; }
    GrBatchFontCache* getBatchFontCache() { return fBatchFontCache; }
    GrLayerCache* getLayerCache() { return fLayerCache.get(); }
    GrTextBlobCache* getTextBlobCache() { return fTextBlobCache; }
    bool abandoned() const;
    GrResourceProvider* resourceProvider() { return fResourceProvider; }
    const GrResourceProvider* resourceProvider() const { return fResourceProvider; }
    GrResourceCache* getResourceCache() { return fResourceCache; }

    // Called by tests that draw directly to the context via GrDrawTarget
    void getTestTarget(GrTestTarget*, GrRenderTarget* rt);

    /** Reset GPU stats */
    void resetGpuStats() const;

    /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
    void dumpCacheStats(SkString*) const;
    void dumpCacheStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
    void printCacheStats() const;

    /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
    void dumpGpuStats(SkString*) const;
    void dumpGpuStatsKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values) const;
    void printGpuStats() const;

    /** Specify the TextBlob cache limit. If the current cache exceeds this limit it will purge.
        This is for testing only. */
    void setTextBlobCacheLimit_ForTesting(size_t bytes);

    /** Specify the sizes of the GrAtlasTextContext atlases. The configs pointer below should
        point to an array of 3 entries. */
    void setTextContextAtlasSizes_ForTesting(const GrBatchAtlasConfig* configs);

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /** Get pointer to atlas texture for given mask format */
    GrTexture* getFontAtlasTexture(GrMaskFormat format);

    GrAuditTrail* getAuditTrail() { return &fAuditTrail; }

    /** This is only useful for debug purposes */
    SkDEBUGCODE(GrSingleOwner* debugSingleOwner() const { return &fSingleOwner; } )

private:
    GrGpu*                                  fGpu;
    const GrCaps*                           fCaps;
    GrResourceCache*                        fResourceCache;
    // this union exists because the inheritance of GrTextureProvider->GrResourceProvider
    // is in a private header.
    union {
        GrResourceProvider*                 fResourceProvider;
        GrTextureProvider*                  fTextureProvider;
    };

    SkAutoTUnref<GrContextThreadSafeProxy>  fThreadSafeProxy;

    GrBatchFontCache*                       fBatchFontCache;
    SkAutoTDelete<GrLayerCache>             fLayerCache;
    SkAutoTDelete<GrTextBlobCache>          fTextBlobCache;

    // Set by OverBudgetCB() to request that GrContext flush before exiting a draw.
    bool                                    fFlushToReduceCacheSize;
    bool                                    fDidTestPMConversions;
    int                                     fPMToUPMConversion;
    int                                     fUPMToPMConversion;
    // The sw backend may call GrContext::readSurfacePixels on multiple threads.
    // We may transfer the responsibility for using a mutex to the sw backend
    // when there are fewer code paths that lead to a readSurfacePixels call
    // from the sw backend. readSurfacePixels is reentrant in one case: when performing
    // the PM conversions test. To handle this we do the PM conversions test outside
    // of fReadPixelsMutex and use a separate mutex to guard it. When it re-enters
    // readSurfacePixels it will grab fReadPixelsMutex and release it before the outer
    // readSurfacePixels proceeds to grab it.
    // TODO: Stop pretending to make GrContext thread-safe for sw rasterization and provide
    // a mechanism to make a SkPicture safe for multithreaded sw rasterization.
    SkMutex                                 fReadPixelsMutex;
    SkMutex                                 fTestPMConversionsMutex;

    // In debug builds we guard against improper thread handling.
    // This guard is passed to the GrDrawingManager and, from there, to all the
    // GrDrawContexts. It is also passed to the GrTextureProvider and SkGpuDevice.
    mutable GrSingleOwner                   fSingleOwner;

    struct CleanUpData {
        PFCleanUpFunc fFunc;
        void*         fInfo;
    };

    SkTDArray<CleanUpData>                  fCleanUpData;

    const uint32_t                          fUniqueID;

    SkAutoTDelete<GrDrawingManager>         fDrawingManager;

    GrAuditTrail                            fAuditTrail;

    // TODO: have the CMM use drawContexts and rm this friending
    friend class GrClipMaskManager; // the CMM is friended just so it can call 'drawingManager'
    friend class GrDrawingManager;  // for access to drawingManager for ProgramUnitTest
    GrDrawingManager* drawingManager() { return fDrawingManager; }

    GrContext(); // init must be called after the constructor.
    bool init(GrBackend, GrBackendContext, const GrContextOptions& options);

    void initMockContext();
    void initCommon(const GrContextOptions&);

    /**
     * These functions create premul <-> unpremul effects if it is possible to generate a pair
     * of effects that make a readToUPM->writeToPM->readToUPM cycle invariant. Otherwise, they
     * return NULL. They also can perform a swizzle as part of the draw.
     */
    const GrFragmentProcessor* createPMToUPMEffect(GrTexture*, const GrSwizzle&,
                                                   const SkMatrix&) const;
    const GrFragmentProcessor* createUPMToPMEffect(GrTexture*, const GrSwizzle&,
                                                   const SkMatrix&) const;
    /** Called before either of the above two functions to determine the appropriate fragment
        processors for conversions. This must be called by readSurfacePixels before a mutex is
        taken, since testing PM conversions itself will call readSurfacePixels. */
    void testPMConversionsIfNecessary(uint32_t flags);
    /** Returns true if we've already determined that createPMToUPMEffect and createUPMToPMEffect
        will fail. In such cases fall back to SW conversion. */
    bool didFailPMUPMConversionTest() const;

    /**
     *  This callback allows the resource cache to call back into the GrContext
     *  when the cache is still over budget after a purge.
     */
    static void OverBudgetCB(void* data);

    /**
     * A callback similar to the above for use by the TextBlobCache.
     * TODO: move textblob draw calls below context so we can use the call above.
     */
    static void TextBlobCacheOverBudgetCB(void* data);

    typedef SkRefCnt INHERITED;
};

/**
 * Can be used to perform actions related to the generating GrContext in a thread safe manner. The
 * proxy does not access the 3D API (e.g. OpenGL) that backs the generating GrContext.
 */
class GrContextThreadSafeProxy : public SkRefCnt {
private:
    GrContextThreadSafeProxy(const GrCaps* caps, uint32_t uniqueID)
        : fCaps(SkRef(caps))
        , fContextUniqueID(uniqueID) {}

    SkAutoTUnref<const GrCaps>  fCaps;
    uint32_t                    fContextUniqueID;

    friend class GrContext;
    friend class SkImage;

    typedef SkRefCnt INHERITED;
};

#endif