GrContext.h revision 2d70dcbe5cf4b1d26bb03070d4f8cffd756dd509
/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrContext_DEFINED
#define GrContext_DEFINED

#include "GrClip.h"
#include "GrColor.h"
#include "GrPaint.h"
#include "GrPathRendererChain.h"
#include "GrRenderTarget.h"
#include "GrTextureProvider.h"
#include "SkMatrix.h"
#include "../private/SkMutex.h"
#include "SkPathEffect.h"
#include "SkTypes.h"

struct GrBatchAtlasConfig;
class GrBatchFontCache;
class GrCaps;
struct GrContextOptions;
class GrDrawContext;
class GrDrawTarget;
class GrFragmentProcessor;
class GrGpu;
class GrIndexBuffer;
class GrLayerCache;
class GrOvalRenderer;
class GrPath;
class GrPathRenderer;
class GrPipelineBuilder;
class GrResourceEntry;
class GrResourceCache;
class GrResourceProvider;
class GrTestTarget;
class GrTextBlobCache;
class GrTextContext;
class GrTextureParams;
class GrVertexBuffer;
class GrStrokeInfo;
class GrSoftwarePathRenderer;
class SkTraceMemoryDump;

class SK_API GrContext : public SkRefCnt {
public:
    /**
     * Creates a GrContext for a backend context.
     */
    static GrContext* Create(GrBackend, GrBackendContext, const GrContextOptions& options);
    static GrContext* Create(GrBackend, GrBackendContext);

    /**
     * Only defined in test apps.
     */
    static GrContext* CreateMockContext();

    virtual ~GrContext();

    /**
     * The GrContext normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and that it should resend that
     * state to the 3D API. It shouldn't be called frequently, for performance
     * reasons.
     * The flag bits, state, are dependent on which backend is used by the
     * context, either GL or D3D (the latter possibly in the future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);
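
    /**
     * A minimal usage sketch (hedged; callIntoExternalRenderer() is an
     * illustrative application helper, not a Skia API):
     *
     *   // Non-Skia code issued raw backend calls that may have changed state
     *   // GrContext tracks, so tell the context to resend its state.
     *   callIntoExternalRenderer();
     *   context->resetContext(kAll_GrBackendState);  // conservative: reset everything
     */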

    /**
     * Callback function to allow classes to clean up on GrContext destruction.
     * The 'info' field is filled in with the 'info' passed to addCleanUp.
     */
    typedef void (*PFCleanUpFunc)(const GrContext* context, void* info);

    /**
     * Add a function to be called from within GrContext's destructor.
     * This gives classes a chance to free resources held on a per-context basis.
     * The 'info' parameter will be stored and passed to the callback function.
     */
    void addCleanUp(PFCleanUpFunc cleanUp, void* info) {
        CleanUpData* entry = fCleanUpData.push();

        entry->fFunc = cleanUp;
        entry->fInfo = info;
    }
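
    /**
     * A minimal usage sketch (hedged; MyPerContextCache, ReleaseCache, and
     * fCache are illustrative names, not Skia APIs):
     *
     *   static void ReleaseCache(const GrContext* context, void* info) {
     *       delete static_cast<MyPerContextCache*>(info);
     *   }
     *   ...
     *   fCache = new MyPerContextCache;
     *   context->addCleanUp(ReleaseCache, fCache);  // freed when 'context' is destroyed
     */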

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API
     * context is no longer usable. Call this if you have lost the associated
     * GPU context, and thus internal texture, buffer, etc. references/IDs are
     * now invalid. Should be called even when GrContext is no longer going to
     * be used for two reasons:
     *  1) ~GrContext will not try to free the objects in the 3D API.
     *  2) Any GrGpuResources created by this GrContext that outlive it
     *     will be marked as invalid (GrGpuResource::wasDestroyed()) and
     *     when they're destroyed no 3D API calls will be made.
     * Content drawn since the last GrContext::flush() may be lost. After this
     * function is called the only valid action on the GrContext or
     * GrGpuResources it created is to destroy them.
     */
    void abandonContext();
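
    /**
     * A minimal usage sketch (hedged; onGpuContextLost() is an illustrative
     * application callback, not a Skia API):
     *
     *   void onGpuContextLost(GrContext* context) {
     *       // The backend 3D context is gone; don't let ~GrContext touch it.
     *       context->abandonContext();
     *       context->unref();   // the only remaining valid action is destruction
     *   }
     */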

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /**
     *  Return the current GPU resource cache limits.
     *
     *  @param maxResources If non-null, returns maximum number of resources that
     *                      can be held in the cache.
     *  @param maxResourceBytes If non-null, returns maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     *  Gets the current GPU resource cache usage.
     *
     *  @param resourceCount If non-null, returns the number of resources that are held in the
     *                       cache.
     *  @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     *  Specify the GPU resource cache limits. If the current cache exceeds either
     *  of these, it will be purged (LRU) to keep the cache within these limits.
     *
     *  @param maxResources The maximum number of resources that can be held in
     *                      the cache.
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
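
    /**
     * A minimal usage sketch (hedged): double the byte budget while keeping the
     * current resource-count limit, using only the two accessors above.
     *
     *   int maxResources;
     *   size_t maxResourceBytes;
     *   context->getResourceCacheLimits(&maxResources, &maxResourceBytes);
     *   context->setResourceCacheLimits(maxResources, 2 * maxResourceBytes);
     */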

    GrTextureProvider* textureProvider() { return fTextureProvider; }
    const GrTextureProvider* textureProvider() const { return fTextureProvider; }

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU
     * memory pressure.
     */
    void freeGpuResources();

    /**
     * Purge all the unlocked resources from the cache.
     * This entry point is mainly meant for timing texture uploads
     * and is not defined in normal builds of Skia.
     */
    void purgeAllUnlockedResources();

    /** Access the context capabilities */
    const GrCaps* caps() const { return fCaps; }

    /**
     * Returns the recommended sample count for a render target when using this
     * context.
     *
     * @param  config the configuration of the render target.
     * @param  dpi the display density in dots per inch.
     *
     * @return sample count that should perform well and have good enough
     *         rendering quality for the display. Alternatively returns 0 if
     *         MSAA is not supported or not recommended to be used by default.
     */
    int getRecommendedSampleCount(GrPixelConfig config, SkScalar dpi) const;
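
    /**
     * A minimal usage sketch (hedged; the GrSurfaceDesc fields are assumed from
     * the Skia of this era and displayDpi is an illustrative value):
     *
     *   GrSurfaceDesc desc;
     *   desc.fConfig = kRGBA_8888_GrPixelConfig;
     *   desc.fSampleCnt = context->getRecommendedSampleCount(desc.fConfig,
     *                                                        SkIntToScalar(displayDpi));
     *   // 0 means MSAA is unsupported or not recommended for this config/display.
     */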

    /**
     * Returns a helper object to orchestrate draws.
     * Callers should take a ref if they rely on the GrDrawContext sticking around.
     * NULL will be returned if the context has been abandoned.
     *
     * @param  surfaceProps the surface properties (mainly defines text drawing)
     *
     * @return a draw context
     */
    GrDrawContext* drawContext(const SkSurfaceProps* surfaceProps = NULL) {
        return fDrawingMgr.drawContext(surfaceProps);
    }
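
    /**
     * A minimal usage sketch (hedged; the draw calls themselves are elided since
     * they live on GrDrawContext, which is declared elsewhere):
     *
     *   GrDrawContext* drawContext = context->drawContext(&surfaceProps);
     *   if (!drawContext) {
     *       return;   // the context has been abandoned
     *   }
     *   // ... issue draws through drawContext; take a ref (SkRef(drawContext))
     *   // if it must outlive this scope, per the note above ...
     */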

    GrTextContext* textContext(const SkSurfaceProps& surfaceProps, GrRenderTarget* rt) {
        return fDrawingMgr.textContext(surfaceProps, rt);
    }

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Flags that affect flush() behavior.
     */
    enum FlushBits {
        /**
         * A client may reach a point where it has partially rendered a frame
         * through a GrContext that it knows the user will never see. This flag
         * causes the flush to skip submission of deferred content to the 3D API
         * during the flush.
         */
        kDiscard_FlushBit                    = 0x2,
    };

    /**
     * Call to ensure all drawing to the context has been issued to the
     * underlying 3D API.
     * @param flagsBitfield     flags that control the flushing behavior. See
     *                          FlushBits.
     */
    void flush(int flagsBitfield = 0);

    void flushIfNecessary() {
        if (fFlushToReduceCacheSize) {
            this->flush();
        }
    }

    /**
     * These flags can be used with the read/write pixels functions below.
     */
    enum PixelOpsFlags {
        /** The GrContext will not be flushed before the surface read or write. This means that
            the read or write may occur before previous draws have executed. */
        kDontFlush_PixelOpsFlag = 0x1,
        /** Any surface writes should be flushed to the backend 3D API after the surface operation
            is complete */
        kFlushWrites_PixelOp = 0x2,
        /** The src for write or dst read is unpremultiplied. This is only respected if both the
            src and dst configs are an RGBA/BGRA 8888 format. */
        kUnpremul_PixelOpsFlag  = 0x4,
    };

    /**
     * Reads a rectangle of pixels from a surface.
     * @param surface       the surface to read from.
     * @param left          left edge of the rectangle to read (inclusive)
     * @param top           top edge of the rectangle to read (inclusive)
     * @param width         width of rectangle to read in pixels.
     * @param height        height of rectangle to read in pixels.
     * @param config        the pixel config of the destination buffer
     * @param buffer        memory to read the rectangle into.
     * @param rowBytes      number of bytes between consecutive rows. Zero means rows are tightly
     *                      packed.
     * @param pixelOpsFlags see PixelOpsFlags enum above.
     *
     * @return true if the read succeeded, false if not. The read can fail because of an
     *         unsupported pixel config.
     */
    bool readSurfacePixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config, void* buffer,
                           size_t rowBytes = 0,
                           uint32_t pixelOpsFlags = 0);
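
    /**
     * A minimal usage sketch (hedged): read back a 16x16 region as tightly
     * packed RGBA8888 (rowBytes == 0), flushing any pending draws first.
     *
     *   uint32_t pixels[16 * 16];
     *   bool ok = context->readSurfacePixels(surface, 0, 0, 16, 16,
     *                                        kRGBA_8888_GrPixelConfig, pixels);
     *   if (!ok) {
     *       // unsupported config; fall back or report the failure
     *   }
     */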

    /**
     * Writes a rectangle of pixels to a surface.
     * @param surface       the surface to write to.
     * @param left          left edge of the rectangle to write (inclusive)
     * @param top           top edge of the rectangle to write (inclusive)
     * @param width         width of rectangle to write in pixels.
     * @param height        height of rectangle to write in pixels.
     * @param config        the pixel config of the source buffer
     * @param buffer        memory to read pixels from
     * @param rowBytes      number of bytes between consecutive rows. Zero
     *                      means rows are tightly packed.
     * @param pixelOpsFlags see PixelOpsFlags enum above.
     * @return true if the write succeeded, false if not. The write can fail because of an
     *         unsupported combination of surface and src configs.
     */
    bool writeSurfacePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config, const void* buffer,
                            size_t rowBytes,
                            uint32_t pixelOpsFlags = 0);
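
    /**
     * A minimal usage sketch (hedged): upload a tightly packed 16x16 RGBA8888
     * block to the top-left corner and flush the write to the backend.
     *
     *   uint32_t pixels[16 * 16] = { 0 };   // zero-initialized: transparent black
     *   context->writeSurfacePixels(surface, 0, 0, 16, 16,
     *                               kRGBA_8888_GrPixelConfig, pixels,
     *                               16 * sizeof(uint32_t),
     *                               kFlushWrites_PixelOp);
     */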

    /**
     * Copies a rectangle of texels from src to dst. The copy is clipped to both surfaces'
     * bounds.
     * @param dst           the surface to copy to.
     * @param src           the surface to copy from.
     * @param srcRect       the rectangle of the src that should be copied.
     * @param dstPoint      the translation applied when writing the srcRect's pixels to the dst.
     * @param pixelOpsFlags see PixelOpsFlags enum above. (kUnpremul_PixelOpsFlag is not allowed).
     */
    void copySurface(GrSurface* dst,
                     GrSurface* src,
                     const SkIRect& srcRect,
                     const SkIPoint& dstPoint,
                     uint32_t pixelOpsFlags = 0);

    /** Helper that copies the whole surface but fails when the two surfaces are not identically
        sized. */
    bool copySurface(GrSurface* dst, GrSurface* src) {
        if (NULL == dst || NULL == src || dst->width() != src->width() ||
            dst->height() != src->height()) {
            return false;
        }
        this->copySurface(dst, src, SkIRect::MakeWH(dst->width(), dst->height()),
                          SkIPoint::Make(0,0));
        return true;
    }

    /**
     * After this returns, any pending writes to the surface will have been issued to the
     * backend 3D API.
     */
    void flushSurfaceWrites(GrSurface* surface);

    /**
     * Finalizes all pending reads and writes to the surface and also performs an MSAA resolve
     * if necessary.
     *
     * It is not necessary to call this before reading the render target via Skia/GrContext.
     * GrContext will detect when it must perform a resolve before reading pixels back from the
     * surface or using it as a texture.
     */
    void prepareSurfaceForExternalIO(GrSurface*);

    /**
     * An ID associated with this context, guaranteed to be unique.
     */
    uint32_t uniqueID() { return fUniqueID; }

    ///////////////////////////////////////////////////////////////////////////
    // Functions intended for internal use only.
    GrGpu* getGpu() { return fGpu; }
    const GrGpu* getGpu() const { return fGpu; }
    GrBatchFontCache* getBatchFontCache() { return fBatchFontCache; }
    GrLayerCache* getLayerCache() { return fLayerCache.get(); }
    GrTextBlobCache* getTextBlobCache() { return fTextBlobCache; }
    bool abandoned() const { return fDrawingMgr.abandoned(); }
    GrResourceProvider* resourceProvider() { return fResourceProvider; }
    const GrResourceProvider* resourceProvider() const { return fResourceProvider; }
    GrResourceCache* getResourceCache() { return fResourceCache; }

    // Called by tests that draw directly to the context via GrDrawTarget
    void getTestTarget(GrTestTarget*);

    GrPathRenderer* getPathRenderer(
                    const GrDrawTarget* target,
                    const GrPipelineBuilder*,
                    const SkMatrix& viewMatrix,
                    const SkPath& path,
                    const GrStrokeInfo& stroke,
                    bool allowSW,
                    GrPathRendererChain::DrawType drawType = GrPathRendererChain::kColor_DrawType,
                    GrPathRendererChain::StencilSupport* stencilSupport = NULL);

    /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
    void dumpCacheStats(SkString*) const;
    void printCacheStats() const;

    /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
    void dumpGpuStats(SkString*) const;
    void printGpuStats() const;

    /** Specify the TextBlob cache limit. If the current cache exceeds this limit it will be
        purged. This is for testing only. */
    void setTextBlobCacheLimit_ForTesting(size_t bytes);

    /** Specify the sizes of the GrAtlasTextContext atlases. The configs pointer below should
        point to an array of 3 entries. */
    void setTextContextAtlasSizes_ForTesting(const GrBatchAtlasConfig* configs);

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

private:
    GrGpu*                          fGpu;
    const GrCaps*                   fCaps;
    GrResourceCache*                fResourceCache;
    // This union exists because the inheritance of GrTextureProvider->GrResourceProvider
    // is in a private header.
    union {
        GrResourceProvider*         fResourceProvider;
        GrTextureProvider*          fTextureProvider;
    };

    GrBatchFontCache*               fBatchFontCache;
    SkAutoTDelete<GrLayerCache>     fLayerCache;
    SkAutoTDelete<GrTextBlobCache>  fTextBlobCache;

    GrPathRendererChain*            fPathRendererChain;
    GrSoftwarePathRenderer*         fSoftwarePathRenderer;

    // Set by OverBudgetCB() to request that GrContext flush before exiting a draw.
    bool                            fFlushToReduceCacheSize;
    bool                            fDidTestPMConversions;
    int                             fPMToUPMConversion;
    int                             fUPMToPMConversion;
    // The sw backend may call GrContext::readSurfacePixels on multiple threads.
    // We may transfer the responsibility for using a mutex to the sw backend
    // when there are fewer code paths that lead to a readSurfacePixels call
    // from the sw backend. readSurfacePixels is reentrant in one case - when performing
    // the PM conversions test. To handle this we do the PM conversions test outside
    // of fReadPixelsMutex and use a separate mutex to guard it. When it re-enters
    // readSurfacePixels it will grab fReadPixelsMutex and release it before the outer
    // readSurfacePixels proceeds to grab it.
    // TODO: Stop pretending to make GrContext thread-safe for sw rasterization and provide
    // a mechanism to make a SkPicture safe for multithreaded sw rasterization.
    SkMutex                         fReadPixelsMutex;
    SkMutex                         fTestPMConversionsMutex;

    struct CleanUpData {
        PFCleanUpFunc fFunc;
        void*         fInfo;
    };

    SkTDArray<CleanUpData>          fCleanUpData;

    const uint32_t                  fUniqueID;

    GrContext(); // init must be called after the constructor.
    bool init(GrBackend, GrBackendContext, const GrContextOptions& options);

    // Currently the DrawingMgr creates a separate GrTextContext for each
    // combination of text drawing options (pixel geometry x DFT use)
    // and hands the appropriate one back given the DrawContext's request.
    //
    // It allocates a new GrDrawContext for each GrRenderTarget
    // but all of them still land in the same GrDrawTarget!
    //
    // In the future this class will allocate a new GrDrawContext for
    // each GrRenderTarget/GrDrawTarget and manage the DAG.
    class DrawingMgr {
    public:
        DrawingMgr() : fDrawTarget(nullptr), fNVPRTextContext(nullptr) {
            sk_bzero(fTextContexts, sizeof(fTextContexts));
        }

        ~DrawingMgr();

        void init(GrContext* context);

        void abandon();
        bool abandoned() const { return NULL == fDrawTarget; }

        void reset();
        void flush();

        // Callers assume the creation ref of the drawContext!
        // NULL will be returned if the context has been abandoned.
        GrDrawContext* drawContext(const SkSurfaceProps* surfaceProps);

        GrTextContext* textContext(const SkSurfaceProps& props, GrRenderTarget* rt);

    private:
        void cleanup();

        friend class GrContext;  // for access to fDrawTarget for testing

        static const int kNumPixelGeometries = 5; // The different pixel geometries
        static const int kNumDFTOptions = 2;      // DFT or no DFT

        GrContext*        fContext;
        GrDrawTarget*     fDrawTarget;

        GrTextContext*    fNVPRTextContext;
        GrTextContext*    fTextContexts[kNumPixelGeometries][kNumDFTOptions];
    };

    DrawingMgr                      fDrawingMgr;

    void initMockContext();
    void initCommon();

    /**
     * These functions create premul <-> unpremul effects if it is possible to generate a pair
     * of effects that make a readToUPM->writeToPM->readToUPM cycle invariant. Otherwise, they
     * return NULL.
     */
    const GrFragmentProcessor* createPMToUPMEffect(GrProcessorDataManager*, GrTexture*,
                                                   bool swapRAndB, const SkMatrix&) const;
    const GrFragmentProcessor* createUPMToPMEffect(GrProcessorDataManager*, GrTexture*,
                                                   bool swapRAndB, const SkMatrix&) const;
    /** Called before either of the above two functions to determine the appropriate fragment
        processors for conversions. This must be called by readSurfacePixels before a mutex is
        taken, since testing the PM conversions itself will call readSurfacePixels. */
    void testPMConversionsIfNecessary(uint32_t flags);
    /** Returns true if we've already determined that createPMToUPMEffect and createUPMToPMEffect
        will fail. In such cases fall back to SW conversion. */
    bool didFailPMUPMConversionTest() const;

    /**
     *  This callback allows the resource cache to call back into the GrContext
     *  when the cache is still over budget after a purge.
     */
    static void OverBudgetCB(void* data);

    /**
     * A callback similar to the above for use by the TextBlobCache.
     * TODO: move textblob draw calls below context so we can use the call above.
     */
    static void TextBlobCacheOverBudgetCB(void* data);

    typedef SkRefCnt INHERITED;
};

#endif