GrContext.h revision 24cdec1744892d499ce7ef59b81ea63b47193e24
/*
 * Copyright 2010 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrContext_DEFINED
#define GrContext_DEFINED

#include "GrClip.h"
#include "GrColor.h"
#include "GrPaint.h"
#include "GrPathRendererChain.h"
#include "GrRenderTarget.h"
#include "GrTextureProvider.h"
#include "SkMatrix.h"
#include "../private/SkMutex.h"
#include "SkPathEffect.h"
#include "SkTypes.h"

struct GrBatchAtlasConfig;
class GrBatchFontCache;
class GrCaps;
struct GrContextOptions;
class GrDrawingManager;
class GrDrawContext;
class GrDrawTarget;
class GrFragmentProcessor;
class GrGpu;
class GrIndexBuffer;
class GrLayerCache;
class GrOvalRenderer;
class GrPath;
class GrPathRenderer;
class GrPipelineBuilder;
class GrResourceEntry;
class GrResourceCache;
class GrResourceProvider;
class GrTestTarget;
class GrTextBlobCache;
class GrTextContext;
class GrTextureParams;
class GrVertexBuffer;
class GrStrokeInfo;
class GrSoftwarePathRenderer;
class SkTraceMemoryDump;

class SK_API GrContext : public SkRefCnt {
public:
    /**
     * Creates a GrContext for a backend context.
     */
    static GrContext* Create(GrBackend, GrBackendContext, const GrContextOptions& options);
    static GrContext* Create(GrBackend, GrBackendContext);
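
    /*
     * Example (illustrative sketch, not part of this API): creating a context
     * for an OpenGL backend. The GrGLInterface is assumed to come from the
     * platform layer (e.g. GrGLCreateNativeInterface()); variable names are
     * hypothetical.
     *
     *   SkAutoTUnref<const GrGLInterface> glInterface(GrGLCreateNativeInterface());
     *   GrContextOptions options;
     *   GrContext* context = GrContext::Create(
     *       kOpenGL_GrBackend, (GrBackendContext) glInterface.get(), options);
     */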

    /**
     * Only defined in test apps.
     */
    static GrContext* CreateMockContext();

    virtual ~GrContext();

    /**
     * The GrContext normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the context that the state was modified and it should resend. Shouldn't
     * be called frequently for good performance.
     * The flag bits, state, are dependent on which backend is used by the
     * context, either GL or D3D (possible in future).
     */
    void resetContext(uint32_t state = kAll_GrBackendState);
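
    /*
     * Example (sketch): if code outside Skia has issued raw GL calls against
     * the same GL context, drop the GrContext's cached state before the next
     * Skia draw. 'externalLibrary' is a hypothetical stand-in for such code.
     *
     *   externalLibrary->drawWithRawGL();
     *   context->resetContext(kAll_GrBackendState);
     */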

    /**
     * Callback function to allow classes to clean up on GrContext destruction.
     * The 'info' field is filled in with the 'info' passed to addCleanUp.
     */
    typedef void (*PFCleanUpFunc)(const GrContext* context, void* info);

    /**
     * Add a function to be called from within GrContext's destructor.
     * This gives classes a chance to free resources held on a per-context basis.
     * The 'info' parameter will be stored and passed to the callback function.
     */
    void addCleanUp(PFCleanUpFunc cleanUp, void* info) {
        CleanUpData* entry = fCleanUpData.push();

        entry->fFunc = cleanUp;
        entry->fInfo = info;
    }
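
    /*
     * Example (sketch): registering a per-context cleanup hook. MyCache and
     * myCacheForContext are hypothetical names used only for illustration.
     *
     *   static void cleanup_my_cache(const GrContext* context, void* info) {
     *       delete static_cast<MyCache*>(info);
     *   }
     *   ...
     *   context->addCleanUp(cleanup_my_cache, myCacheForContext);
     */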

    /**
     * Abandons all GPU resources and assumes the underlying backend 3D API
     * context is no longer usable. Call this if you have lost the associated
     * GPU context, and thus internal texture, buffer, etc. references/IDs are
     * now invalid. Should be called even when GrContext is no longer going to
     * be used for two reasons:
     *  1) ~GrContext will not try to free the objects in the 3D API.
     *  2) Any GrGpuResources created by this GrContext that outlive it will be
     *     marked as invalid (GrGpuResource::wasDestroyed()) and when they're
     *     destroyed no 3D API calls will be made.
     * Content drawn since the last GrContext::flush() may be lost. After this
     * function is called the only valid action on the GrContext or
     * GrGpuResources it created is to destroy them.
     */
    void abandonContext();
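
    /*
     * Example (sketch): responding to a lost GL/EGL context. After
     * abandonContext() the only safe operation is to release remaining refs.
     *
     *   context->abandonContext();
     *   context->unref();
     */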

    ///////////////////////////////////////////////////////////////////////////
    // Resource Cache

    /**
     *  Return the current GPU resource cache limits.
     *
     *  @param maxResources If non-null, returns maximum number of resources that
     *                      can be held in the cache.
     *  @param maxResourceBytes If non-null, returns maximum number of bytes of
     *                          video memory that can be held in the cache.
     */
    void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const;

    /**
     *  Gets the current GPU resource cache usage.
     *
     *  @param resourceCount If non-null, returns the number of resources that are held in the
     *                       cache.
     *  @param resourceBytes If non-null, returns the total number of bytes of video memory held
     *                       in the cache.
     */
    void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const;

    /**
     *  Specify the GPU resource cache limits. If the current cache exceeds either
     *  of these, it will be purged (LRU) to keep the cache within these limits.
     *
     *  @param maxResources The maximum number of resources that can be held in
     *                      the cache.
     *  @param maxResourceBytes The maximum number of bytes of video memory
     *                          that can be held in the cache.
     */
    void setResourceCacheLimits(int maxResources, size_t maxResourceBytes);
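
    /*
     * Example (sketch): raising the cache's byte budget while keeping the
     * current resource-count limit. The 96 MB figure is arbitrary.
     *
     *   int maxResources;
     *   size_t maxResourceBytes;
     *   context->getResourceCacheLimits(&maxResources, &maxResourceBytes);
     *   context->setResourceCacheLimits(maxResources, 96 * 1024 * 1024);
     */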

    GrTextureProvider* textureProvider() { return fTextureProvider; }
    const GrTextureProvider* textureProvider() const { return fTextureProvider; }

    /**
     * Frees GPU resources created by the context. Can be called to reduce GPU
     * memory pressure.
     */
    void freeGpuResources();

    /**
     * Purge all the unlocked resources from the cache.
     * This entry point is mainly meant for timing texture uploads
     * and is not defined in normal builds of Skia.
     */
    void purgeAllUnlockedResources();

    /** Access the context capabilities */
    const GrCaps* caps() const { return fCaps; }

    /**
     * Returns the recommended sample count for a render target when using this
     * context.
     *
     * @param  config the configuration of the render target.
     * @param  dpi the display density in dots per inch.
     *
     * @return sample count that should perform well and have good enough
     *         rendering quality for the display. Alternatively returns 0 if
     *         MSAA is not supported or not recommended for use by default.
     */
    int getRecommendedSampleCount(GrPixelConfig config, SkScalar dpi) const;
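
    /*
     * Example (sketch): choosing a sample count for an offscreen render
     * target. kRGBA_8888_GrPixelConfig and 96 dpi are illustrative choices;
     * a return value of 0 means render without MSAA.
     *
     *   int sampleCount = context->getRecommendedSampleCount(
     *       kRGBA_8888_GrPixelConfig, SkIntToScalar(96));
     */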

    /**
     * Returns a helper object to orchestrate draws.
     * Callers assume the creation ref of the drawContext.
     * NULL will be returned if the context has been abandoned.
     *
     * @param  rt           the render target receiving the draws
     * @param  surfaceProps the surface properties (mainly defines text drawing)
     *
     * @return a draw context
     */
    GrDrawContext* drawContext(GrRenderTarget* rt, const SkSurfaceProps* surfaceProps = NULL);
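
    /*
     * Example (sketch): obtaining a draw context for a render target. The
     * caller owns the creation ref, so SkAutoTUnref releases it; the actual
     * drawing calls are omitted.
     *
     *   SkAutoTUnref<GrDrawContext> drawContext(context->drawContext(renderTarget));
     *   if (drawContext.get()) {
     *       // issue draws through the GrDrawContext API
     *   }
     */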

    ///////////////////////////////////////////////////////////////////////////
    // Misc.

    /**
     * Flags that affect flush() behavior.
     */
    enum FlushBits {
        /**
         * A client may reach a point where it has partially rendered a frame
         * through a GrContext that it knows the user will never see. This flag
         * causes the flush to skip submission of deferred content to the 3D API
         * during the flush.
         */
        kDiscard_FlushBit                    = 0x2,
    };

    /**
     * Call to ensure all drawing to the context has been issued to the
     * underlying 3D API.
     * @param flagsBitfield     flags that control the flushing behavior. See
     *                          FlushBits.
     */
    void flush(int flagsBitfield = 0);
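
    /*
     * Example (sketch): a normal flush versus discarding a frame that will
     * never be presented.
     *
     *   context->flush();                               // submit pending work
     *   context->flush(GrContext::kDiscard_FlushBit);   // drop deferred work
     */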

    void flushIfNecessary() {
        if (fFlushToReduceCacheSize) {
            this->flush();
        }
    }

    /**
     * These flags can be used with the read/write pixels functions below.
     */
    enum PixelOpsFlags {
        /** The GrContext will not be flushed before the surface read or write. This means that
            the read or write may occur before previous draws have executed. */
        kDontFlush_PixelOpsFlag = 0x1,
        /** Any surface writes should be flushed to the backend 3D API after the surface operation
            is complete */
        kFlushWrites_PixelOp = 0x2,
        /** The src for write or dst read is unpremultiplied. This is only respected if both the
            src and dst configs are an RGBA/BGRA 8888 format. */
        kUnpremul_PixelOpsFlag  = 0x4,
    };

    /**
     * Reads a rectangle of pixels from a surface.
     * @param surface       the surface to read from.
     * @param left          left edge of the rectangle to read (inclusive)
     * @param top           top edge of the rectangle to read (inclusive)
     * @param width         width of rectangle to read in pixels.
     * @param height        height of rectangle to read in pixels.
     * @param config        the pixel config of the destination buffer
     * @param buffer        memory to read the rectangle into.
     * @param rowBytes      number of bytes between consecutive rows. Zero means rows are tightly
     *                      packed.
     * @param pixelOpsFlags see PixelOpsFlags enum above.
     *
     * @return true if the read succeeded, false if not. The read can fail because of an
     *         unsupported pixel config.
     */
    bool readSurfacePixels(GrSurface* surface,
                           int left, int top, int width, int height,
                           GrPixelConfig config, void* buffer,
                           size_t rowBytes = 0,
                           uint32_t pixelOpsFlags = 0);
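
    /*
     * Example (sketch): reading back a tightly packed RGBA rectangle. The
     * config is assumed to match the destination buffer's layout.
     *
     *   SkAutoTMalloc<uint32_t> pixels(width * height);
     *   bool ok = context->readSurfacePixels(surface, 0, 0, width, height,
     *                                        kRGBA_8888_GrPixelConfig,
     *                                        pixels.get());
     */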

    /**
     * Writes a rectangle of pixels to a surface.
     * @param surface       the surface to write to.
     * @param left          left edge of the rectangle to write (inclusive)
     * @param top           top edge of the rectangle to write (inclusive)
     * @param width         width of rectangle to write in pixels.
     * @param height        height of rectangle to write in pixels.
     * @param config        the pixel config of the source buffer
     * @param buffer        memory to read pixels from
     * @param rowBytes      number of bytes between consecutive rows. Zero
     *                      means rows are tightly packed.
     * @param pixelOpsFlags see PixelOpsFlags enum above.
     * @return true if the write succeeded, false if not. The write can fail because of an
     *         unsupported combination of surface and src configs.
     */
    bool writeSurfacePixels(GrSurface* surface,
                            int left, int top, int width, int height,
                            GrPixelConfig config, const void* buffer,
                            size_t rowBytes,
                            uint32_t pixelOpsFlags = 0);
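
    /*
     * Example (sketch): uploading unpremultiplied RGBA data to the top-left
     * corner of a surface. 'srcPixels' is a hypothetical tightly packed
     * buffer, so rowBytes is passed as 0.
     *
     *   context->writeSurfacePixels(surface, 0, 0, width, height,
     *                               kRGBA_8888_GrPixelConfig, srcPixels, 0,
     *                               GrContext::kUnpremul_PixelOpsFlag);
     */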

    /**
     * Copies a rectangle of texels from src to dst.
     * @param dst           the surface to copy to.
     * @param src           the surface to copy from.
     * @param srcRect       the rectangle of the src that should be copied.
     * @param dstPoint      the translation applied when writing the srcRect's pixels to the dst.
     * @param pixelOpsFlags see PixelOpsFlags enum above. (kUnpremul_PixelOpsFlag is not allowed).
     */
    void copySurface(GrSurface* dst,
                     GrSurface* src,
                     const SkIRect& srcRect,
                     const SkIPoint& dstPoint,
                     uint32_t pixelOpsFlags = 0);

    /** Helper that copies the whole surface but fails when the two surfaces are not identically
        sized. */
    bool copySurface(GrSurface* dst, GrSurface* src) {
        if (NULL == dst || NULL == src || dst->width() != src->width() ||
            dst->height() != src->height()) {
            return false;
        }
        this->copySurface(dst, src, SkIRect::MakeWH(dst->width(), dst->height()),
                          SkIPoint::Make(0,0));
        return true;
    }
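
    /*
     * Example (sketch): copying the top-left 64x64 texels of src into dst at
     * an offset of (16, 16). The sizes are arbitrary.
     *
     *   context->copySurface(dst, src, SkIRect::MakeWH(64, 64),
     *                        SkIPoint::Make(16, 16));
     */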

    /**
     * After this returns any pending writes to the surface will have been issued to the
     * backend 3D API.
     */
    void flushSurfaceWrites(GrSurface* surface);

    /**
     * Finalizes all pending reads and writes to the surface and also performs an MSAA resolve
     * if necessary.
     *
     * It is not necessary to call this before reading the render target via Skia/GrContext.
     * GrContext will detect when it must perform a resolve before reading pixels back from the
     * surface or using it as a texture.
     */
    void prepareSurfaceForExternalIO(GrSurface*);
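
    /*
     * Example (sketch): handing a Skia-rendered surface to external code.
     * Resolving and flushing first ensures the external reader sees the
     * completed rendering.
     *
     *   context->prepareSurfaceForExternalIO(surface);
     *   // ... external 3D API code may now sample or read the surface ...
     */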

    /**
     * An ID associated with this context, guaranteed to be unique.
     */
    uint32_t uniqueID() { return fUniqueID; }

    ///////////////////////////////////////////////////////////////////////////
    // Functions intended for internal use only.
    GrGpu* getGpu() { return fGpu; }
    const GrGpu* getGpu() const { return fGpu; }
    GrBatchFontCache* getBatchFontCache() { return fBatchFontCache; }
    GrLayerCache* getLayerCache() { return fLayerCache.get(); }
    GrTextBlobCache* getTextBlobCache() { return fTextBlobCache; }
    bool abandoned() const;
    GrResourceProvider* resourceProvider() { return fResourceProvider; }
    const GrResourceProvider* resourceProvider() const { return fResourceProvider; }
    GrResourceCache* getResourceCache() { return fResourceCache; }

    // Called by tests that draw directly to the context via GrDrawTarget
    void getTestTarget(GrTestTarget*);

    GrPathRenderer* getPathRenderer(
                    const GrPipelineBuilder*,
                    const SkMatrix& viewMatrix,
                    const SkPath& path,
                    const GrStrokeInfo& stroke,
                    bool allowSW,
                    GrPathRendererChain::DrawType drawType = GrPathRendererChain::kColor_DrawType,
                    GrPathRendererChain::StencilSupport* stencilSupport = NULL);

    /** Prints cache stats to the string if GR_CACHE_STATS == 1. */
    void dumpCacheStats(SkString*) const;
    void printCacheStats() const;

    /** Prints GPU stats to the string if GR_GPU_STATS == 1. */
    void dumpGpuStats(SkString*) const;
    void printGpuStats() const;

    /** Specify the TextBlob cache limit. If the current cache exceeds this limit it will purge.
        This is for testing only. */
    void setTextBlobCacheLimit_ForTesting(size_t bytes);

    /** Specify the sizes of the GrAtlasTextContext atlases. The configs pointer below should
        point to an array of 3 entries. */
    void setTextContextAtlasSizes_ForTesting(const GrBatchAtlasConfig* configs);

    /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */
    void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

private:
    GrGpu*                          fGpu;
    const GrCaps*                   fCaps;
    GrResourceCache*                fResourceCache;
    // this union exists because the inheritance of GrTextureProvider->GrResourceProvider
    // is in a private header.
    union {
        GrResourceProvider*         fResourceProvider;
        GrTextureProvider*          fTextureProvider;
    };

    GrBatchFontCache*               fBatchFontCache;
    SkAutoTDelete<GrLayerCache>     fLayerCache;
    SkAutoTDelete<GrTextBlobCache>  fTextBlobCache;

    GrPathRendererChain*            fPathRendererChain;
    GrSoftwarePathRenderer*         fSoftwarePathRenderer;

    // Set by OverBudgetCB() to request that GrContext flush before exiting a draw.
    bool                            fFlushToReduceCacheSize;
    bool                            fDidTestPMConversions;
    int                             fPMToUPMConversion;
    int                             fUPMToPMConversion;
    // The sw backend may call GrContext::readSurfacePixels on multiple threads.
    // We may transfer the responsibility for using a mutex to the sw backend
    // when there are fewer code paths that lead to a readSurfacePixels call
    // from the sw backend. readSurfacePixels is reentrant in one case - when performing
    // the PM conversions test. To handle this we do the PM conversions test outside
    // of fReadPixelsMutex and use a separate mutex to guard it. When it re-enters
    // readSurfacePixels it will grab fReadPixelsMutex and release it before the outer
    // readSurfacePixels proceeds to grab it.
    // TODO: Stop pretending to make GrContext thread-safe for sw rasterization and provide
    // a mechanism to make a SkPicture safe for multithreaded sw rasterization.
    SkMutex                         fReadPixelsMutex;
    SkMutex                         fTestPMConversionsMutex;

    struct CleanUpData {
        PFCleanUpFunc fFunc;
        void*         fInfo;
    };

    SkTDArray<CleanUpData>          fCleanUpData;

    const uint32_t                  fUniqueID;

    SkAutoTDelete<GrDrawingManager> fDrawingManager;

    GrContext(); // init must be called after the constructor.
    bool init(GrBackend, GrBackendContext, const GrContextOptions& options);

    void initMockContext();
    void initCommon(const GrContextOptions& options);

    /**
     * These functions create premul <-> unpremul effects if it is possible to generate a pair
     * of effects that make a readToUPM->writeToPM->readToUPM cycle invariant. Otherwise, they
     * return NULL.
     */
    const GrFragmentProcessor* createPMToUPMEffect(GrTexture*, bool swapRAndB,
                                                   const SkMatrix&) const;
    const GrFragmentProcessor* createUPMToPMEffect(GrTexture*, bool swapRAndB,
                                                   const SkMatrix&) const;
    /** Called before either of the above two functions to determine the appropriate fragment
        processors for conversions. This must be called by readSurfacePixels before a mutex is
        taken, since testing PM conversions itself will call readSurfacePixels. */
    void testPMConversionsIfNecessary(uint32_t flags);
    /** Returns true if we've already determined that createPMToUPMEffect and createUPMToPMEffect
        will fail. In such cases fall back to SW conversion. */
    bool didFailPMUPMConversionTest() const;

    /**
     *  This callback allows the resource cache to callback into the GrContext
     *  when the cache is still over budget after a purge.
     */
    static void OverBudgetCB(void* data);

    /**
     * A callback similar to the above for use by the TextBlobCache.
     * TODO: move textblob draw calls below context so we can use the call above.
     */
    static void TextBlobCacheOverBudgetCB(void* data);

    typedef SkRefCnt INHERITED;
};

#endif