hwc_mdpcomp.h revision 07bbf1e89c031a5d41a7561433e832d396c311a5
/*
 * Copyright (C) 2012-2015, The Linux Foundation. All rights reserved.
 *
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef HWC_MDP_COMP
#define HWC_MDP_COMP

#include <hwc_utils.h>
#include <idle_invalidator.h>
#include <cutils/properties.h>
#include <overlay.h>

#define MAX_PIPES_PER_MIXER 4

namespace overlay {
class Rotator;
};

namespace qhwc {
namespace ovutils = overlay::utils;

class MDPComp {
public:
    explicit MDPComp(int);
    virtual ~MDPComp(){};
    /* sets up mdp comp for the current frame */
    int prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* draw */
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list) = 0;
    //Reset values
    void reset();
    /* dumpsys */
    void dump(android::String8& buf, hwc_context_t *ctx);
    bool isGLESOnlyComp() { return (mCurrentFrame.mdpCount == 0); }
    bool isMDPComp() { return mModeOn; }
    int drawOverlap(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    static MDPComp* getObject(hwc_context_t *ctx, const int& dpy);
    /* Handler to invoke frame redraw on Idle Timer expiry */
    static void timeout_handler(void *udata);
    /* Initialize MDP comp */
    static bool init(hwc_context_t *ctx);
    static void resetIdleFallBack() { sIdleFallBack = false; }
    static bool isIdleFallback() { return sIdleFallBack; }
    static void dynamicDebug(bool enable){ sDebugLogs = enable; }
    static void setIdleTimeout(const uint32_t& timeout);
    static int setPartialUpdatePref(hwc_context_t *ctx, bool enable);
    void setDynRefreshRate(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    static int getPartialUpdatePref(hwc_context_t *ctx);
    static void enablePartialUpdate(bool enable)
                                          { sIsPartialUpdateActive = enable; };
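
    /* Typical call sequence, as a hedged sketch: the real call sites live in
     * hwc.cpp, and the mMDPComp member name is assumed from hwc_utils.h.
     *
     *   // prepare: pick a composition strategy and mark layers for MDP/GPU
     *   ctx->mMDPComp[dpy]->prepare(ctx, list);
     *   ...
     *   // set: queue the MDP-composed layers to their overlay pipes
     *   if(ctx->mMDPComp[dpy]->isMDPComp())
     *       ctx->mMDPComp[dpy]->draw(ctx, list);
     */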

protected:
    enum { MAX_SEC_LAYERS = 1 }; //TODO add property support

    enum ePipeType {
        MDPCOMP_OV_RGB = ovutils::OV_MDP_PIPE_RGB,
        MDPCOMP_OV_VG = ovutils::OV_MDP_PIPE_VG,
        MDPCOMP_OV_DMA = ovutils::OV_MDP_PIPE_DMA,
        MDPCOMP_OV_ANY,
    };

    //Simulation flags
    enum {
        MDPCOMP_AVOID_FULL_MDP = 0x001,
        MDPCOMP_AVOID_CACHE_MDP = 0x002,
        MDPCOMP_AVOID_LOAD_MDP = 0x004,
        MDPCOMP_AVOID_VIDEO_ONLY = 0x008,
        MDPCOMP_AVOID_MDP_ONLY_LAYERS = 0x010,
    };
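
    /* The simulation flags above are OR-able bits kept in sSimulationFlags.
     * A minimal usage sketch; the debug property name here is an assumption
     * used only for illustration:
     *
     *   char value[PROPERTY_VALUE_MAX];
     *   property_get("debug.hwc.simulate", value, "0");  // assumed property
     *   sSimulationFlags = atoi(value);
     *   if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP) {
     *       // behave as if full MDP comp failed, so that the mixed/GPU
     *       // fallback paths get exercised
     *   }
     */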

    /* mdp pipe data */
    struct MdpPipeInfo {
        int zOrder;
        virtual ~MdpPipeInfo(){};
    };

    struct MdpYUVPipeInfo : public MdpPipeInfo{
        ovutils::eDest lIndex;
        ovutils::eDest rIndex;
        virtual ~MdpYUVPipeInfo(){};
    };

    /* per layer data */
    struct PipeLayerPair {
        MdpPipeInfo *pipeInfo;
        overlay::Rotator* rot;
        int listIndex;
    };

    /* per frame data */
    struct FrameInfo {
        /* maps layer list to mdp list */
        int layerCount;
        int layerToMDP[MAX_NUM_APP_LAYERS];

        /* maps mdp list to layer list */
        int mdpCount;
        struct PipeLayerPair mdpToLayer[MAX_PIPES_PER_MIXER];

        /* layer composing on FB? */
        int fbCount;
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        /* layers lying outside the ROI; these
         * will be dropped from the composition */
        int dropCount;
        bool drop[MAX_NUM_APP_LAYERS];

        bool needsRedraw;
        int fbZ;

        /* c'tor */
        FrameInfo();
        /* clear old frame data */
        void reset(const int& numLayers);
        void map();
    };
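
    /* Example of the two maps above for a hypothetical three-layer list in
     * which only layer 1 stays on the FB (all values are illustrative):
     *
     *   layerCount = 3; mdpCount = 2; fbCount = 1;
     *   isFBComposed[] = { false, true, false };
     *   layerToMDP[]   = { 0, -1, 1 };    // layer index -> mdp slot (-1 = FB)
     *   mdpToLayer[0].listIndex = 0;      // mdp slot -> layer index
     *   mdpToLayer[1].listIndex = 2;
     *   fbZ = 1;                          // FB target sits between the two
     */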

    /* cached data */
    struct LayerCache {
        int layerCount;
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        bool drop[MAX_NUM_APP_LAYERS];

        /* c'tor */
        LayerCache();
        /* clear caching info */
        void reset();
        void updateCounts(const FrameInfo&);
        bool isSameFrame(const FrameInfo& curFrame,
                         hwc_display_contents_1_t* list);
        bool isSameFrame(hwc_context_t *ctx, int dpy,
                                        hwc_display_contents_1_t* list);
    };

    /* allocates pipe from pipe book */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
                                 hwc_display_contents_1_t* list) = 0;
    /* configures MDP pipes */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                          PipeLayerPair& pipeLayerPair) = 0;
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * Updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* configures 4kx2k yuv layer */
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair) = 0;
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* Trims the fbRect against the generated ROI */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) = 0;

    /* set/reset flags for MDPComp */
    void setMDPCompLayerFlags(hwc_context_t *ctx,
                              hwc_display_contents_1_t* list);
    void setRedraw(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* checks for conditions where mdpcomp is not possible */
    bool isFrameDoable(hwc_context_t *ctx);
    /* checks for conditions where RGB layers cannot be bypassed */
    bool tryFullFrame(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* checks if full MDP comp can be done */
    bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Full MDP Composition with Peripheral Tiny Overlap Removal */
    bool fullMDPCompWithPTOR(hwc_context_t *ctx,hwc_display_contents_1_t* list);
    /* check if we can use layer cache to do at least partial MDP comp */
    bool partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that uses caching to save power as primary goal */
    bool cacheBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that balances the load between MDP and GPU such that
     * MDP is loaded to the maximum of its capacity. The lower z-order layers
     * are fed to MDP, while the upper ones go to the GPU, since the upper
     * layers cover fewer pixels and therefore cost less GPU processing time */
    bool loadBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
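
    /* A hedged sketch of the load-based split described above; the real loop
     * lives in hwc_mdpcomp.cpp and markAsFBComposed() is a hypothetical
     * helper used only to keep the illustration short:
     *
     *   for(int cut = mCurrentFrame.layerCount - 1; cut > 0; cut--) {
     *       // layers [0, cut) go to MDP pipes, layers [cut, n) stay on GPU
     *       markAsFBComposed(cut, mCurrentFrame.layerCount); // hypothetical
     *       mCurrentFrame.map();
     *       if(resourceCheck(ctx, list))
     *           return true;    // MDP can carry this much of the load
     *   }
     *   return false;           // no split fits; caller falls back further
     */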
    /* Checks if it is worth doing load-based partial comp */
    bool isLoadBasedCompDoable(hwc_context_t *ctx);
    /* checks for conditions where only video can be bypassed */
    bool tryVideoOnly(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    bool videoOnlyComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* checks for conditions where only secure RGB and video can be bypassed */
    bool tryMDPOnlyLayers(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    bool mdpOnlyLayersComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* checks for conditions where YUV layers cannot be bypassed */
    bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
    /* checks for conditions where Secure RGB layers cannot be bypassed */
    bool isSecureRGBDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
    /* checks if MDP/MDSS can process the current list w.r.t. HW limitations;
     * all peculiar HW limitations should go here */
    bool hwLimitationsCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    /* Is debug enabled */
    static bool isDebug() { return sDebugLogs ? true : false; };
    /* Is feature enabled */
    static bool isEnabled() { return sEnabled; };
    /* checks for mdp comp dimension limitation */
    bool isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer);
    /* tracks non-updating layers */
    void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list,
                          FrameInfo& frame);
    /* optimize layers for mdp comp */
    bool markLayersForCaching(hwc_context_t* ctx,
            hwc_display_contents_1_t* list);
    int getBatch(hwc_display_contents_1_t* list,
            int& maxBatchStart, int& maxBatchEnd,
            int& maxBatchCount);
    bool canPushBatchToTop(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex);
    bool intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex, int targetLayerIndex);

    /* drop other non-AIV layers from external display list. */
    void dropNonAIVLayers(hwc_context_t* ctx, hwc_display_contents_1_t* list);

    /* updates cache map with YUV info */
    void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
            bool secureOnly, FrameInfo& frame);
    /* updates cache map with secure RGB info */
    void updateSecureRGB(hwc_context_t* ctx,
            hwc_display_contents_1_t* list);
    /* Validates whether the GPU/MDP layer split chosen by a strategy is
     * supported by MDP.
     * Sets up MDP comp data structures to reflect the conversion from layers
     * to overlay pipes.
     * Configures the overlay.
     * Decides whether the GPU should redraw.
     */
    bool postHeuristicsHandling(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
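
    /* Rough strategy order driven by prepare(); a hedged sketch, since the
     * exact control flow lives in hwc_mdpcomp.cpp:
     *
     *   isFrameDoable(ctx)      -> bail out to full GPU comp when false
     *   tryFullFrame(ctx, list) -> full / cached / load-based MDP comp
     *   tryMDPOnlyLayers(...)   -> only secure RGB + video layers on MDP
     *   tryVideoOnly(...)       -> only video (YUV) layers on MDP
     *
     * Whichever strategy succeeds ends in postHeuristicsHandling(), which
     * allocates pipes, configures the overlay and decides whether the GPU
     * must redraw the FB target.
     */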
    void reset(hwc_context_t *ctx);
    bool isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer);
    bool resourceCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    hwc_rect_t getUpdatingFBRect(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* checks for conditions to enable partial update */
    bool canPartialUpdate(hwc_context_t *ctx, hwc_display_contents_1_t* list);

    int mDpy;
    static bool sEnabled;
    static bool sEnableMixedMode;
    static int sSimulationFlags;
    static bool sDebugLogs;
    static bool sIdleFallBack;
    static int sMaxPipesPerMixer;
    static bool sSrcSplitEnabled;
    static IdleInvalidator *sIdleInvalidator;
    struct FrameInfo mCurrentFrame;
    struct LayerCache mCachedFrame;
    static bool sIsPartialUpdateActive;
    //Enable 4kx2k yuv layer split
    static bool sEnableYUVsplit;
    bool mModeOn; // if prepare happened
    bool allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index);
    bool mPrevModeOn; //if previous prepare happened
    //Enable Partial Update for MDP3 targets
    static bool enablePartialUpdateForMDP3;
};
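
/* getObject() returns one of the concrete implementations declared below.
 * A hedged selection sketch; the real checks are driven by target/MDP
 * capabilities, and isDisplaySplit() is assumed from hwc_utils.h:
 *
 *   MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
 *       if(sSrcSplitEnabled)
 *           return new MDPCompSrcSplit(dpy); // source split on one mixer
 *       else if(isDisplaySplit(ctx, dpy))
 *           return new MDPCompSplit(dpy);    // split display, two mixers
 *       return new MDPCompNonSplit(dpy);     // single mixer
 *   }
 */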

class MDPCompNonSplit : public MDPComp {
public:
    explicit MDPCompNonSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompNonSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

private:
    struct MdpPipeInfoNonSplit : public MdpPipeInfo {
        ovutils::eDest index;
        virtual ~MdpPipeInfoNonSplit() {};
    };

    /* configures overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                          PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
                                 hwc_display_contents_1_t* list);

    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * Updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer to 2 VG pipes */
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair);
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* Trims the fbRect against the generated ROI */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect);
};

class MDPCompSplit : public MDPComp {
public:
    explicit MDPCompSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

protected:
    struct MdpPipeInfoSplit : public MdpPipeInfo {
        ovutils::eDest lIndex;
        ovutils::eDest rIndex;
        virtual ~MdpPipeInfoSplit() {};
    };

    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
                         MdpPipeInfoSplit& pipe_info);

    /* configures overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                          PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
                                 hwc_display_contents_1_t* list);
private:
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * Updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer */
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair);
    /* generates ROI based on the modified area of the frame */
    virtual void generateROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* validates the ROI generated for fallback conditions */
    virtual bool validateAndApplyROI(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* Trims the fbRect against the generated ROI */
    virtual void trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect);
};

class MDPCompSrcSplit : public MDPCompSplit {
public:
    explicit MDPCompSrcSplit(int dpy) : MDPCompSplit(dpy){};
    virtual ~MDPCompSrcSplit(){};
private:
    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
            MdpPipeInfoSplit& pipe_info);

    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);
};

}; //namespace
#endif