1/*
2 * Copyright (C) 2012-2013, The Linux Foundation. All rights reserved.
3 *
4 * Not a Contribution, Apache license notifications and license are retained
5 * for attribution purposes only.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 *      http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 */
19
20#ifndef HWC_MDP_COMP
21#define HWC_MDP_COMP
22
23#include <hwc_utils.h>
24#include <idle_invalidator.h>
25#include <cutils/properties.h>
26#include <overlay.h>
27
28#define DEFAULT_IDLE_TIME 70
29#define MAX_PIPES_PER_MIXER 4
30
31namespace overlay {
32class Rotator;
33};
34
35namespace qhwc {
36namespace ovutils = overlay::utils;
37
class MDPComp {
public:
    explicit MDPComp(int);
    virtual ~MDPComp(){};
    /*sets up mdp comp for the current frame */
    int prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* draws MDP-composed layers; implemented by split/non-split variants */
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list) = 0;
    /* dumpsys support: appends composition state to buf */
    void dump(android::String8& buf);
    /* true when no layer went to MDP this frame (full GLES fallback) */
    bool isGLESOnlyComp() { return (mCurrentFrame.mdpCount == 0); }
    /* factory: returns the MDPComp variant suited to the given display */
    static MDPComp* getObject(hwc_context_t *ctx, const int& dpy);
    /* Handler to invoke frame redraw on Idle Timer expiry */
    static void timeout_handler(void *udata);
    /* Initialize MDP comp*/
    static bool init(hwc_context_t *ctx);
    static void resetIdleFallBack() { sIdleFallBack = false; }
    /* re-arms timeout handling for the next frame */
    static void reset() { sHandleTimeout = false; };
    static bool isIdleFallback() { return sIdleFallBack; }

protected:
    enum { MAX_SEC_LAYERS = 1 }; //TODO add property support

    /* pipe categories a layer may be assigned to; values mirror ovutils */
    enum ePipeType {
        MDPCOMP_OV_RGB = ovutils::OV_MDP_PIPE_RGB,
        MDPCOMP_OV_VG = ovutils::OV_MDP_PIPE_VG,
        MDPCOMP_OV_DMA = ovutils::OV_MDP_PIPE_DMA,
        MDPCOMP_OV_ANY,
    };

    /* mdp pipe data */
    struct MdpPipeInfo {
        int zOrder; // stacking order of this pipe within the mixer
        virtual ~MdpPipeInfo(){};
    };

    /* pipe pair used when a 4k2k YUV layer is split across two VG pipes */
    struct MdpYUVPipeInfo : public MdpPipeInfo{
        ovutils::eDest lIndex; // pipe for the left half
        ovutils::eDest rIndex; // pipe for the right half
        virtual ~MdpYUVPipeInfo(){};
    };

    /* per layer data */
    struct PipeLayerPair {
        MdpPipeInfo *pipeInfo;
        overlay::Rotator* rot; // rotator session; set only if layer needs one
        int listIndex;         // index of the layer in the hwc layer list
    };

    /* per frame data */
    struct FrameInfo {
        /* maps layer list to mdp list */
        int layerCount;
        int layerToMDP[MAX_NUM_APP_LAYERS];

        /* maps mdp list to layer list */
        int mdpCount;
        struct PipeLayerPair mdpToLayer[MAX_PIPES_PER_MIXER];

        /* layer composing on FB? */
        int fbCount;
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        /* layers lying outside ROI. Will
         * be dropped off from the composition */
        int dropCount;
        bool drop[MAX_NUM_APP_LAYERS];

        bool needsRedraw; // whether GPU must redraw the FB target this frame
        int fbZ;          // z order at which the framebuffer target composes

        /* c'tor */
        FrameInfo();
        /* clear old frame data */
        void reset(const int& numLayers);
        /* rebuilds layerToMDP/mdpToLayer mappings from isFBComposed */
        void map();
    };

    /* cached data */
    struct LayerCache {
        int layerCount;
        buffer_handle_t hnd[MAX_NUM_APP_LAYERS];
        bool isFBComposed[MAX_NUM_APP_LAYERS];
        bool drop[MAX_NUM_APP_LAYERS];

        /* c'tor */
        LayerCache();
        /* clear caching info*/
        void reset();
        /* snapshots buffer handles of the list for change detection */
        void cacheAll(hwc_display_contents_1_t* list);
        /* copies counts/flags from the frame just composed */
        void updateCounts(const FrameInfo&);
        /* true when curFrame matches the cached frame (same buffers/flags) */
        bool isSameFrame(const FrameInfo& curFrame,
                         hwc_display_contents_1_t* list);
    };

    /* allocates pipe from pipe book */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
                                 hwc_display_contents_1_t* list) = 0;
    /* allocate MDP pipes from overlay */
    ovutils::eDest getMdpPipe(hwc_context_t *ctx, ePipeType type, int mixer);
    /* configures MDP pipes */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                          PipeLayerPair& pipeLayerPair) = 0;
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list) = 0;
    /* configures 4kx2k yuv layer*/
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair) = 0;
    /* set/reset flags for MDPComp */
    void setMDPCompLayerFlags(hwc_context_t *ctx,
                              hwc_display_contents_1_t* list);
    /* decides whether the FB target needs a GPU redraw this frame */
    void setRedraw(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* checks for conditions where mdpcomp is not possible */
    bool isFrameDoable(hwc_context_t *ctx);
    /* checks for conditions where RGB layers cannot be bypassed */
    bool tryFullFrame(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* checks if full MDP comp can be done */
    bool fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* check if we can use layer cache to do at least partial MDP comp */
    bool partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that uses caching to save power as primary goal */
    bool cacheBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Partial MDP comp that balances the load between MDP and GPU such that
     * MDP is loaded to the max of its capacity. The lower z order layers are
     * fed to MDP, whereas the upper ones to GPU, because the upper ones have
     * lower number of pixels and can reduce GPU processing time */
    bool loadBasedComp(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* Checks if its worth doing load based partial comp */
    bool isLoadBasedCompDoable(hwc_context_t *ctx);
    /* checks for conditions where only video can be bypassed */
    bool tryVideoOnly(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    bool videoOnlyComp(hwc_context_t *ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* checks for conditions where YUV layers cannot be bypassed */
    bool isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer);
    /* checks if MDP/MDSS can process current list w.r.t. HW limitations
     * All peculiar HW limitations should go here */
    bool hwLimitationsCheck(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    /* generates ROI based on the modified area of the frame */
    void generateROI(hwc_context_t *ctx, hwc_display_contents_1_t* list);
    /* marks layers lying outside roi as dropped; returns false when the
     * ROI cannot be honored and full-frame composition must be used */
    bool validateAndApplyROI(hwc_context_t *ctx, hwc_display_contents_1_t* list,
                             hwc_rect_t roi);

    /* Is debug enabled */
    static bool isDebug() { return sDebugLogs ? true : false; };
    /* Is feature enabled */
    static bool isEnabled() { return sEnabled; };
    /* checks for mdp comp dimension limitation */
    bool isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer);
    /* tracks non updating layers*/
    void updateLayerCache(hwc_context_t* ctx, hwc_display_contents_1_t* list);
    /* optimize layers for mdp comp*/
    bool markLayersForCaching(hwc_context_t* ctx,
            hwc_display_contents_1_t* list);
    /* finds the largest batch of contiguous FB-composed layers; results are
     * returned through the in-out reference parameters */
    int getBatch(hwc_display_contents_1_t* list,
            int& maxBatchStart, int& maxBatchEnd,
            int& maxBatchCount);
    /* checks if the layer batch can be moved to the top without changing the
     * visual result (no updating layer intersects it) */
    bool canPushBatchToTop(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex);
    /* checks whether any updating layer in [fromIndex, toIndex] overlaps the
     * target layer's display frame */
    bool intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
            int fromIndex, int toIndex, int targetLayerIndex);

    /* updates cache map with YUV info */
    void updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
            bool secureOnly);
    /* Validates if the GPU/MDP layer split chosen by a strategy is supported
     * by MDP.
     * Sets up MDP comp data structures to reflect conversion from layers to
     * overlay pipes.
     * Configures overlay.
     * Configures if GPU should redraw.
     */
    bool postHeuristicsHandling(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);
    /* clears per-frame composition state (e.g. after a failed strategy) */
    void reset(hwc_context_t *ctx);
    bool isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer);
    /* checks whether enough overlay pipe resources exist for the frame */
    bool resourceCheck(hwc_context_t *ctx, hwc_display_contents_1_t *list);
    /* returns the rect of the FB target that actually needs updating */
    hwc_rect_t getUpdatingFBRect(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    int mDpy; // display id this object composes for
    static bool sEnabled;
    static bool sEnableMixedMode;
    /* Enables Partial frame composition */
    static bool sEnablePartialFrameUpdate;
    static bool sDebugLogs;
    static bool sIdleFallBack;
    /* Handles the timeout event from kernel, if the value is set to true */
    static bool sHandleTimeout;
    static int sMaxPipesPerMixer;
    static bool sSrcSplitEnabled;
    static IdleInvalidator *idleInvalidator;
    struct FrameInfo mCurrentFrame;  // composition decisions for this frame
    struct LayerCache mCachedFrame;  // previous frame, for caching heuristics
    //Enable 4kx2k yuv layer split
    static bool sEnable4k2kYUVSplit;
    /* allocates the left/right VG pipe pair for a split 4k2k YUV layer */
    bool allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index);
};
238
/* MDP composition for displays driven by a single mixer (no dual-mixer
 * split); each layer maps to exactly one MDP pipe */
class MDPCompNonSplit : public MDPComp {
public:
    explicit MDPCompNonSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompNonSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

private:
    /* pipe data for the single-mixer case: one pipe per layer */
    struct MdpPipeInfoNonSplit : public MdpPipeInfo {
        ovutils::eDest index;
        virtual ~MdpPipeInfoNonSplit() {};
    };

    /* configures overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                          PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
                                 hwc_display_contents_1_t* list);

    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer to 2 VG pipes*/
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair);
};
268
/* MDP composition for dual-mixer (split) displays: each layer may need a
 * left and a right pipe, one per mixer half */
class MDPCompSplit : public MDPComp {
public:
    explicit MDPCompSplit(int dpy):MDPComp(dpy){};
    virtual ~MDPCompSplit(){};
    virtual bool draw(hwc_context_t *ctx, hwc_display_contents_1_t *list);

protected:
    /* pipe data for the split case: left- and right-mixer pipes */
    struct MdpPipeInfoSplit : public MdpPipeInfo {
        ovutils::eDest lIndex;
        ovutils::eDest rIndex;
        virtual ~MdpPipeInfoSplit() {};
    };

    /* acquires the left/right pipes needed for the layer; fills pipe_info */
    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
                         MdpPipeInfoSplit& pipe_info, ePipeType type);

    /* configures overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
                          PipeLayerPair& pipeLayerPair);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
                                 hwc_display_contents_1_t* list);

private:
    /* Increments mdpCount if 4k2k yuv layer split is enabled.
     * updates framebuffer z order if fb lies above source-split layer */
    virtual void adjustForSourceSplit(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures 4kx2k yuv layer*/
    virtual int configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& PipeLayerPair);
};
303
/* Variant of split composition used when source-split is enabled
 * (sSrcSplitEnabled); overrides pipe acquisition/configuration while
 * reusing MDPCompSplit's draw path */
class MDPCompSrcSplit : public MDPCompSplit {
public:
    explicit MDPCompSrcSplit(int dpy) : MDPCompSplit(dpy){};
    virtual ~MDPCompSrcSplit(){};
private:
    /* acquires pipes for the layer per source-split rules */
    virtual bool acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
            MdpPipeInfoSplit& pipe_info, ePipeType type);

    /* allocates pipes to selected candidates */
    virtual bool allocLayerPipes(hwc_context_t *ctx,
            hwc_display_contents_1_t* list);

    /* configures overlay pipes for the frame */
    virtual int configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
            PipeLayerPair& pipeLayerPair);
};
318
319}; //namespace
320#endif
321