1/*
2 * Copyright (C) 2012-2014, The Linux Foundation. All rights reserved.
3 * Not a Contribution, Apache license notifications and license are retained
4 * for attribution purposes only.
5 *
6 * Licensed under the Apache License, Version 2.0 (the "License");
7 * you may not use this file except in compliance with the License.
8 * You may obtain a copy of the License at
9 *
10 *      http://www.apache.org/licenses/LICENSE-2.0
11 *
12 * Unless required by applicable law or agreed to in writing, software
13 * distributed under the License is distributed on an "AS IS" BASIS,
14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15 * See the License for the specific language governing permissions and
16 * limitations under the License.
17 */
18
19#include <math.h>
20#include "hwc_mdpcomp.h"
21#include <sys/ioctl.h>
22#include "external.h"
23#include "virtual.h"
24#include "qdMetaData.h"
25#include "mdp_version.h"
26#include "hwc_fbupdate.h"
27#include "hwc_ad.h"
28#include <overlayRotator.h>
29
30using namespace overlay;
31using namespace qdutils;
32using namespace overlay::utils;
33namespace ovutils = overlay::utils;
34
35namespace qhwc {
36
37//==============MDPComp========================================================
38
// Fires timeout_handler after an idle period so SF redraws the frame.
IdleInvalidator *MDPComp::idleInvalidator = NULL;
// Set on idle timeout; makes tryFullFrame bail so composition falls back.
bool MDPComp::sIdleFallBack = false;
// Gate for timeout_handler; when false the idle timeout is ignored.
bool MDPComp::sHandleTimeout = false;
// Verbose logging, enabled via debug.mdpcomp.logs.
bool MDPComp::sDebugLogs = false;
// Master switch, read from persist.hwc.mdpcomp.enable in init().
bool MDPComp::sEnabled = false;
// Allows mixed MDP+GPU frames; cleared by debug.mdpcomp.mixedmode.disable.
bool MDPComp::sEnableMixedMode = true;
// Partial-frame (ROI) updates; persist.hwc.partialupdate on cmd-mode 8x74v2.
bool MDPComp::sEnablePartialFrameUpdate = false;
// Cap on pipes per mixer, optionally lowered via debug.mdpcomp.maxpermixer.
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
// Split 4k2k YUV layers across two pipes; debug.mdpcomp.4k2kSplit.
bool MDPComp::sEnable4k2kYUVSplit = false;
// True when the target supports source split (set in getObject()).
bool MDPComp::sSrcSplitEnabled = false;
49MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
50
51    if(isDisplaySplit(ctx, dpy)) {
52        if(qdutils::MDPVersion::getInstance().isSrcSplit()) {
53            sSrcSplitEnabled = true;
54            return new MDPCompSrcSplit(dpy);
55        }
56        return new MDPCompSplit(dpy);
57    }
58    return new MDPCompNonSplit(dpy);
59}
60
61MDPComp::MDPComp(int dpy):mDpy(dpy){};
62
// Appends a human-readable table of the current frame's per-layer
// composition decisions to |buf| (consumed by dumpsys).
void MDPComp::dump(android::String8& buf)
{
    // Guard against dumping inconsistent/oversized frame state.
    if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS)
        return;

    dumpsys_log(buf,"HWC Map for Dpy: %s \n",
                (mDpy == 0) ? "\"PRIMARY\"" :
                (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
    dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d "
                "fbCount:%2d \n", mCurrentFrame.layerCount,
                mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf,"needsFBRedraw:%3s  pipesUsed:%2d  MaxPipesPerMixer: %d \n",
                (mCurrentFrame.needsRedraw? "YES" : "NO"),
                mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    dumpsys_log(buf," ---------------------------------------------  \n");
    dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype  |  Z  \n");
    dumpsys_log(buf," ---------------------------------------------  \n");
    // Per layer: FB-composed layers show DROP/GLES/CACHE and the common fbZ;
    // MDP-composed layers show their pipe's z-order.
    for(int index = 0; index < mCurrentFrame.layerCount; index++ )
        dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                     mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                    (mCurrentFrame.drop[index] ? "DROP" :
                    (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
    mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
    dumpsys_log(buf,"\n");
}
92
// One-time initialization: reads debug/persist properties to configure MDP
// composition and (on video-mode panels) arms the idle invalidator.
// Returns false only when |ctx| is NULL.
bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!",__FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX];

    // Master enable: persist.hwc.mdpcomp.enable must be "1" or "true".
    sEnabled = false;
    if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = true;
    }

    // Mixed (MDP + GPU) composition is on unless explicitly disabled.
    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnableMixedMode = false;
    }

    if(property_get("debug.mdpcomp.logs", property, NULL) > 0) {
        if(atoi(property) != 0)
            sDebugLogs = true;
    }

    // Partial (ROI) updates only apply to command-mode panels on 8x74v2.
    if(property_get("persist.hwc.partialupdate", property, NULL) > 0) {
        if((atoi(property) != 0) && ctx->mMDP.panel == MIPI_CMD_PANEL &&
           qdutils::MDPVersion::getInstance().is8x74v2())
            sEnablePartialFrameUpdate = true;
    }
    ALOGE_IF(isDebug(), "%s: Partial Update applicable?: %d",__FUNCTION__,
                                                    sEnablePartialFrameUpdate);

    // Optional cap on how many pipes a single mixer may consume; negative
    // values (including the unset default "-1") leave the maximum in place.
    sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
    if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
    }

    if(ctx->mMDP.panel != MIPI_CMD_PANEL) {
        // Idle invalidation is not necessary on command mode panels
        long idle_timeout = DEFAULT_IDLE_TIME;
        // debug.mdpcomp.idletime overrides the timeout; -1 disables it.
        if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) {
            if(atoi(property) != 0)
                idle_timeout = atoi(property);
        }

        //create Idle Invalidator only when not disabled through property
        if(idle_timeout != -1)
            idleInvalidator = IdleInvalidator::getInstance();

        if(idleInvalidator == NULL) {
            ALOGE("%s: failed to instantiate idleInvalidator object",
                  __FUNCTION__);
        } else {
            idleInvalidator->init(timeout_handler, ctx, idle_timeout);
        }
    }

    // 4k2k YUV split: "1" or "true" enables splitting across two pipes.
    if((property_get("debug.mdpcomp.4k2kSplit", property, "0") > 0) &&
            (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
             (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnable4k2kYUVSplit = true;
    }
    return true;
}
163
164void MDPComp::reset(hwc_context_t *ctx) {
165    const int numLayers = ctx->listStats[mDpy].numAppLayers;
166    mCurrentFrame.reset(numLayers);
167    ctx->mOverlay->clear(mDpy);
168    ctx->mLayerRotMap[mDpy]->clear();
169}
170
// IdleInvalidator callback: after an idle period, set the idle-fallback
// flag and ask SurfaceFlinger to redraw so the next frame can be
// recomposed (tryFullFrame checks sIdleFallBack).
void MDPComp::timeout_handler(void *udata) {
    struct hwc_context_t* ctx = (struct hwc_context_t*)(udata);

    if(!ctx) {
        ALOGE("%s: received empty data in timer callback", __FUNCTION__);
        return;
    }
    // Serialize with other users of mDrawLock before touching shared state.
    Locker::Autolock _l(ctx->mDrawLock);
    // Handle timeout event only if the previous composition is MDP or MIXED.
    if(!sHandleTimeout) {
        ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__);
        return;
    }
    if(!ctx->proc) {
        ALOGE("%s: HWC proc not registered", __FUNCTION__);
        return;
    }
    sIdleFallBack = true;
    /* Trigger SF to redraw the current frame */
    ctx->proc->invalidate(ctx->proc);
}
192
193void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx,
194                                   hwc_display_contents_1_t* list) {
195    LayerProp *layerProp = ctx->layerProp[mDpy];
196
197    for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) {
198        hwc_layer_1_t* layer = &(list->hwLayers[index]);
199        if(!mCurrentFrame.isFBComposed[index]) {
200            layerProp[index].mFlags |= HWC_MDPCOMP;
201            layer->compositionType = HWC_OVERLAY;
202            layer->hints |= HWC_HINT_CLEAR_FB;
203        } else {
204            /* Drop the layer when its already present in FB OR when it lies
205             * outside frame's ROI */
206            if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
207                layer->compositionType = HWC_OVERLAY;
208            }
209        }
210    }
211}
212
213void MDPComp::setRedraw(hwc_context_t *ctx,
214        hwc_display_contents_1_t* list) {
215    mCurrentFrame.needsRedraw = false;
216    if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
217            (list->flags & HWC_GEOMETRY_CHANGED) ||
218            isSkipPresent(ctx, mDpy)) {
219        mCurrentFrame.needsRedraw = true;
220    }
221}
222
// Zero the pipe map before calling reset() so reset() does not try to
// delete garbage pipeInfo pointers on first use.
MDPComp::FrameInfo::FrameInfo() {
    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    reset(0);
}
227
// Releases per-pipe state and re-initializes frame bookkeeping for
// |numLayers| app layers: initially every layer is FB-composed, nothing is
// mapped to MDP, and the FB z-order is unset (-1).
void MDPComp::FrameInfo::reset(const int& numLayers) {
    for(int i = 0 ; i < MAX_PIPES_PER_MIXER; i++ ) {
        if(mdpToLayer[i].pipeInfo) {
            delete mdpToLayer[i].pipeInfo;
            mdpToLayer[i].pipeInfo = NULL;
            //We dont own the rotator
            mdpToLayer[i].rot = NULL;
        }
    }

    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    // Byte-wise fills: every int in layerToMDP becomes -1 (all 0xFF bytes),
    // every bool in isFBComposed becomes true (each byte set to 1).
    memset(&layerToMDP, -1, sizeof(layerToMDP));
    memset(&isFBComposed, 1, sizeof(isFBComposed));

    layerCount = numLayers;
    fbCount = numLayers;
    mdpCount = 0;
    needsRedraw = true;
    fbZ = -1;
}
248
249void MDPComp::FrameInfo::map() {
250    // populate layer and MDP maps
251    int mdpIdx = 0;
252    for(int idx = 0; idx < layerCount; idx++) {
253        if(!isFBComposed[idx]) {
254            mdpToLayer[mdpIdx].listIndex = idx;
255            layerToMDP[idx] = mdpIdx++;
256        }
257    }
258}
259
// Start with an empty cache: no handles, everything FB-composed.
MDPComp::LayerCache::LayerCache() {
    reset();
}
263
// Clears cached buffer handles and marks all slots FB-composed/not-dropped.
void MDPComp::LayerCache::reset() {
    memset(&hnd, 0, sizeof(hnd));
    // memset writes one byte per bool: true -> 1, false -> 0.
    memset(&isFBComposed, true, sizeof(isFBComposed));
    memset(&drop, false, sizeof(drop));
    layerCount = 0;
}
270
271void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) {
272    const int numAppLayers = list->numHwLayers - 1;
273    for(int i = 0; i < numAppLayers; i++) {
274        hnd[i] = list->hwLayers[i].handle;
275    }
276}
277
278void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
279    layerCount = curFrame.layerCount;
280    memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed));
281    memcpy(&drop, &curFrame.drop, sizeof(drop));
282}
283
284bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame,
285                                      hwc_display_contents_1_t* list) {
286    if(layerCount != curFrame.layerCount)
287        return false;
288    for(int i = 0; i < curFrame.layerCount; i++) {
289        if((curFrame.isFBComposed[i] != isFBComposed[i]) ||
290                (curFrame.drop[i] != drop[i])) {
291            return false;
292        }
293        if(curFrame.isFBComposed[i] &&
294           (hnd[i] != list->hwLayers[i].handle)){
295            return false;
296        }
297    }
298    return true;
299}
300
301bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
302    private_handle_t *hnd = (private_handle_t *)layer->handle;
303    if((not isYuvBuffer(hnd) and has90Transform(layer)) or
304        (not isValidDimension(ctx,layer))
305        //More conditions here, SKIP, sRGB+Blend etc
306        ) {
307        return false;
308    }
309    return true;
310}
311
// Checks whether |layer|'s crop/destination geometry can be handled by an
// MDP pipe: rejects NULL handles (except color-fill layers), non-integral
// source crops on non-secure buffers, tiny crops, and scale factors beyond
// the hardware's downscale/upscale limits.
bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;

    if(!hnd) {
        if (layer->flags & HWC_COLOR_FILL) {
            // Color layer
            return true;
        }
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return false;
    }

    //XXX: Investigate doing this with pixel phase on MDSS
    if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf))
        return false;

    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int crop_w = crop.right - crop.left;
    int crop_h = crop.bottom - crop.top;
    int dst_w = dst.right - dst.left;
    int dst_h = dst.bottom - dst.top;
    // > 1.0 means downscale, < 1.0 means upscale.
    float w_scale = ((float)crop_w / (float)dst_w);
    float h_scale = ((float)crop_h / (float)dst_h);

    /* Workaround for MDP HW limitation in DSI command mode panels where
     * FPS will not go beyond 30 if buffers on RGB pipes are of width or height
     * less than 5 pixels
     * There also is a HW limilation in MDP, minimum block size is 2x2
     * Fallback to GPU if height is less than 2.
     */
    if((crop_w < 5)||(crop_h < 5))
        return false;

    if((w_scale > 1.0f) || (h_scale > 1.0f)) {
        const uint32_t maxMDPDownscale =
            qdutils::MDPVersion::getInstance().getMaxMDPDownscale();
        const float w_dscale = w_scale;
        const float h_dscale = h_scale;

        if(ctx->mMDP.version >= qdutils::MDSS_V5) {

            if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
                /* On targets that doesnt support Decimation (eg.,8x26)
                 * maximum downscale support is overlay pipe downscale.
                 */
                if(crop_w > MAX_DISPLAY_DIM || w_dscale > maxMDPDownscale ||
                        h_dscale > maxMDPDownscale)
                    return false;
            } else {
                // Decimation on macrotile format layers is not supported.
                if(isTileRendered(hnd)) {
                    /* MDP can read maximum MAX_DISPLAY_DIM width.
                     * Bail out if
                     *      1. Src crop > MAX_DISPLAY_DIM on nonsplit MDPComp
                     *      2. exceeds maximum downscale limit
                     */
                    if(((crop_w > MAX_DISPLAY_DIM) && !sSrcSplitEnabled) ||
                            w_dscale > maxMDPDownscale ||
                            h_dscale > maxMDPDownscale) {
                        return false;
                    }
                } else if(w_dscale > 64 || h_dscale > 64)
                    // With decimation, downscales up to 64x are accepted.
                    return false;
            }
        } else { //A-family
            if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale)
                return false;
        }
    }

    if((w_scale < 1.0f) || (h_scale < 1.0f)) {
        const uint32_t upscale =
            qdutils::MDPVersion::getInstance().getMaxMDPUpscale();
        const float w_uscale = 1.0f / w_scale;
        const float h_uscale = 1.0f / h_scale;

        if(w_uscale > upscale || h_uscale > upscale)
            return false;
    }

    return true;
}
395
// Returns a free MDP pipe of (at least) the requested class on |mixer|, or
// OV_INVALID if none is available. The case fallthroughs are intentional:
// for MDPCOMP_OV_ANY the preference order is DMA -> RGB -> VG.
ovutils::eDest MDPComp::getMdpPipe(hwc_context_t *ctx, ePipeType type,
        int mixer) {
    overlay::Overlay& ov = *ctx->mOverlay;
    ovutils::eDest mdp_pipe = ovutils::OV_INVALID;

    switch(type) {
    case MDPCOMP_OV_DMA:
        mdp_pipe = ov.nextPipe(ovutils::OV_MDP_PIPE_DMA, mDpy, mixer);
        if(mdp_pipe != ovutils::OV_INVALID) {
            return mdp_pipe;
        }
        // fall through: no DMA pipe free, try an RGB pipe
    case MDPCOMP_OV_ANY:
    case MDPCOMP_OV_RGB:
        mdp_pipe = ov.nextPipe(ovutils::OV_MDP_PIPE_RGB, mDpy, mixer);
        if(mdp_pipe != ovutils::OV_INVALID) {
            return mdp_pipe;
        }

        if(type == MDPCOMP_OV_RGB) {
            //Requested only for RGB pipe
            break;
        }
        // fall through: ANY/DMA requests may also use a VG pipe
    case  MDPCOMP_OV_VG:
        return ov.nextPipe(ovutils::OV_MDP_PIPE_VG, mDpy, mixer);
    default:
        ALOGE("%s: Invalid pipe type",__FUNCTION__);
        return ovutils::OV_INVALID;
    };
    return ovutils::OV_INVALID;
}
426
427bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
428    bool ret = true;
429
430    if(!isEnabled()) {
431        ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
432        ret = false;
433    } else if(qdutils::MDPVersion::getInstance().is8x26() &&
434            ctx->mVideoTransFlag &&
435            isSecondaryConnected(ctx)) {
436        //1 Padding round to shift pipes across mixers
437        ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round",
438                __FUNCTION__);
439        ret = false;
440    } else if(isSecondaryConfiguring(ctx)) {
441        ALOGD_IF( isDebug(),"%s: External Display connection is pending",
442                  __FUNCTION__);
443        ret = false;
444    } else if(ctx->isPaddingRound) {
445        ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
446                 __FUNCTION__,mDpy);
447        ret = false;
448    }
449    return ret;
450}
451
452/*
453 * 1) Identify layers that are not visible in the updating ROI and drop them
454 * from composition.
455 * 2) If we have a scaling layers which needs cropping against generated ROI.
456 * Reset ROI to full resolution.
457 */
458bool MDPComp::validateAndApplyROI(hwc_context_t *ctx,
459                               hwc_display_contents_1_t* list, hwc_rect_t roi) {
460    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
461
462    if(!isValidRect(roi))
463        return false;
464
465    hwc_rect_t visibleRect = roi;
466
467    for(int i = numAppLayers - 1; i >= 0; i--){
468
469        if(!isValidRect(visibleRect)) {
470            mCurrentFrame.drop[i] = true;
471            mCurrentFrame.dropCount++;
472            continue;
473        }
474
475        const hwc_layer_1_t* layer =  &list->hwLayers[i];
476
477        hwc_rect_t dstRect = layer->displayFrame;
478        hwc_rect_t srcRect = integerizeSourceCrop(layer->sourceCropf);
479
480        hwc_rect_t res  = getIntersection(visibleRect, dstRect);
481
482        int res_w = res.right - res.left;
483        int res_h = res.bottom - res.top;
484        int dst_w = dstRect.right - dstRect.left;
485        int dst_h = dstRect.bottom - dstRect.top;
486
487        if(!isValidRect(res)) {
488            mCurrentFrame.drop[i] = true;
489            mCurrentFrame.dropCount++;
490        }else {
491            /* Reset frame ROI when any layer which needs scaling also needs ROI
492             * cropping */
493            if((res_w != dst_w || res_h != dst_h) && needsScaling (layer)) {
494                ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
495                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
496                mCurrentFrame.dropCount = 0;
497                return false;
498            }
499
500            /* deduct any opaque region from visibleRect */
501            if (layer->blending == HWC_BLENDING_NONE)
502                visibleRect = deductRect(visibleRect, res);
503        }
504    }
505    return true;
506}
507
508void MDPComp::generateROI(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
509    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
510
511    if(!sEnablePartialFrameUpdate) {
512        return;
513    }
514
515    if(mDpy || isDisplaySplit(ctx, mDpy)){
516        ALOGE_IF(isDebug(), "%s: ROI not supported for"
517                 "the (1) external / virtual display's (2) dual DSI displays",
518                 __FUNCTION__);
519        return;
520    }
521
522    if(isSkipPresent(ctx, mDpy))
523        return;
524
525    if(list->flags & HWC_GEOMETRY_CHANGED)
526        return;
527
528    struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
529    for(int index = 0; index < numAppLayers; index++ ) {
530        if ((mCachedFrame.hnd[index] != list->hwLayers[index].handle) ||
531            isYuvBuffer((private_handle_t *)list->hwLayers[index].handle)) {
532            hwc_rect_t dstRect = list->hwLayers[index].displayFrame;
533            hwc_rect_t srcRect = integerizeSourceCrop(
534                                        list->hwLayers[index].sourceCropf);
535
536            /* Intersect against display boundaries */
537            roi = getUnion(roi, dstRect);
538        }
539    }
540
541    if(!validateAndApplyROI(ctx, list, roi)){
542        roi = (struct hwc_rect) {0, 0,
543                    (int)ctx->dpyAttr[mDpy].xres, (int)ctx->dpyAttr[mDpy].yres};
544    }
545
546    ctx->listStats[mDpy].roi.x = roi.left;
547    ctx->listStats[mDpy].roi.y = roi.top;
548    ctx->listStats[mDpy].roi.w = roi.right - roi.left;
549    ctx->listStats[mDpy].roi.h = roi.bottom - roi.top;
550
551    ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
552                               roi.left, roi.top, roi.right, roi.bottom);
553}
554
555/* Checks for conditions where all the layers marked for MDP comp cannot be
556 * bypassed. On such conditions we try to bypass atleast YUV layers */
// Tries to compose the whole frame via MDP (full or mixed mode). Hard
// preconditions (idle fallback, SKIP layers, rotator availability,
// target-specific limits, AD active) are checked first; then full MDP comp
// is attempted, falling back to partial (mixed) composition.
bool MDPComp::tryFullFrame(hwc_context_t *ctx,
                                hwc_display_contents_1_t* list){

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres;

    // After an idle timeout fall back to GPU (unless secure UI is up).
    if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) {
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
        return false;
    }

    if(isSkipPresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(),"%s: SKIP present: %d",
                __FUNCTION__,
                isSkipPresent(ctx, mDpy));
        return false;
    }

    if(mDpy > HWC_DISPLAY_PRIMARY && (priDispW > MAX_DISPLAY_DIM) &&
                              (ctx->dpyAttr[mDpy].xres < MAX_DISPLAY_DIM)) {
        // Disable MDP comp on Secondary when the primary is highres panel and
        // the secondary is a normal 1080p, because, MDP comp on secondary under
        // in such usecase, decimation gets used for downscale and there will be
        // a quality mismatch when there will be a fallback to GPU comp
        ALOGD_IF(isDebug(), "%s: Disable MDP Compositon for Secondary Disp",
              __FUNCTION__);
        return false;
    }

    // check for action safe flag and downscale mode which requires scaling.
    if(ctx->dpyAttr[mDpy].mActionSafePresent
            || ctx->dpyAttr[mDpy].mDownScaleMode) {
        ALOGD_IF(isDebug(), "%s: Scaling needed for this frame",__FUNCTION__);
        return false;
    }

    for(int i = 0; i < numAppLayers; ++i) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        // Rotated video needs a rotator session; bail if none is usable.
        if(isYuvBuffer(hnd) && has90Transform(layer)) {
            if(!canUseRotator(ctx, mDpy)) {
                ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
                        __FUNCTION__, mDpy);
                return false;
            }
        }

        //For 8x26 with panel width>1k, if RGB layer needs HFLIP fail mdp comp
        // may not need it if Gfx pre-rotation can handle all flips & rotations
        if(qdutils::MDPVersion::getInstance().is8x26() &&
                                (ctx->dpyAttr[mDpy].xres > 1024) &&
                                (layer->transform & HWC_TRANSFORM_FLIP_H) &&
                                (!isYuvBuffer(hnd)))
                   return false;
    }

    // Assertive Display takes over; skip MDP comp while it is doable.
    if(ctx->mAD->isDoable()) {
        return false;
    }

    //If all above hard conditions are met we can do full or partial MDP comp.
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }

    return ret;
}
628
// Attempts to compose every non-dropped layer via MDP with no FB target.
// Fails if any layer is unsupported, target-specific limits are exceeded,
// or pipe allocation (postHeuristicsHandling) fails.
bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    //Will benefit presentation / secondary-only layer.
    if((mDpy > HWC_DISPLAY_PRIMARY) &&
            (list->numHwLayers - 1) > MAX_SEC_LAYERS) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }

        //For 8x26, if there is only one layer which needs scale for secondary
        //while no scale for primary display, DMA pipe is occupied by primary.
        //If need to fall back to GLES composition, virtual display lacks DMA
        //pipe and error is reported.
        if(qdutils::MDPVersion::getInstance().is8x26() &&
                                mDpy >= HWC_DISPLAY_EXTERNAL &&
                                qhwc::needsScaling(layer))
            return false;
    }

    mCurrentFrame.fbCount = 0;
    // Dropped layers keep their isFBComposed marking (copied from drop[])
    // so they are skipped when mapping layers to pipes; they count neither
    // as MDP nor as FB layers in mdpCount below.
    memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
           sizeof(mCurrentFrame.isFBComposed));
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
        mCurrentFrame.dropCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }

    return true;
}
673
674bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
675{
676    if(!sEnableMixedMode) {
677        //Mixed mode is disabled. No need to even try caching.
678        return false;
679    }
680
681    bool ret = false;
682    if(list->flags & HWC_GEOMETRY_CHANGED) { //Try load based first
683        ret =   loadBasedComp(ctx, list) or
684                cacheBasedComp(ctx, list);
685    } else {
686        ret =   cacheBasedComp(ctx, list) or
687                loadBasedComp(ctx, list);
688    }
689
690    return ret;
691}
692
// Mixed-mode strategy driven by the layer cache: unchanged layers are
// batched into the FB target while updating layers go to MDP pipes.
// Returns false (after resetting state) when any MDP-marked layer is
// unsupported, batching fails, limits are exceeded, or pipe allocation
// fails.
bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list);

    //If an MDP marked layer is unsupported cannot do partial MDP Comp
    for(int i = 0; i < numAppLayers; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(not isSupportedForMDPComp(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
                        __FUNCTION__);
                reset(ctx);
                return false;
            }
        }
    }

    updateYUV(ctx, list, false /*secure only*/);
    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
    if(!ret) {
        ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
        reset(ctx);
        return false;
    }

    int mdpCount = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    //Will benefit cases where a video has non-updating background.
    if((mDpy > HWC_DISPLAY_PRIMARY) and
            (mdpCount > MAX_SEC_LAYERS)) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        reset(ctx);
        return false;
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }

    return true;
}
742
// Mixed-mode strategy driven by pipe load: the bottom-most layers go to
// MDP pipes and the top-most ones are batched into the FB, trying
// successively smaller MDP batch sizes until pipe allocation succeeds.
bool MDPComp::loadBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(not isLoadBasedCompDoable(ctx)) {
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount;
    // Mixer stages usable for MDP layers, bounded by free pipes.
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    int mdpBatchSize = stagesForMDP - 1; //1 stage for FB
    int fbBatchSize = numNonDroppedLayers - mdpBatchSize;
    int lastMDPSupportedIndex = numAppLayers;
    int dropCount = 0;

    //Find the minimum MDP batch size
    // The MDP batch can only extend up to the first unsupported layer.
    for(int i = 0; i < numAppLayers;i++) {
        if(mCurrentFrame.drop[i]) {
            dropCount++;
            continue;
        }
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            lastMDPSupportedIndex = i;
            mdpBatchSize = min(i - dropCount, stagesForMDP - 1);
            fbBatchSize = numNonDroppedLayers - mdpBatchSize;
            break;
        }
    }

    ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d "
            "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize,
            mCurrentFrame.dropCount);

    //Start at a point where the fb batch should at least have 2 layers, for
    //this mode to be justified.
    while(fbBatchSize < 2) {
        ++fbBatchSize;
        --mdpBatchSize;
    }

    //If there are no layers for MDP, this mode doesnt make sense.
    if(mdpBatchSize < 1) {
        ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch",
                __FUNCTION__);
        return false;
    }

    mCurrentFrame.reset(numAppLayers);

    //Try with successively smaller mdp batch sizes until we succeed or reach 1
    while(mdpBatchSize > 0) {
        //Mark layers for MDP comp
        // Bottom-up: the first mdpBatchSize non-dropped, supported layers
        // get MDP pipes; everything above stays FB-composed.
        int mdpBatchLeft = mdpBatchSize;
        for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) {
            if(mCurrentFrame.drop[i]) {
                continue;
            }
            mCurrentFrame.isFBComposed[i] = false;
            --mdpBatchLeft;
        }

        // FB sits on top of the MDP batch in z-order.
        mCurrentFrame.fbZ = mdpBatchSize;
        mCurrentFrame.fbCount = fbBatchSize;
        mCurrentFrame.mdpCount = mdpBatchSize;

        ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d",
                __FUNCTION__, mdpBatchSize, fbBatchSize,
                mCurrentFrame.dropCount);

        if(postHeuristicsHandling(ctx, list)) {
            ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded",
                    __FUNCTION__);
            return true;
        }

        reset(ctx);
        --mdpBatchSize;
        ++fbBatchSize;
    }

    return false;
}
827
828bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) {
829    if(mDpy or isSecurePresent(ctx, mDpy) or
830            isYuvPresent(ctx, mDpy)) {
831        return false;
832    }
833    return true;
834}
835
836bool MDPComp::tryVideoOnly(hwc_context_t *ctx,
837        hwc_display_contents_1_t* list) {
838    const bool secureOnly = true;
839    return videoOnlyComp(ctx, list, not secureOnly) or
840            videoOnlyComp(ctx, list, secureOnly);
841}
842
// Composes only the YUV (video) layers via MDP, batching everything else
// into the FB on top. |secureOnly| restricts the MDP set to secure video.
// Returns false (after resetting state) when there is no suitable video
// or pipe allocation fails.
bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list, bool secureOnly) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    mCurrentFrame.reset(numAppLayers);
    updateYUV(ctx, list, secureOnly);
    int mdpCount = mCurrentFrame.mdpCount;

    if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) {
        reset(ctx);
        return false;
    }

    /* Bail out if we are processing only secured video layers
     * and we dont have any */
    if(!isSecurePresent(ctx, mDpy) && secureOnly){
        reset(ctx);
        return false;
    }

    // FB (if any layers remain on it) sits above all MDP video layers.
    if(mCurrentFrame.fbCount)
        mCurrentFrame.fbZ = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }

    return true;
}
878
879/* Checks for conditions where YUV layers cannot be bypassed */
880bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
881    if(isSkipLayer(layer)) {
882        ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy);
883        return false;
884    }
885
886    if(layer->transform & HWC_TRANSFORM_ROT_90 && !canUseRotator(ctx,mDpy)) {
887        ALOGD_IF(isDebug(), "%s: no free DMA pipe",__FUNCTION__);
888        return false;
889    }
890
891    if(isSecuring(ctx, layer)) {
892        ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
893        return false;
894    }
895
896    if(!isValidDimension(ctx, layer)) {
897        ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
898            __FUNCTION__);
899        return false;
900    }
901
902    if(layer->planeAlpha < 0xFF) {
903        ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha\
904                 in video only mode",
905                 __FUNCTION__);
906        return false;
907    }
908
909    return true;
910}
911
912/* starts at fromIndex and check for each layer to find
913 * if it it has overlapping with any Updating layer above it in zorder
914 * till the end of the batch. returns true if it finds any intersection */
915bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
916        int fromIndex, int toIndex) {
917    for(int i = fromIndex; i < toIndex; i++) {
918        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
919            if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
920                return false;
921            }
922        }
923    }
924    return true;
925}
926
927/* Checks if given layer at targetLayerIndex has any
928 * intersection with all the updating layers in beween
929 * fromIndex and toIndex. Returns true if it finds intersectiion */
930bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
931        int fromIndex, int toIndex, int targetLayerIndex) {
932    for(int i = fromIndex; i <= toIndex; i++) {
933        if(!mCurrentFrame.isFBComposed[i]) {
934            if(areLayersIntersecting(&list->hwLayers[i],
935                        &list->hwLayers[targetLayerIndex]))  {
936                return true;
937            }
938        }
939    }
940    return false;
941}
942
/* Finds the largest batch of contiguous (in z-order) FB-composed layers,
 * optionally growing a batch past updating layers when they do not
 * overlap it (see intersectingUpdatingLayers / canPushBatchToTop).
 * Outputs the winning batch via the reference params and returns the
 * z-order for the FB target, or -1 if no batch was found. */
int MDPComp::getBatch(hwc_display_contents_1_t* list,
        int& maxBatchStart, int& maxBatchEnd,
        int& maxBatchCount) {
    int i = 0;
    int fbZOrder =-1;
    int droppedLayerCt = 0;
    //Outer loop: each iteration attempts one candidate batch starting at i.
    while (i < mCurrentFrame.layerCount) {
        int batchCount = 0;
        int batchStart = i;
        int batchEnd = i;
        /* Adjust batch Z order with the dropped layers so far */
        int fbZ = batchStart - droppedLayerCt;
        int firstZReverseIndex = -1;
        int updatingLayersAbove = 0;//Updating layer count in middle of batch
        //Inner loop: extend the current batch as far as possible.
        while(i < mCurrentFrame.layerCount) {
            if(!mCurrentFrame.isFBComposed[i]) {
                //Updating (MDP) layer: ends an empty batch, otherwise it is
                //tentatively skipped over and counted.
                if(!batchCount) {
                    i++;
                    break;
                }
                updatingLayersAbove++;
                i++;
                continue;
            } else {
                if(mCurrentFrame.drop[i]) {
                    //Dropped layers don't occupy a z-order stage.
                    i++;
                    droppedLayerCt++;
                    continue;
                } else if(updatingLayersAbove <= 0) {
                    //Plain contiguous cached layer: extend the batch.
                    batchCount++;
                    batchEnd = i;
                    i++;
                    continue;
                } else { //Layer is FBComposed, not a drop & updatingLayer > 0

                    // We have a valid updating layer already. If layer-i not
                    // have overlapping with all updating layers in between
                    // batch-start and i, then we can add layer i to batch.
                    if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
                        batchCount++;
                        batchEnd = i;
                        i++;
                        continue;
                    } else if(canPushBatchToTop(list, batchStart, i)) {
                        //If All the non-updating layers with in this batch
                        //does not have intersection with the updating layers
                        //above in z-order, then we can safely move the batch to
                        //higher z-order. Increment fbZ as it is moving up.
                        if( firstZReverseIndex < 0) {
                            firstZReverseIndex = i;
                        }
                        batchCount++;
                        batchEnd = i;
                        fbZ += updatingLayersAbove;
                        i++;
                        updatingLayersAbove = 0;
                        continue;
                    } else {
                        //both failed.start the loop again from here.
                        if(firstZReverseIndex >= 0) {
                            i = firstZReverseIndex;
                        }
                        break;
                    }
                }
            }
        }
        //Keep the best (largest) batch seen so far.
        if(batchCount > maxBatchCount) {
            maxBatchCount = batchCount;
            maxBatchStart = batchStart;
            maxBatchEnd = batchEnd;
            fbZOrder = fbZ;
        }
    }
    return fbZOrder;
}
1019
/* Partitions the frame so the largest possible batch of cached
 * (non-updating) layers stays in FB while everything else goes to MDP.
 * Returns false when no MDP layers exist or a layer outside the batch
 * cannot be handled by MDP (caller falls back to other modes). */
bool  MDPComp::markLayersForCaching(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    /* Idea is to keep as many non-updating(cached) layers in FB and
     * send rest of them through MDP. This is done in 2 steps.
     *   1. Find the maximum contiguous batch of non-updating layers.
     *   2. See if we can improve this batch size for caching by adding
     *      opaque layers around the batch, if they don't have
     *      any overlapping with the updating layers in between.
     * NEVER mark an updating layer for caching.
     * But cached ones can be marked for MDP */

    int maxBatchStart = -1;
    int maxBatchEnd = -1;
    int maxBatchCount = 0;
    int fbZ = -1;

    /* Nothing is cached. No batching needed */
    if(mCurrentFrame.fbCount == 0) {
        return true;
    }

    /* No MDP comp layers, try to use other comp modes */
    if(mCurrentFrame.mdpCount == 0) {
        return false;
    }

    fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);

    /* reset rest of the layers lying inside ROI for MDP comp */
    for(int i = 0; i < mCurrentFrame.layerCount; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        //Layers outside the winning batch are moved to MDP.
        if((i < maxBatchStart || i > maxBatchEnd) &&
                mCurrentFrame.isFBComposed[i]){
            if(!mCurrentFrame.drop[i]){
                //If an unsupported layer is being attempted to
                //be pulled out we should fail
                if(not isSupportedForMDPComp(ctx, layer)) {
                    return false;
                }
                mCurrentFrame.isFBComposed[i] = false;
            }
        }
    }

    // update the frame data
    mCurrentFrame.fbZ = fbZ;
    mCurrentFrame.fbCount = maxBatchCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
            mCurrentFrame.fbCount - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
            mCurrentFrame.fbCount);

    return true;
}
1075
1076void MDPComp::updateLayerCache(hwc_context_t* ctx,
1077        hwc_display_contents_1_t* list) {
1078    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
1079    int fbCount = 0;
1080
1081    for(int i = 0; i < numAppLayers; i++) {
1082        if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) {
1083            if(!mCurrentFrame.drop[i])
1084                fbCount++;
1085            mCurrentFrame.isFBComposed[i] = true;
1086        } else {
1087            mCurrentFrame.isFBComposed[i] = false;
1088        }
1089    }
1090
1091    mCurrentFrame.fbCount = fbCount;
1092    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount
1093                                                    - mCurrentFrame.dropCount;
1094
1095    ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d"
1096             ,__FUNCTION__, mCurrentFrame.mdpCount, mCurrentFrame.fbCount,
1097            mCurrentFrame.dropCount);
1098}
1099
1100void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
1101        bool secureOnly) {
1102    int nYuvCount = ctx->listStats[mDpy].yuvCount;
1103    for(int index = 0;index < nYuvCount; index++){
1104        int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
1105        hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];
1106
1107        if(!isYUVDoable(ctx, layer)) {
1108            if(!mCurrentFrame.isFBComposed[nYuvIndex]) {
1109                mCurrentFrame.isFBComposed[nYuvIndex] = true;
1110                mCurrentFrame.fbCount++;
1111            }
1112        } else {
1113            if(mCurrentFrame.isFBComposed[nYuvIndex]) {
1114                private_handle_t *hnd = (private_handle_t *)layer->handle;
1115                if(!secureOnly || isSecureBuffer(hnd)) {
1116                    mCurrentFrame.isFBComposed[nYuvIndex] = false;
1117                    mCurrentFrame.fbCount--;
1118                }
1119            }
1120        }
1121    }
1122
1123    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
1124            mCurrentFrame.fbCount - mCurrentFrame.dropCount;
1125    ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
1126             mCurrentFrame.fbCount);
1127}
1128
1129hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
1130        hwc_display_contents_1_t* list){
1131    hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};
1132    hwc_layer_1_t *fbLayer = &list->hwLayers[mCurrentFrame.layerCount];
1133
1134    /* Update only the region of FB needed for composition */
1135    for(int i = 0; i < mCurrentFrame.layerCount; i++ ) {
1136        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
1137            hwc_layer_1_t* layer = &list->hwLayers[i];
1138            hwc_rect_t dst = layer->displayFrame;
1139            fbRect = getUnion(fbRect, dst);
1140        }
1141    }
1142    return fbRect;
1143}
1144
/* Common handling once a composition strategy has marked layers:
 * checks pipe budget and HW limits, configures the FB target (if any),
 * allocates and configures an MDP pipe per layer, then validates the
 * whole overlay state with the driver. Returns false on any failure;
 * the caller is expected to reset() and try another strategy. */
bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    //Capability checks
    if(!resourceCheck(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
        return false;
    }

    //Limitations checks
    if(!hwLimitationsCheck(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
        return false;
    }

    //Configure framebuffer first if applicable
    if(mCurrentFrame.fbZ >= 0) {
        hwc_rect_t fbRect = getUpdatingFBRect(ctx, list);
        if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ))
        {
            ALOGD_IF(isDebug(), "%s configure framebuffer failed",
                    __FUNCTION__);
            return false;
        }
    }

    //Build layer <-> mdp slot mappings for the marked layers.
    mCurrentFrame.map();

    if(!allocLayerPipes(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
        return false;
    }

    //Assign ascending z-orders to MDP layers, skipping the stage reserved
    //for the FB target.
    for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
            index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            int mdpIndex = mCurrentFrame.layerToMDP[index];
            hwc_layer_1_t* layer = &list->hwLayers[index];

            //Leave fbZ for framebuffer. CACHE/GLES layers go here.
            if(mdpNextZOrder == mCurrentFrame.fbZ) {
                mdpNextZOrder++;
            }
            MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            cur_pipe->zOrder = mdpNextZOrder++;

            private_handle_t *hnd = (private_handle_t *)layer->handle;
            //Split 4k2k yuv consumes two pipes, hence two z-order stages.
            if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
                if(configure4k2kYuv(ctx, layer,
                            mCurrentFrame.mdpToLayer[mdpIndex])
                        != 0 ){
                    ALOGD_IF(isDebug(), "%s: Failed to configure split pipes \
                            for layer %d",__FUNCTION__, index);
                    return false;
                }
                else{
                    mdpNextZOrder++;
                }
                continue;
            }
            if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
                ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
                        layer %d",__FUNCTION__, index);
                return false;
            }
        }
    }

    //Commit the proposed pipe setup to the driver for validation.
    if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) {
        ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d"
                ,__FUNCTION__, mDpy);
        return false;
    }

    setRedraw(ctx, list);
    return true;
}
1222
1223bool MDPComp::resourceCheck(hwc_context_t *ctx,
1224        hwc_display_contents_1_t *list) {
1225    const bool fbUsed = mCurrentFrame.fbCount;
1226    if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) {
1227        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
1228        return false;
1229    }
1230    return true;
1231}
1232
/* Rejects frames that hit known MDP hardware limitations:
 *  - pre-MDSS-v5 targets cannot alpha-scale on MDP;
 *  - 8x26/8974v2 cannot blend two overlapping downscaled layers.
 * Returns false when the frame must fall back to GPU. */
bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {

    //A-family hw limitation:
    //If a layer need alpha scaling, MDP can not support.
    if(ctx->mMDP.version < qdutils::MDSS_V5) {
        for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
            if(!mCurrentFrame.isFBComposed[i] &&
                    isAlphaScaled( &list->hwLayers[i])) {
                ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__);
                return false;
            }
        }
    }

    // On 8x26 & 8974 hw, we have a limitation of downscaling+blending.
    //If multiple layers requires downscaling and also they are overlapping
    //fall back to GPU since MDSS can not handle it.
    if(qdutils::MDPVersion::getInstance().is8x74v2() ||
            qdutils::MDPVersion::getInstance().is8x26()) {
        for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) {
            hwc_layer_1_t* botLayer = &list->hwLayers[i];
            if(!mCurrentFrame.isFBComposed[i] &&
                    isDownscaleRequired(botLayer)) {
                //if layer-i is marked for MDP and needs downscaling
                //check if any MDP layer on top of i & overlaps with layer-i
                for(int j = i+1; j < mCurrentFrame.layerCount; ++j) {
                    hwc_layer_1_t* topLayer = &list->hwLayers[j];
                    if(!mCurrentFrame.isFBComposed[j] &&
                            isDownscaleRequired(topLayer)) {
                        hwc_rect_t r = getIntersection(botLayer->displayFrame,
                                topLayer->displayFrame);
                        if(isValidRect(r))
                            return false;
                    }
                }
            }
        }
    }
    return true;
}
1274
/* Entry point for the prepare phase on this display: decides the MDP/GPU
 * composition split for the frame. Returns 0 when an MDP strategy was
 * committed, -1 when the whole frame falls back to GPU. */
int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    int ret = 0;
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    MDPVersion& mdpVersion = qdutils::MDPVersion::getInstance();

    //Do not cache the information for next draw cycle.
    if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
        ALOGI("%s: Unsupported layer count for mdp composition",
                __FUNCTION__);
        mCachedFrame.reset();
        return -1;
    }

    //reset old data
    mCurrentFrame.reset(numLayers);
    memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
    mCurrentFrame.dropCount = 0;

    // Detect the start of animation and fall back to GPU only once to cache
    // all the layers in FB and display FB content untill animation completes.
    if(ctx->listStats[mDpy].isDisplayAnimating) {
        mCurrentFrame.needsRedraw = false;
        if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) {
            mCurrentFrame.needsRedraw = true;
            ctx->mAnimationState[mDpy] = ANIMATION_STARTED;
        }
        setMDPCompLayerFlags(ctx, list);
        mCachedFrame.updateCounts(mCurrentFrame);
        ret = -1;
        return ret;
    } else {
        ctx->mAnimationState[mDpy] = ANIMATION_STOPPED;
    }

    //Hard conditions, if not met, cannot do MDP comp
    if(isFrameDoable(ctx)) {
        generateROI(ctx, list);

        //Try full-MDP first, then video-only MDP; otherwise GPU fallback.
        if(tryFullFrame(ctx, list) || tryVideoOnly(ctx, list)) {
            setMDPCompLayerFlags(ctx, list);
        } else {
            reset(ctx);
            memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
            mCurrentFrame.dropCount = 0;
            ret = -1;
        }
    } else {
        ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
                __FUNCTION__);
        ret = -1;
    }

    if(isDebug()) {
        ALOGD("GEOMETRY change: %d",
                (list->flags & HWC_GEOMETRY_CHANGED));
        android::String8 sDump("");
        dump(sDump);
        ALOGD("%s",sDump.string());
    }

    //Remember this frame's handles/counts for next cycle's caching checks.
    mCachedFrame.cacheAll(list);
    mCachedFrame.updateCounts(mCurrentFrame);
    return ret;
}
1339
1340bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) {
1341
1342    bool bRet = true;
1343    int mdpIndex = mCurrentFrame.layerToMDP[index];
1344    PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
1345    info.pipeInfo = new MdpYUVPipeInfo;
1346    info.rot = NULL;
1347    MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo;
1348    ePipeType type =  MDPCOMP_OV_VG;
1349
1350    pipe_info.lIndex = ovutils::OV_INVALID;
1351    pipe_info.rIndex = ovutils::OV_INVALID;
1352
1353    pipe_info.lIndex = getMdpPipe(ctx, type, Overlay::MIXER_DEFAULT);
1354    if(pipe_info.lIndex == ovutils::OV_INVALID){
1355        bRet = false;
1356        ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed",
1357                __FUNCTION__);
1358    }
1359    pipe_info.rIndex = getMdpPipe(ctx, type, Overlay::MIXER_DEFAULT);
1360    if(pipe_info.rIndex == ovutils::OV_INVALID){
1361        bRet = false;
1362        ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed",
1363                __FUNCTION__);
1364    }
1365    return bRet;
1366}
1367//=============MDPCompNonSplit==================================================
1368
1369void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx,
1370        hwc_display_contents_1_t*) {
1371    //As we split 4kx2k yuv layer and program to 2 VG pipes
1372    //(if available) increase mdpcount accordingly
1373    mCurrentFrame.mdpCount += ctx->listStats[mDpy].yuv4k2kCount;
1374
1375    //If 4k2k Yuv layer split is possible,  and if
1376    //fbz is above 4k2k layer, increment fb zorder by 1
1377    //as we split 4k2k layer and increment zorder for right half
1378    //of the layer
1379    if(mCurrentFrame.fbZ >= 0) {
1380        int n4k2kYuvCount = ctx->listStats[mDpy].yuv4k2kCount;
1381        for(int index = 0; index < n4k2kYuvCount; index++){
1382            int n4k2kYuvIndex =
1383                    ctx->listStats[mDpy].yuv4k2kIndices[index];
1384            if(mCurrentFrame.fbZ > n4k2kYuvIndex){
1385                mCurrentFrame.fbZ += 1;
1386            }
1387        }
1388    }
1389}
1390
1391/*
1392 * Configures pipe(s) for MDP composition
1393 */
1394int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
1395                             PipeLayerPair& PipeLayerPair) {
1396    MdpPipeInfoNonSplit& mdp_info =
1397        *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo));
1398    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
1399    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
1400    eIsFg isFg = IS_FG_OFF;
1401    eDest dest = mdp_info.index;
1402
1403    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d",
1404             __FUNCTION__, layer, zOrder, dest);
1405
1406    return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest,
1407                           &PipeLayerPair.rot);
1408}
1409
/* Allocates one MDP pipe per MDP-marked layer, choosing the pipe class
 * (VG/RGB/DMA/ANY) from the layer's format and scaling needs. Returns
 * false as soon as any allocation fails. */
bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        //4k2k yuv with split enabled takes two VG pipes instead of one.
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if(allocSplitVGPipesfor4k2k(ctx, index)){
                continue;
            }
        }

        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoNonSplit;
        info.rot = NULL;
        MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo;
        ePipeType type = MDPCOMP_OV_ANY;

        //Pipe class selection; branch order matters.
        if(isYuvBuffer(hnd)) {
            //YUV formats need a VG pipe.
            type = MDPCOMP_OV_VG;
        } else if(qdutils::MDPVersion::getInstance().is8x26() &&
                (ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024)) {
            if(qhwc::needsScaling(layer))
                type = MDPCOMP_OV_RGB;
        } else if(!qhwc::needsScaling(layer)
            && Overlay::getDMAMode() != Overlay::DMA_BLOCK_MODE
            && ctx->mMDP.version >= qdutils::MDSS_V5) {
            //No scaling needed: a DMA pipe suffices on MDSS v5+.
            type = MDPCOMP_OV_DMA;
        } else if(qhwc::needsScaling(layer) &&
                !(ctx->listStats[mDpy].yuvCount) &&
                ! qdutils::MDPVersion::getInstance().isRGBScalarSupported()){
            //NOTE(review): picks VG when scaling is required but RGB scalar
            //support is absent and no video competes for VG — confirm intent.
            type = MDPCOMP_OV_VG;
        }

        pipe_info.index = getMdpPipe(ctx, type, Overlay::MIXER_DEFAULT);
        if(pipe_info.index == ovutils::OV_INVALID) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe type = %d",
                __FUNCTION__, (int) type);
            return false;
        }
    }
    return true;
}
1456
1457int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
1458        PipeLayerPair& PipeLayerPair) {
1459    MdpYUVPipeInfo& mdp_info =
1460            *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
1461    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
1462    eIsFg isFg = IS_FG_OFF;
1463    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
1464    eDest lDest = mdp_info.lIndex;
1465    eDest rDest = mdp_info.rIndex;
1466
1467    return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
1468            lDest, rDest, &PipeLayerPair.rot);
1469}
1470
/* Draw phase: queues each MDP-marked layer's buffer (through the rotator
 * first when one is attached) to the pipe(s) configured in prepare().
 * Returns false on any queueBuffer failure, aborting the frame. */
bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    //Nothing was configured for MDP this frame; not an error.
    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid contxt or list",__FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    // Set the Handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            //Only color-fill layers may legitimately have a null handle.
            if (!(layer->flags & HWC_COLOR_FILL)) {
                ALOGE("%s handle null", __FUNCTION__);
                return false;
            }
            // No PLAY for Color layer
            layerProp[i].mFlags &= ~HWC_MDPCOMP;
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        //Split 4k2k yuv: queue the same buffer to both the left and right
        //half pipes.
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = hnd->offset;
            //Rotator output replaces the source buffer when present.
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        else{
            //Regular single-pipe layer.
            MdpPipeInfoNonSplit& pipe_info =
            *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            ovutils::eDest dest = pipe_info.index;
            if(dest == ovutils::OV_INVALID) {
                ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
                return false;
            }

            if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
                continue;
            }

            ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                    using  pipe: %d", __FUNCTION__, layer,
                    hnd, dest );

            int fd = hnd->fd;
            uint32_t offset = hnd->offset;

            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            if(rot) {
                if(!rot->queueBuffer(fd, offset))
                    return false;
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            if (!ov.queueBuffer(fd, offset, dest)) {
                ALOGE("%s: queueBuffer failed for display:%d ",
                        __FUNCTION__, mDpy);
                return false;
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }
    return true;
}
1590
1591//=============MDPCompSplit===================================================
1592
1593void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx,
1594         hwc_display_contents_1_t* list){
1595    //if 4kx2k yuv layer is totally present in either in left half
1596    //or right half then try splitting the yuv layer to avoid decimation
1597    int n4k2kYuvCount = ctx->listStats[mDpy].yuv4k2kCount;
1598    const int lSplit = getLeftSplit(ctx, mDpy);
1599    for(int index = 0; index < n4k2kYuvCount; index++){
1600        int n4k2kYuvIndex = ctx->listStats[mDpy].yuv4k2kIndices[index];
1601        hwc_layer_1_t* layer = &list->hwLayers[n4k2kYuvIndex];
1602        hwc_rect_t dst = layer->displayFrame;
1603        if((dst.left > lSplit) || (dst.right < lSplit)) {
1604            mCurrentFrame.mdpCount += 1;
1605        }
1606        if(mCurrentFrame.fbZ > n4k2kYuvIndex){
1607            mCurrentFrame.fbZ += 1;
1608        }
1609    }
1610}
1611
1612bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
1613        MdpPipeInfoSplit& pipe_info,
1614        ePipeType type) {
1615    const int lSplit = getLeftSplit(ctx, mDpy);
1616
1617    hwc_rect_t dst = layer->displayFrame;
1618    pipe_info.lIndex = ovutils::OV_INVALID;
1619    pipe_info.rIndex = ovutils::OV_INVALID;
1620
1621    if (dst.left < lSplit) {
1622        pipe_info.lIndex = getMdpPipe(ctx, type, Overlay::MIXER_LEFT);
1623        if(pipe_info.lIndex == ovutils::OV_INVALID)
1624            return false;
1625    }
1626
1627    if(dst.right > lSplit) {
1628        pipe_info.rIndex = getMdpPipe(ctx, type, Overlay::MIXER_RIGHT);
1629        if(pipe_info.rIndex == ovutils::OV_INVALID)
1630            return false;
1631    }
1632
1633    return true;
1634}
1635
/* Allocates pipe(s) for every MDP-marked layer on a split display,
 * choosing the pipe class (VG/DMA/ANY) per layer. Returns false as soon
 * as any acquisition fails. */
bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        hwc_rect_t dst = layer->displayFrame;
        const int lSplit = getLeftSplit(ctx, mDpy);
        //4k2k yuv wholly within one panel half gets two dedicated VG pipes
        //(pipe split instead of decimation).
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if((dst.left > lSplit)||(dst.right < lSplit)){
                if(allocSplitVGPipesfor4k2k(ctx, index)){
                    continue;
                }
            }
        }
        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoSplit;
        info.rot = NULL;
        MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo;
        ePipeType type = MDPCOMP_OV_ANY;

        if(isYuvBuffer(hnd)) {
            //YUV formats need VG pipes.
            type = MDPCOMP_OV_VG;
        } else if(!qhwc::needsScalingWithSplit(ctx, layer, mDpy)
            && Overlay::getDMAMode() != Overlay::DMA_BLOCK_MODE
            && ctx->mMDP.version >= qdutils::MDSS_V5) {
            //No scaling across the split: DMA pipes suffice on MDSS v5+.
            type = MDPCOMP_OV_DMA;
        }

        if(!acquireMDPPipes(ctx, layer, pipe_info, type)) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe for type = %d",
                    __FUNCTION__, (int) type);
            return false;
        }
    }
    return true;
}
1676
1677int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
1678        PipeLayerPair& PipeLayerPair) {
1679    const int lSplit = getLeftSplit(ctx, mDpy);
1680    hwc_rect_t dst = layer->displayFrame;
1681    if((dst.left > lSplit)||(dst.right < lSplit)){
1682        MdpYUVPipeInfo& mdp_info =
1683                *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
1684        eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
1685        eIsFg isFg = IS_FG_OFF;
1686        eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
1687        eDest lDest = mdp_info.lIndex;
1688        eDest rDest = mdp_info.rIndex;
1689
1690        return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
1691                lDest, rDest, &PipeLayerPair.rot);
1692    }
1693    else{
1694        return configure(ctx, layer, PipeLayerPair);
1695    }
1696}
1697
1698/*
1699 * Configures pipe(s) for MDP composition
1700 */
1701int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
1702        PipeLayerPair& PipeLayerPair) {
1703    MdpPipeInfoSplit& mdp_info =
1704        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
1705    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
1706    eIsFg isFg = IS_FG_OFF;
1707    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
1708    eDest lDest = mdp_info.lIndex;
1709    eDest rDest = mdp_info.rIndex;
1710
1711    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d"
1712             "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest);
1713
1714    return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest,
1715                            rDest, &PipeLayerPair.rot);
1716}
1717
// Queues each MDP-composed layer's buffer to its previously-acquired pipes
// (left and/or right mixer) for the split display. Skips layers composed by
// the FB target or not flagged HWC_MDPCOMP, and clears the HWC_MDPCOMP flag
// on each layer it queues. Returns true when nothing needs drawing (comp
// disabled or too many layers) and false on a NULL handle or any
// queueBuffer failure.
bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid contxt or list",__FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    // Set the Handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    // mdpCount in the loop condition makes the whole loop a no-op when no
    // layer was assigned to MDP pipes this frame.
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            ALOGE("%s handle null", __FUNCTION__);
            return false;
        }

        if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        // Path 1: 4kx2k YUV layer given the split-VG allocation — queue the
        // (rotated, if applicable) buffer to both VG destinations.
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = hnd->offset;
            // If a rotator session exists, the pipes consume the rotator's
            // output buffer instead of the layer's own buffer.
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        // Path 2: regular split layer — optionally route through Assertive
        // Display and/or the rotator, then queue to left/right mixers.
        else{
            MdpPipeInfoSplit& pipe_info =
                *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;

            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;

            int fd = hnd->fd;
            // NOTE(review): declared int here but uint32_t in the 4k2k
            // branch above — confirm the expected offset type and unify.
            int offset = hnd->offset;

            // Assertive Display mode substitutes its processed output
            // buffer for the layer's buffer when its draw succeeds.
            if(ctx->mAD->isModeOn()) {
                if(ctx->mAD->draw(ctx, fd, offset)) {
                    fd = ctx->mAD->getDstFd();
                    offset = ctx->mAD->getDstOffset();
                }
            }

            // Rotator output (if any) supersedes the AD/layer buffer.
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            //************* play left mixer **********
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for left mixer",
                            __FUNCTION__);
                    return false;
                }
            }

            //************* play right mixer **********
            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using  pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for right mixer",
                            __FUNCTION__);
                    return false;
                }
            }
        }

        // Mark the layer as consumed by MDP composition for this frame.
        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }

    return true;
}
1851
1852//================MDPCompSrcSplit==============================================
1853bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
1854        MdpPipeInfoSplit& pipe_info, ePipeType /*type*/) {
1855    private_handle_t *hnd = (private_handle_t *)layer->handle;
1856    hwc_rect_t dst = layer->displayFrame;
1857    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
1858    pipe_info.lIndex = ovutils::OV_INVALID;
1859    pipe_info.rIndex = ovutils::OV_INVALID;
1860
1861    //If 2 pipes are staged on a single stage of a mixer, then the left pipe
1862    //should have a higher priority than the right one. Pipe priorities are
1863    //starting with VG0, VG1 ... , RGB0 ..., DMA1
1864    //TODO Currently we acquire VG pipes for left side and RGB/DMA for right to
1865    //make sure pipe priorities are satisfied. A better way is to have priority
1866    //as part of overlay object and acquire any 2 pipes. Assign the higher
1867    //priority one to left side and lower to right side.
1868
1869    //1 pipe by default for a layer
1870    pipe_info.lIndex = getMdpPipe(ctx, MDPCOMP_OV_VG, Overlay::MIXER_DEFAULT);
1871    if(pipe_info.lIndex == ovutils::OV_INVALID) {
1872        if(isYuvBuffer(hnd)) {
1873            return false;
1874        }
1875        pipe_info.lIndex = getMdpPipe(ctx, MDPCOMP_OV_ANY,
1876                Overlay::MIXER_DEFAULT);
1877        if(pipe_info.lIndex == ovutils::OV_INVALID) {
1878            return false;
1879        }
1880    }
1881
1882    //If layer's crop width or dest width > 2048, use 2 pipes
1883    if((dst.right - dst.left) > qdutils::MAX_DISPLAY_DIM or
1884            (crop.right - crop.left) > qdutils::MAX_DISPLAY_DIM) {
1885        ePipeType rightType = isYuvBuffer(hnd) ?
1886                MDPCOMP_OV_VG : MDPCOMP_OV_ANY;
1887        pipe_info.rIndex = getMdpPipe(ctx, rightType, Overlay::MIXER_DEFAULT);
1888        if(pipe_info.rIndex == ovutils::OV_INVALID) {
1889            return false;
1890        }
1891    }
1892
1893    return true;
1894}
1895
1896bool MDPCompSrcSplit::allocLayerPipes(hwc_context_t *ctx,
1897        hwc_display_contents_1_t* list) {
1898    for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {
1899        if(mCurrentFrame.isFBComposed[index]) continue;
1900        hwc_layer_1_t* layer = &list->hwLayers[index];
1901        int mdpIndex = mCurrentFrame.layerToMDP[index];
1902        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
1903        info.pipeInfo = new MdpPipeInfoSplit;
1904        info.rot = NULL;
1905        MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo;
1906
1907        ePipeType type = MDPCOMP_OV_ANY;
1908        if(!acquireMDPPipes(ctx, layer, pipe_info, type)) {
1909            ALOGD_IF(isDebug(), "%s: Unable to get pipe for type = %d",
1910                    __FUNCTION__, (int) type);
1911            return false;
1912        }
1913    }
1914    return true;
1915}
1916
// Configures one or two MDP pipes for a layer in source-split mode:
// sets up R/B-swap formats, an optional pre-rotation session for rotated
// YUV content, splits crop/dst in half when two pipes are used, and commits
// each pipe via configMdp. Returns 0 on success, -1 on any failure.
int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if(!hnd) {
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return -1;
    }
    MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    Rotator **rot = &PipeLayerPair.rot;
    eZorder z = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int transform = layer->transform;
    eTransform orient = static_cast<eTransform>(transform);
    const int downscale = 0;
    int rotFlags = ROT_FLAGS_NONE;
    uint32_t format = ovutils::getMdpFormat(hnd->format, isTileRendered(hnd));
    Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size);

    // NOTE(review): format string halves concatenate without a separator,
    // logging "dest_pipeL: 5dest_pipeR: 3" — fix separately if desired.
    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d"
             "dest_pipeR: %d",__FUNCTION__, layer, z, lDest, rDest);

    // Handle R/B swap
    if (layer->flags & HWC_FORMAT_RB_SWAP) {
        if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
        else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
    }

    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    setMdpFlags(layer, mdpFlagsL, 0, transform);
    // Right-pipe flags are snapshotted BEFORE the dual-pipe flag is added
    // below, so the right pipe never carries OV_MDSS_MDP_DUAL_PIPE.
    // NOTE(review): confirm that omission is intentional.
    eMdpFlags mdpFlagsR = mdpFlagsL;

    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        //Enable overfetch
        setMdpFlags(mdpFlagsL, OV_MDSS_MDP_DUAL_PIPE);
    }

    // Rotated YUV content is pre-rotated through a rotator session; the
    // pipes then consume the rotator's output (format/crop updated below).
    if(isYuvBuffer(hnd) && (transform & HWC_TRANSFORM_ROT_90)) {
        (*rot) = ctx->mRotMgr->getNext();
        if((*rot) == NULL) return -1;
        //Configure rotator for pre-rotation
        if(configRotator(*rot, whf, crop, mdpFlagsL, orient, downscale) < 0) {
            ALOGE("%s: configRotator failed!", __FUNCTION__);
            return -1;
        }
        // Track the layer<->rotator pairing so the session can be released
        // when the layer goes away.
        ctx->mLayerRotMap[mDpy]->add(layer, *rot);
        whf.format = (*rot)->getDstFormat();
        updateSource(orient, whf, crop);
        rotFlags |= ROT_PREROTATED;
    }

    //If 2 pipes being used, divide layer into half, crop and dst
    hwc_rect_t cropL = crop;
    hwc_rect_t cropR = crop;
    hwc_rect_t dstL = dst;
    hwc_rect_t dstR = dst;
    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        // Split the crop at its horizontal midpoint.
        cropL.right = (crop.right + crop.left) / 2;
        cropR.left = cropL.right;
        sanitizeSourceCrop(cropL, cropR, hnd);

        //Swap crops on H flip since 2 pipes are being used
        if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) {
            hwc_rect_t tmp = cropL;
            cropL = cropR;
            cropR = tmp;
        }

        dstL.right = (dst.right + dst.left) / 2;
        dstR.left = dstL.right;
    }

    //For the mdp, since either we are pre-rotating or MDP does flips
    orient = OVERLAY_TRANSFORM_0;
    transform = 0;

    //configure left pipe
    if(lDest != OV_INVALID) {
        PipeArgs pargL(mdpFlagsL, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags), layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));

        if(configMdp(ctx->mOverlay, pargL, orient,
                    cropL, dstL, metadata, lDest) < 0) {
            ALOGE("%s: commit failed for left mixer config", __FUNCTION__);
            return -1;
        }
    }

    //configure right pipe
    if(rDest != OV_INVALID) {
        PipeArgs pargR(mdpFlagsR, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags),
                layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));
        if(configMdp(ctx->mOverlay, pargR, orient,
                    cropR, dstR, metadata, rDest) < 0) {
            ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
            return -1;
        }
    }

    return 0;
}
2028
2029}; //namespace
2030
2031