/*
 * Copyright (C) 2012-2014, The Linux Foundation. All rights reserved.
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <math.h>
#include "hwc_mdpcomp.h"
#include <sys/ioctl.h>
#include "external.h"
#include "virtual.h"
#include "qdMetaData.h"
#include "mdp_version.h"
#include "hwc_fbupdate.h"
#include "hwc_ad.h"
#include <overlayRotator.h>
#include "hwc_copybit.h"

using namespace overlay;
using namespace qdutils;
using namespace overlay::utils;
namespace ovutils = overlay::utils;

namespace qhwc {

//==============MDPComp========================================================

IdleInvalidator *MDPComp::idleInvalidator = NULL;
bool MDPComp::sIdleFallBack = false;
bool MDPComp::sHandleTimeout = false;
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
int MDPComp::sSimulationFlags = 0;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
bool MDPComp::sEnable4k2kYUVSplit = false;
bool MDPComp::sSrcSplitEnabled = false;

MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
    if(qdutils::MDPVersion::getInstance().isSrcSplit()) {
        sSrcSplitEnabled = true;
        return new MDPCompSrcSplit(dpy);
    } else if(isDisplaySplit(ctx, dpy)) {
        return new MDPCompSplit(dpy);
    }
    return new MDPCompNonSplit(dpy);
}

MDPComp::MDPComp(int dpy):mDpy(dpy){}

void MDPComp::dump(android::String8& buf, hwc_context_t *ctx)
{
    if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS)
        return;

    dumpsys_log(buf,"HWC Map for Dpy: %s \n",
                (mDpy == 0) ? "\"PRIMARY\"" :
                (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
    dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d "
                "fbCount:%2d \n", mCurrentFrame.layerCount,
                mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n",
                (mCurrentFrame.needsRedraw? "YES" : "NO"),
                mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    if(isDisplaySplit(ctx, mDpy)) {
        dumpsys_log(buf, "Programmed ROI's: Left: [%d, %d, %d, %d] "
                "Right: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom,
                ctx->listStats[mDpy].rRoi.left,ctx->listStats[mDpy].rRoi.top,
                ctx->listStats[mDpy].rRoi.right,
                ctx->listStats[mDpy].rRoi.bottom);
    } else {
        dumpsys_log(buf, "Programmed ROI: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left,ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom);
    }
    dumpsys_log(buf," --------------------------------------------- \n");
    dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype | Z \n");
    dumpsys_log(buf," --------------------------------------------- \n");
    for(int index = 0; index < mCurrentFrame.layerCount; index++ )
        dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                    mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                     (mCurrentFrame.drop[index] ? "DROP" :
                      (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
            mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
    dumpsys_log(buf,"\n");
}

bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!",__FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX];

    sEnabled = false;
    if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = true;
    }

#ifdef DELTA_PANEL
    if((property_get("ro.hwc.is_delta_panel", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = false;
    }
#endif

    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnableMixedMode = false;
    }

    sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
    if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
    }

    if(ctx->mMDP.panel != MIPI_CMD_PANEL) {
        // Idle invalidation is not necessary on command mode panels
        long idle_timeout = DEFAULT_IDLE_TIME;
        if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) {
            if(atoi(property) != 0)
                idle_timeout = atoi(property);
        }

        //create Idle Invalidator only when not disabled through property
        if(idle_timeout != -1)
            idleInvalidator = IdleInvalidator::getInstance();

        if(idleInvalidator == NULL) {
            ALOGE("%s: failed to instantiate idleInvalidator object",
                  __FUNCTION__);
        } else {
            idleInvalidator->init(timeout_handler, ctx,
                                  (unsigned int)idle_timeout);
        }
    }

    if(!qdutils::MDPVersion::getInstance().isSrcSplit() &&
            property_get("persist.mdpcomp.4k2kSplit", property, "0") > 0 &&
            (!strncmp(property, "1", PROPERTY_VALUE_MAX) ||
            !strncasecmp(property,"true", PROPERTY_VALUE_MAX))) {
        sEnable4k2kYUVSplit = true;
    }

    if ((property_get("persist.hwc.ptor.enable", property, NULL) > 0) &&
            ((!strncasecmp(property, "true", PROPERTY_VALUE_MAX )) ||
             (!strncmp(property, "1", PROPERTY_VALUE_MAX )))) {
        ctx->mCopyBit[HWC_DISPLAY_PRIMARY] = new CopyBit(ctx,
                                                         HWC_DISPLAY_PRIMARY);
    }

    return true;
}

void MDPComp::reset(hwc_context_t *ctx) {
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numLayers);
    ctx->mOverlay->clear(mDpy);
    ctx->mLayerRotMap[mDpy]->clear();
}

void MDPComp::reset() {
    sHandleTimeout = false;
    mModeOn = false;
}

void MDPComp::timeout_handler(void *udata) {
    struct hwc_context_t* ctx = (struct hwc_context_t*)(udata);

    if(!ctx) {
        ALOGE("%s: received empty data in timer callback", __FUNCTION__);
        return;
    }
    Locker::Autolock _l(ctx->mDrawLock);
    // Handle timeout event only if the previous composition is MDP or MIXED.
    if(!sHandleTimeout) {
        ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__);
        return;
    }
    if(!ctx->proc) {
        ALOGE("%s: HWC proc not registered", __FUNCTION__);
        return;
    }
    sIdleFallBack = true;
    /* Trigger SF to redraw the current frame */
    ctx->proc->invalidate(ctx->proc);
}

void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx,
                                   hwc_display_contents_1_t* list) {
    LayerProp *layerProp = ctx->layerProp[mDpy];

    for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) {
        hwc_layer_1_t* layer = &(list->hwLayers[index]);
        if(!mCurrentFrame.isFBComposed[index]) {
            layerProp[index].mFlags |= HWC_MDPCOMP;
            layer->compositionType = HWC_OVERLAY;
            layer->hints |= HWC_HINT_CLEAR_FB;
        } else {
            /* Drop the layer when it's already present in FB OR when it lies
             * outside the frame's ROI */
            if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
                layer->compositionType = HWC_OVERLAY;
            }
        }
    }
}

void MDPComp::setRedraw(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    mCurrentFrame.needsRedraw = false;
    if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
            (list->flags & HWC_GEOMETRY_CHANGED) ||
            isSkipPresent(ctx, mDpy)) {
        mCurrentFrame.needsRedraw = true;
    }
}

MDPComp::FrameInfo::FrameInfo() {
    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    reset(0);
}

void MDPComp::FrameInfo::reset(const int& numLayers) {
    for(int i = 0 ; i < MAX_PIPES_PER_MIXER; i++ ) {
        if(mdpToLayer[i].pipeInfo) {
            delete mdpToLayer[i].pipeInfo;
            mdpToLayer[i].pipeInfo = NULL;
            //We don't own the rotator
            mdpToLayer[i].rot = NULL;
        }
    }

    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    memset(&layerToMDP, -1, sizeof(layerToMDP));
    memset(&isFBComposed, 1, sizeof(isFBComposed));

    layerCount = numLayers;
    fbCount = numLayers;
    mdpCount = 0;
    needsRedraw = true;
    fbZ = -1;
}

void MDPComp::FrameInfo::map() {
    // populate layer and MDP maps
    int mdpIdx = 0;
    for(int idx = 0; idx < layerCount; idx++) {
        if(!isFBComposed[idx]) {
            mdpToLayer[mdpIdx].listIndex = idx;
            layerToMDP[idx] = mdpIdx++;
        }
    }
}

MDPComp::LayerCache::LayerCache() {
    reset();
}

void MDPComp::LayerCache::reset() {
    memset(&hnd, 0, sizeof(hnd));
    memset(&isFBComposed, true, sizeof(isFBComposed));
    memset(&drop, false, sizeof(drop));
    layerCount = 0;
}

void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) {
    const int numAppLayers = (int)list->numHwLayers - 1;
    for(int i = 0; i < numAppLayers; i++) {
        hnd[i] = list->hwLayers[i].handle;
    }
}

void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
    layerCount = curFrame.layerCount;
    memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed));
    memcpy(&drop, &curFrame.drop, sizeof(drop));
}

bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame,
                                      hwc_display_contents_1_t* list) {
    if(layerCount != curFrame.layerCount)
        return false;
    for(int i = 0; i < curFrame.layerCount; i++) {
        if((curFrame.isFBComposed[i] != isFBComposed[i]) ||
                (curFrame.drop[i] != drop[i])) {
            return false;
        }
        if(curFrame.isFBComposed[i] &&
                (hnd[i] != list->hwLayers[i].handle)){
            return false;
        }
    }
    return true;
}

bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if((has90Transform(layer) and (not isRotationDoable(ctx, hnd))) ||
            (not isValidDimension(ctx,layer))
            //More conditions here, SKIP, sRGB+Blend etc
            ) {
        return false;
    }
    return true;
}

bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;

    if(!hnd) {
        if (layer->flags & HWC_COLOR_FILL) {
            // Color layer
            return true;
        }
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return false;
    }

    //XXX: Investigate doing this with pixel phase on MDSS
    if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf))
        return false;

    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    bool rotated90 = (bool)(layer->transform & HAL_TRANSFORM_ROT_90);
    int crop_w = rotated90 ? crop.bottom - crop.top : crop.right - crop.left;
    int crop_h = rotated90 ? crop.right - crop.left : crop.bottom - crop.top;
    int dst_w = dst.right - dst.left;
    int dst_h = dst.bottom - dst.top;
    float w_scale = ((float)crop_w / (float)dst_w);
    float h_scale = ((float)crop_h / (float)dst_h);

    /* Workaround for an MDP HW limitation in DSI command mode panels where
     * FPS will not go beyond 30 if buffers on RGB pipes are of width or height
     * less than 5 pixels.
     * There is also a HW limitation in MDP: the minimum block size is 2x2,
     * so fall back to GPU if height is less than 2.
     */
    if((crop_w < 5)||(crop_h < 5))
        return false;

    if((w_scale > 1.0f) || (h_scale > 1.0f)) {
        const uint32_t maxMDPDownscale =
            qdutils::MDPVersion::getInstance().getMaxMDPDownscale();
        const float w_dscale = w_scale;
        const float h_dscale = h_scale;

        if(ctx->mMDP.version >= qdutils::MDSS_V5) {

            if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
                /* On targets that don't support decimation (eg. 8x26)
                 * the maximum downscale supported is the overlay pipe
                 * downscale.
                 */
                if(crop_w > MAX_DISPLAY_DIM || w_dscale > maxMDPDownscale ||
                        h_dscale > maxMDPDownscale)
                    return false;
            } else {
                // Decimation on macrotile format layers is not supported.
                if(isTileRendered(hnd)) {
                    /* MDP can read maximum MAX_DISPLAY_DIM width.
                     * Bail out if
                     *      1. Src crop > MAX_DISPLAY_DIM on nonsplit MDPComp
                     *      2. exceeds maximum downscale limit
                     */
                    if(((crop_w > MAX_DISPLAY_DIM) && !sSrcSplitEnabled) ||
                            w_dscale > maxMDPDownscale ||
                            h_dscale > maxMDPDownscale) {
                        return false;
                    }
                } else if(w_dscale > 64 || h_dscale > 64)
                    return false;
            }
        } else { //A-family
            if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale)
                return false;
        }
    }

    if((w_scale < 1.0f) || (h_scale < 1.0f)) {
        const uint32_t upscale =
            qdutils::MDPVersion::getInstance().getMaxMDPUpscale();
        const float w_uscale = 1.0f / w_scale;
        const float h_uscale = 1.0f / h_scale;

        if(w_uscale > upscale || h_uscale > upscale)
            return false;
    }

    return true;
}

bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
    bool ret = true;

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
        ret = false;
    } else if((qdutils::MDPVersion::getInstance().is8x26() ||
               qdutils::MDPVersion::getInstance().is8x16() ||
               qdutils::MDPVersion::getInstance().is8x39()) &&
            ctx->mVideoTransFlag &&
            isSecondaryConnected(ctx)) {
        //1 Padding round to shift pipes across mixers
        ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round",
                __FUNCTION__);
        ret = false;
    } else if(isSecondaryConfiguring(ctx)) {
        ALOGD_IF( isDebug(),"%s: External Display connection is pending",
                  __FUNCTION__);
        ret = false;
    } else if(ctx->isPaddingRound) {
        ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
                 __FUNCTION__,mDpy);
        ret = false;
    }
    return ret;
}

void MDPCompNonSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect_t roi = ctx->listStats[mDpy].lRoi;
    fbRect = getIntersection(fbRect, roi);
}

/* 1) Identify layers that are not visible or lying outside the updating ROI
 *    and drop them from composition.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompNonSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRect)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;
        hwc_rect_t res = getIntersection(visibleRect, dstRect);

        if(!isValidRect(res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset frame ROI when any layer which needs scaling also needs
             * ROI cropping */
            if(!isSameRect(res, dstRect) && needsScaling (layer)) {
                ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            /* deduct any opaque region from visibleRect */
            if (layer->blending == HWC_BLENDING_NONE)
                visibleRect = deductRect(visibleRect, res);
        }
    }
    return true;
}

/* Calculate the ROI for the frame by accounting each updating layer's
 * displayFrame. If DirtyRegion is applicable, calculate it by accounting
 * each changing layer's dirtyRegion. */
void MDPCompNonSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    if(!canPartialUpdate(ctx, list))
        return;

    struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
    hwc_rect fullFrame = (struct hwc_rect) {0, 0,(int)ctx->dpyAttr[mDpy].xres,
        (int)ctx->dpyAttr[mDpy].yres};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        if ((mCachedFrame.hnd[index] != layer->handle) ||
                isYuvBuffer((private_handle_t *)layer->handle)) {
            hwc_rect_t dst = layer->displayFrame;
            hwc_rect_t updatingRect = dst;

#ifdef QCOM_BSP
            if(!needsScaling(layer) && !layer->transform)
            {
                hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf);
                int x_off = dst.left - src.left;
                int y_off = dst.top - src.top;
                updatingRect = moveRect(layer->dirtyRect, x_off, y_off);
            }
#endif

            roi = getUnion(roi, updatingRect);
        }
    }

    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(roi))
        return;

    // Align ROI coordinates to panel restrictions
    roi = getSanitizeROI(roi, fullFrame);

    ctx->listStats[mDpy].lRoi = roi;
    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
            ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
            ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom);
}

void MDPCompSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect r_roi = ctx->listStats[mDpy].rRoi;

    hwc_rect_t l_fbRect = getIntersection(fbRect, l_roi);
    hwc_rect_t r_fbRect = getIntersection(fbRect, r_roi);
    fbRect = getUnion(l_fbRect, r_fbRect);
}
/* 1) Identify layers that are not visible or lying outside BOTH the updating
 *    ROI's and drop them from composition. If a layer spans both halves of
 *    the screen but is needed by only one ROI, the non-contributing half
 *    will not be programmed for MDP.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    hwc_rect_t visibleRectL = ctx->listStats[mDpy].lRoi;
    hwc_rect_t visibleRectR = ctx->listStats[mDpy].rRoi;

    for(int i = numAppLayers - 1; i >= 0; i--){
        if(!isValidRect(visibleRectL) && !isValidRect(visibleRectR))
        {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;

        hwc_rect_t l_res = getIntersection(visibleRectL, dstRect);
        hwc_rect_t r_res = getIntersection(visibleRectR, dstRect);
        hwc_rect_t res = getUnion(l_res, r_res);

        if(!isValidRect(l_res) && !isValidRect(r_res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset frame ROI when any layer which needs scaling also needs
             * ROI cropping */
            if(!isSameRect(res, dstRect) && needsScaling (layer)) {
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            if (layer->blending == HWC_BLENDING_NONE) {
                visibleRectL = deductRect(visibleRectL, l_res);
                visibleRectR = deductRect(visibleRectR, r_res);
            }
        }
    }
    return true;
}
/* Calculate the ROI for the frame by accounting each updating layer's
 * displayFrame. If DirtyRegion is applicable, calculate it by accounting
 * each changing layer's dirtyRegion. */
void MDPCompSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(!canPartialUpdate(ctx, list))
        return;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int lSplit = getLeftSplit(ctx, mDpy);

    int hw_h = (int)ctx->dpyAttr[mDpy].yres;
    int hw_w = (int)ctx->dpyAttr[mDpy].xres;

    struct hwc_rect l_frame = (struct hwc_rect){0, 0, lSplit, hw_h};
    struct hwc_rect r_frame = (struct hwc_rect){lSplit, 0, hw_w, hw_h};

    struct hwc_rect l_roi = (struct hwc_rect){0, 0, 0, 0};
    struct hwc_rect r_roi = (struct hwc_rect){0, 0, 0, 0};

    for(int index = 0; index < numAppLayers; index++ ) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if ((mCachedFrame.hnd[index] != layer->handle) ||
                isYuvBuffer(hnd)) {
            hwc_rect_t dst = layer->displayFrame;
            hwc_rect_t updatingRect = dst;

#ifdef QCOM_BSP
            if(!needsScaling(layer) && !layer->transform)
            {
                hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf);
                int x_off = dst.left - src.left;
                int y_off = dst.top - src.top;
                updatingRect = moveRect(layer->dirtyRect, x_off, y_off);
            }
#endif

            hwc_rect_t l_dst = getIntersection(l_frame, updatingRect);
            if(isValidRect(l_dst))
                l_roi = getUnion(l_roi, l_dst);

            hwc_rect_t r_dst = getIntersection(r_frame, updatingRect);
            if(isValidRect(r_dst))
                r_roi = getUnion(r_roi, r_dst);
        }
    }

    /* For panels that cannot accept commands on both interfaces, we cannot
     * send two ROI's (one for each half). We merge them into a single ROI
     * and split it across lSplit for MDP mixer use. The ROI's will be merged
     * again finally before updating the panel in the driver. */
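    /* Illustration of the merge below, with hypothetical numbers: on a
     * 2160-wide panel with lSplit = 1080, l_roi = [100, 200, 400, 600] and
     * r_roi = [1200, 300, 1900, 800] merge to temp_roi = [100, 200, 1900, 800],
     * which then splits back into l_roi = [100, 200, 1080, 800] and
     * r_roi = [1080, 200, 1900, 800]. */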
    if(qdutils::MDPVersion::getInstance().needsROIMerge()) {
        hwc_rect_t temp_roi = getUnion(l_roi, r_roi);
        l_roi = getIntersection(temp_roi, l_frame);
        r_roi = getIntersection(temp_roi, r_frame);
    }

    /* No layer is updating. Still SF wants a refresh. */
    if(!isValidRect(l_roi) && !isValidRect(r_roi))
        return;

    l_roi = getSanitizeROI(l_roi, l_frame);
    r_roi = getSanitizeROI(r_roi, r_frame);

    ctx->listStats[mDpy].lRoi = l_roi;
    ctx->listStats[mDpy].rRoi = r_roi;

    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(),"%s: generated L_ROI: [%d, %d, %d, %d]"
            "R_ROI: [%d, %d, %d, %d]", __FUNCTION__,
            ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
            ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom,
            ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top,
            ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom);
}

/* Checks for conditions where all the layers marked for MDP comp cannot be
 * bypassed. On such conditions we try to bypass at least the YUV layers */
bool MDPComp::tryFullFrame(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres;

    if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) {
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
        return false;
    }

    if(isSkipPresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(),"%s: SKIP present: %d",
                __FUNCTION__,
                isSkipPresent(ctx, mDpy));
        return false;
    }

    if(mDpy > HWC_DISPLAY_PRIMARY && (priDispW > MAX_DISPLAY_DIM) &&
            (ctx->dpyAttr[mDpy].xres < MAX_DISPLAY_DIM)) {
        // Disable MDP comp on the secondary when the primary is a high-res
        // panel and the secondary is a normal 1080p display. MDP comp on the
        // secondary would use decimation for downscale in such a usecase,
        // causing a quality mismatch whenever there is a fallback to GPU comp.
        ALOGD_IF(isDebug(), "%s: Disable MDP Composition for Secondary Disp",
                __FUNCTION__);
        return false;
    }

    // check for action safe flag and downscale mode which requires scaling.
    if(ctx->dpyAttr[mDpy].mActionSafePresent
            || ctx->dpyAttr[mDpy].mDownScaleMode) {
        ALOGD_IF(isDebug(), "%s: Scaling needed for this frame",__FUNCTION__);
        return false;
    }

    for(int i = 0; i < numAppLayers; ++i) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
            if(!canUseRotator(ctx, mDpy)) {
                ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
                        __FUNCTION__, mDpy);
                return false;
            }
        }

        //For 8x26 with panel width > 1k, fail MDP comp if an RGB layer needs
        //HFLIP. This may not be needed if Gfx pre-rotation can handle all
        //flips & rotations.
        if(qdutils::MDPVersion::getInstance().is8x26() &&
                (ctx->dpyAttr[mDpy].xres > 1024) &&
                (layer->transform & HWC_TRANSFORM_FLIP_H) &&
                (!isYuvBuffer(hnd)))
            return false;
    }

    if(ctx->mAD->isDoable()) {
        return false;
    }

    //If all above hard conditions are met we can do full or partial MDP comp.
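    /* Strategy order below (first success wins): full MDP comp for every
     * layer, then full MDP comp with PTOR (peripheral overlaps drawn via
     * Copybit), then partial (mixed) MDP comp with some layers cached on
     * the FB. */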
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(fullMDPCompWithPTOR(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }

    return ret;
}

bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP)
        return false;

    //Will benefit presentation / secondary-only layer.
    if((mDpy > HWC_DISPLAY_PRIMARY) &&
            (list->numHwLayers - 1) > MAX_SEC_LAYERS) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not mCurrentFrame.drop[i] and
                not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }
    }

    mCurrentFrame.fbCount = 0;
    memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
            sizeof(mCurrentFrame.isFBComposed));
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
            mCurrentFrame.dropCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: FULL_MDP_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

/* Full MDP Composition with Peripheral Tiny Overlap Removal.
 * MDP bandwidth limitations can be avoided if the overlap region
 * covered by the smallest layer at a higher z-order gets composed
 * by Copybit on a render buffer, which can be queued to MDP.
 */
bool MDPComp::fullMDPCompWithPTOR(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    // Hard checks where we cannot use this mode
    if (mDpy || !ctx->mCopyBit[mDpy] || isDisplaySplit(ctx, mDpy)) {
        ALOGD_IF(isDebug(), "%s: Feature not supported!", __FUNCTION__);
        return false;
    }

    // Frame level checks
    if ((numAppLayers > stagesForMDP) || isSkipPresent(ctx, mDpy) ||
            isYuvPresent(ctx, mDpy) || mCurrentFrame.dropCount ||
            isSecurePresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(), "%s: Frame not supported!", __FUNCTION__);
        return false;
    }
    // MDP comp checks
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list",__FUNCTION__);
            return false;
        }
    }

    /* We cannot use this composition mode if:
     *   1. A below layer needs scaling.
     *   2. Overlap is not peripheral to display.
     *   3. Overlap or a below layer has 90 degree transform.
     *   4. Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs.
     */
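    /* e.g. on a hypothetical 1080x1920 panel the framebuffer holds
     * 2,073,600 pixels, so the combined PTOR overlap must stay at or below
     * 691,200 pixels to pass check 4. */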
    int minLayerIndex[MAX_PTOR_LAYERS] = { -1, -1};
    hwc_rect_t overlapRect[MAX_PTOR_LAYERS];
    memset(overlapRect, 0, sizeof(overlapRect));
    int layerPixelCount, minPixelCount = 0;
    int numPTORLayersFound = 0;
    for (int i = numAppLayers-1; (i >= 0 &&
                numPTORLayersFound < MAX_PTOR_LAYERS); i--) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
        hwc_rect_t dispFrame = layer->displayFrame;
        layerPixelCount = (crop.right - crop.left) * (crop.bottom - crop.top);
        // PTOR layer should be peripheral and cannot have transform
        if (!isPeripheral(dispFrame, ctx->mViewFrame[mDpy]) ||
                has90Transform(layer)) {
            continue;
        }
        if((3 * (layerPixelCount + minPixelCount)) >
                ((int)ctx->dpyAttr[mDpy].xres * (int)ctx->dpyAttr[mDpy].yres)) {
            // Overlap area > (1/3 * FrameBuffer) area, based on Perf inputs.
            continue;
        }
        // Found the PTOR layer
        bool found = true;
        for (int j = i-1; j >= 0; j--) {
            // Check if the layers below this layer qualify for PTOR comp
            hwc_layer_1_t* layer = &list->hwLayers[j];
            hwc_rect_t disFrame = layer->displayFrame;
            // A layer below the PTOR layer that intersects it and has a 90
            // degree transform or needs scaling cannot be supported.
            if ((isValidRect(getIntersection(dispFrame, disFrame)))
                    && (has90Transform(layer) || needsScaling(layer))) {
                found = false;
                break;
            }
        }
        // Store the minLayer Index
        if(found) {
            minLayerIndex[numPTORLayersFound] = i;
            overlapRect[numPTORLayersFound] = list->hwLayers[i].displayFrame;
            minPixelCount += layerPixelCount;
            numPTORLayersFound++;
        }
    }

    if(isValidRect(getIntersection(overlapRect[0], overlapRect[1]))) {
        ALOGD_IF(isDebug(), "%s: Ignore Rect2 as it intersects with Rect1",
                __FUNCTION__);
        // reset second minLayerIndex[1];
        minLayerIndex[1] = -1;
        numPTORLayersFound--;
    }

    // No overlap layers
    if (!numPTORLayersFound)
        return false;

    ctx->mPtorInfo.count = numPTORLayersFound;
    for(int i = 0; i < MAX_PTOR_LAYERS; i++) {
        ctx->mPtorInfo.layerIndex[i] = minLayerIndex[i];
    }

    if (!ctx->mCopyBit[mDpy]->prepareOverlap(ctx, list)) {
        // reset PTOR
        ctx->mPtorInfo.count = 0;
        return false;
    }
    // Store the displayFrame and the sourceCrops of the layers
    hwc_rect_t displayFrame[numAppLayers];
    hwc_rect_t sourceCrop[numAppLayers];
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        displayFrame[i] = layer->displayFrame;
        sourceCrop[i] = integerizeSourceCrop(layer->sourceCropf);
    }

    for(int j = 0; j < numPTORLayersFound; j++) {
        int index = ctx->mPtorInfo.layerIndex[j];
        // Remove overlap from crop & displayFrame of below layers
        for (int i = 0; i < index && index !=-1; i++) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(!isValidRect(getIntersection(layer->displayFrame,
                            overlapRect[j]))) {
                continue;
            }
            // Update layer attributes
            hwc_rect_t srcCrop = integerizeSourceCrop(layer->sourceCropf);
            hwc_rect_t destRect = deductRect(layer->displayFrame,
                    overlapRect[j]);
            qhwc::calculate_crop_rects(srcCrop, layer->displayFrame, destRect,
                    layer->transform);
            layer->sourceCropf.left = (float)srcCrop.left;
            layer->sourceCropf.top = (float)srcCrop.top;
            layer->sourceCropf.right = (float)srcCrop.right;
            layer->sourceCropf.bottom = (float)srcCrop.bottom;
        }
    }

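    // With the overlaps carved out of the lower layers, every app layer is
    // marked for MDP and no FB target is needed (fbZ = -1); the Copybit
    // render buffer is later queued to MDP in place of the removed overlap
    // regions (see drawOverlap below).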
    mCurrentFrame.mdpCount = numAppLayers;
    mCurrentFrame.fbCount = 0;
    mCurrentFrame.fbZ = -1;

    for (int j = 0; j < numAppLayers; j++)
        mCurrentFrame.isFBComposed[j] = false;

    bool result = postHeuristicsHandling(ctx, list);

    // Restore layer attributes
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        layer->displayFrame = displayFrame[i];
        layer->sourceCropf.left = (float)sourceCrop[i].left;
        layer->sourceCropf.top = (float)sourceCrop[i].top;
        layer->sourceCropf.right = (float)sourceCrop[i].right;
        layer->sourceCropf.bottom = (float)sourceCrop[i].bottom;
    }

    if (!result) {
        // reset PTOR
        ctx->mPtorInfo.count = 0;
        reset(ctx);
    } else {
        ALOGD_IF(isDebug(), "%s: PTOR Indexes: %d and %d", __FUNCTION__,
                 ctx->mPtorInfo.layerIndex[0], ctx->mPtorInfo.layerIndex[1]);
    }

    ALOGD_IF(isDebug(), "%s: Postheuristics %s!", __FUNCTION__,
             (result ? "successful" : "failed"));
    return result;
}

bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
    if(!sEnableMixedMode) {
        //Mixed mode is disabled. No need to even try caching.
        return false;
    }

    bool ret = false;
    if(list->flags & HWC_GEOMETRY_CHANGED) { //Try load based first
        ret = loadBasedComp(ctx, list) or
                cacheBasedComp(ctx, list);
    } else {
        ret = cacheBasedComp(ctx, list) or
                loadBasedComp(ctx, list);
    }

    return ret;
}

bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_CACHE_MDP)
        return false;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list);

    //If an MDP-marked layer is unsupported we cannot do partial MDP comp
    for(int i = 0; i < numAppLayers; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(not isSupportedForMDPComp(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
                        __FUNCTION__);
                reset(ctx);
                return false;
            }
        }
    }

    updateYUV(ctx, list, false /*secure only*/);
    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
    if(!ret) {
        ALOGD_IF(isDebug(),"%s: batching failed, dpy %d",__FUNCTION__, mDpy);
        reset(ctx);
        return false;
    }

    int mdpCount = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

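    // Cache-based marking in practice: layers whose buffer handles did not
    // change since the last frame (e.g. a static status bar behind a playing
    // video) stay composed on the FB, while the updating layers get MDP pipes.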
    //Will benefit cases where a video has non-updating background.
    if((mDpy > HWC_DISPLAY_PRIMARY) and
            (mdpCount > MAX_SEC_LAYERS)) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes",__FUNCTION__);
        reset(ctx);
        return false;
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags,"%s: CACHE_MDP_COMP SUCCEEDED",
             __FUNCTION__);

    return true;
}

bool MDPComp::loadBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_LOAD_MDP)
        return false;

    if(not isLoadBasedCompDoable(ctx)) {
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    int mdpBatchSize = stagesForMDP - 1; //1 stage for FB
    int fbBatchSize = numNonDroppedLayers - mdpBatchSize;
    int lastMDPSupportedIndex = numAppLayers;
    int dropCount = 0;

    //Find the minimum MDP batch size
    for(int i = 0; i < numAppLayers;i++) {
        if(mCurrentFrame.drop[i]) {
            dropCount++;
            continue;
        }
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            lastMDPSupportedIndex = i;
            mdpBatchSize = min(i - dropCount, stagesForMDP - 1);
            fbBatchSize = numNonDroppedLayers - mdpBatchSize;
            break;
        }
    }

    ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d "
            "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize,
            mCurrentFrame.dropCount);

    //Start at a point where the fb batch should at least have 2 layers, for
    //this mode to be justified.
    while(fbBatchSize < 2) {
        ++fbBatchSize;
        --mdpBatchSize;
    }

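    // Worked example (hypothetical numbers): with stagesForMDP = 8 and 6
    // non-dropped layers, mdpBatchSize starts at 7 and fbBatchSize at -1;
    // the loop above raises fbBatchSize to 2 and lowers mdpBatchSize to 4,
    // so 4 layers go to MDP and the bottom 2 are batched onto the FB.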
    //If there are no layers for MDP, this mode doesn't make sense.
    if(mdpBatchSize < 1) {
        ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch",
                __FUNCTION__);
        return false;
    }

    mCurrentFrame.reset(numAppLayers);

    //Try with successively smaller mdp batch sizes until we succeed or reach 1
    while(mdpBatchSize > 0) {
        //Mark layers for MDP comp
        int mdpBatchLeft = mdpBatchSize;
        for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) {
            if(mCurrentFrame.drop[i]) {
                continue;
            }
            mCurrentFrame.isFBComposed[i] = false;
            --mdpBatchLeft;
        }

        mCurrentFrame.fbZ = mdpBatchSize;
        mCurrentFrame.fbCount = fbBatchSize;
        mCurrentFrame.mdpCount = mdpBatchSize;

        ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d",
                 __FUNCTION__, mdpBatchSize, fbBatchSize,
                 mCurrentFrame.dropCount);

        if(postHeuristicsHandling(ctx, list)) {
            ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded",
                     __FUNCTION__);
            ALOGD_IF(sSimulationFlags,"%s: LOAD_MDP_COMP SUCCEEDED",
                    __FUNCTION__);
            return true;
        }

        reset(ctx);
        --mdpBatchSize;
        ++fbBatchSize;
    }

    return false;
}

bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) {
    if(mDpy or isSecurePresent(ctx, mDpy) or
            isYuvPresent(ctx, mDpy)) {
        return false;
    }
    return true;
}

bool MDPComp::canPartialUpdate(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    if(!qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() ||
            isSkipPresent(ctx, mDpy) || (list->flags & HWC_GEOMETRY_CHANGED) ||
            mDpy ) {
        return false;
    }
    if(ctx->listStats[mDpy].secureUI)
        return false;
    return true;
}

bool MDPComp::tryVideoOnly(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    const bool secureOnly = true;
    return videoOnlyComp(ctx, list, not secureOnly) or
            videoOnlyComp(ctx, list, secureOnly);
}

bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list, bool secureOnly) {
    if(sSimulationFlags & MDPCOMP_AVOID_VIDEO_ONLY)
        return false;
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    mCurrentFrame.reset(numAppLayers);
    mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
    updateYUV(ctx, list, secureOnly);
    int mdpCount = mCurrentFrame.mdpCount;

    if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) {
        reset(ctx);
        return false;
    }

    /* Bail out if we are processing only secured video layers
     * and we don't have any */
    if(!isSecurePresent(ctx, mDpy) && secureOnly){
        reset(ctx);
        return false;
    }

    if(mCurrentFrame.fbCount)
        mCurrentFrame.fbZ = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit){
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristic handling failed");
        reset(ctx);
        return false;
    }

    ALOGD_IF(sSimulationFlags,"%s: VIDEO_ONLY_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

/* Checks for conditions where YUV layers cannot be bypassed */
bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
    if(isSkipLayer(layer)) {
        ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy);
        return false;
    }

    if(has90Transform(layer) && !canUseRotator(ctx, mDpy)) {
pipe",__FUNCTION__); 1210 return false; 1211 } 1212 1213 if(isSecuring(ctx, layer)) { 1214 ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__); 1215 return false; 1216 } 1217 1218 if(!isValidDimension(ctx, layer)) { 1219 ALOGD_IF(isDebug(), "%s: Buffer is of invalid width", 1220 __FUNCTION__); 1221 return false; 1222 } 1223 1224 if(layer->planeAlpha < 0xFF) { 1225 ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha\ 1226 in video only mode", 1227 __FUNCTION__); 1228 return false; 1229 } 1230 1231 return true; 1232} 1233 1234/* starts at fromIndex and check for each layer to find 1235 * if it it has overlapping with any Updating layer above it in zorder 1236 * till the end of the batch. returns true if it finds any intersection */ 1237bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list, 1238 int fromIndex, int toIndex) { 1239 for(int i = fromIndex; i < toIndex; i++) { 1240 if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) { 1241 if(intersectingUpdatingLayers(list, i+1, toIndex, i)) { 1242 return false; 1243 } 1244 } 1245 } 1246 return true; 1247} 1248 1249/* Checks if given layer at targetLayerIndex has any 1250 * intersection with all the updating layers in beween 1251 * fromIndex and toIndex. Returns true if it finds intersectiion */ 1252bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list, 1253 int fromIndex, int toIndex, int targetLayerIndex) { 1254 for(int i = fromIndex; i <= toIndex; i++) { 1255 if(!mCurrentFrame.isFBComposed[i]) { 1256 if(areLayersIntersecting(&list->hwLayers[i], 1257 &list->hwLayers[targetLayerIndex])) { 1258 return true; 1259 } 1260 } 1261 } 1262 return false; 1263} 1264 1265int MDPComp::getBatch(hwc_display_contents_1_t* list, 1266 int& maxBatchStart, int& maxBatchEnd, 1267 int& maxBatchCount) { 1268 int i = 0; 1269 int fbZOrder =-1; 1270 int droppedLayerCt = 0; 1271 while (i < mCurrentFrame.layerCount) { 1272 int batchCount = 0; 1273 int batchStart = i; 1274 int batchEnd = i; 1275 /* Adjust batch Z order with the dropped layers so far */ 1276 int fbZ = batchStart - droppedLayerCt; 1277 int firstZReverseIndex = -1; 1278 int updatingLayersAbove = 0;//Updating layer count in middle of batch 1279 while(i < mCurrentFrame.layerCount) { 1280 if(!mCurrentFrame.isFBComposed[i]) { 1281 if(!batchCount) { 1282 i++; 1283 break; 1284 } 1285 updatingLayersAbove++; 1286 i++; 1287 continue; 1288 } else { 1289 if(mCurrentFrame.drop[i]) { 1290 i++; 1291 droppedLayerCt++; 1292 continue; 1293 } else if(updatingLayersAbove <= 0) { 1294 batchCount++; 1295 batchEnd = i; 1296 i++; 1297 continue; 1298 } else { //Layer is FBComposed, not a drop & updatingLayer > 0 1299 1300 // We have a valid updating layer already. If layer-i not 1301 // have overlapping with all updating layers in between 1302 // batch-start and i, then we can add layer i to batch. 1303 if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) { 1304 batchCount++; 1305 batchEnd = i; 1306 i++; 1307 continue; 1308 } else if(canPushBatchToTop(list, batchStart, i)) { 1309 //If All the non-updating layers with in this batch 1310 //does not have intersection with the updating layers 1311 //above in z-order, then we can safely move the batch to 1312 //higher z-order. Increment fbZ as it is moving up. 
                        if( firstZReverseIndex < 0) {
                            firstZReverseIndex = i;
                        }
                        batchCount++;
                        batchEnd = i;
                        fbZ += updatingLayersAbove;
                        i++;
                        updatingLayersAbove = 0;
                        continue;
                    } else {
                        //Both failed. Start the loop again from here.
                        if(firstZReverseIndex >= 0) {
                            i = firstZReverseIndex;
                        }
                        break;
                    }
                }
            }
        }
        if(batchCount > maxBatchCount) {
            maxBatchCount = batchCount;
            maxBatchStart = batchStart;
            maxBatchEnd = batchEnd;
            fbZOrder = fbZ;
        }
    }
    return fbZOrder;
}

bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    /* Idea is to keep as many non-updating(cached) layers in FB and
     * send the rest of them through MDP. This is done in 2 steps.
     *   1. Find the maximum contiguous batch of non-updating layers.
     *   2. See if we can improve this batch size for caching by adding
     *      opaque layers around the batch, if they don't have
     *      any overlap with the updating layers in between.
     * NEVER mark an updating layer for caching.
     * But cached ones can be marked for MDP */

    int maxBatchStart = -1;
    int maxBatchEnd = -1;
    int maxBatchCount = 0;
    int fbZ = -1;

    /* Nothing is cached. No batching needed */
    if(mCurrentFrame.fbCount == 0) {
        return true;
    }

    /* No MDP comp layers, try to use other comp modes */
    if(mCurrentFrame.mdpCount == 0) {
        return false;
    }

    fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);

    /* reset rest of the layers lying inside ROI for MDP comp */
    for(int i = 0; i < mCurrentFrame.layerCount; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if((i < maxBatchStart || i > maxBatchEnd) &&
                mCurrentFrame.isFBComposed[i]){
            if(!mCurrentFrame.drop[i]){
                //If an unsupported layer is being attempted to
                //be pulled out we should fail
                if(not isSupportedForMDPComp(ctx, layer)) {
                    return false;
                }
                mCurrentFrame.isFBComposed[i] = false;
            }
        }
    }

    // update the frame data
    mCurrentFrame.fbZ = fbZ;
    mCurrentFrame.fbCount = maxBatchCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
            mCurrentFrame.fbCount - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
            mCurrentFrame.fbCount);

    return true;
}

void MDPComp::updateLayerCache(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int fbCount = 0;

    for(int i = 0; i < numAppLayers; i++) {
        if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) {
            if(!mCurrentFrame.drop[i])
                fbCount++;
            mCurrentFrame.isFBComposed[i] = true;
        } else {
            mCurrentFrame.isFBComposed[i] = false;
        }
    }

    mCurrentFrame.fbCount = fbCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount
            - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(),"%s: MDP count: %d FB count %d drop count: %d",
            __FUNCTION__, mCurrentFrame.mdpCount, mCurrentFrame.fbCount,
            mCurrentFrame.dropCount);
}

void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
        bool secureOnly) {
    int nYuvCount = ctx->listStats[mDpy].yuvCount;
    for(int index = 0;index < nYuvCount; index++){
        int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
        hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];

        if(!isYUVDoable(ctx, layer)) {
            if(!mCurrentFrame.isFBComposed[nYuvIndex]) {
                mCurrentFrame.isFBComposed[nYuvIndex] = true;
                mCurrentFrame.fbCount++;
            }
        } else {
            if(mCurrentFrame.isFBComposed[nYuvIndex]) {
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(!secureOnly || isSecureBuffer(hnd)) {
                    mCurrentFrame.isFBComposed[nYuvIndex] = false;
                    mCurrentFrame.fbCount--;
                }
            }
        }
    }

    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
            mCurrentFrame.fbCount - mCurrentFrame.dropCount;
    ALOGD_IF(isDebug(),"%s: fb count: %d",__FUNCTION__,
             mCurrentFrame.fbCount);
}

hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};

    /* Update only the region of FB needed for composition */
    for(int i = 0; i < mCurrentFrame.layerCount; i++ ) {
        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            hwc_rect_t dst = layer->displayFrame;
            fbRect = getUnion(fbRect, dst);
        }
    }
    trimAgainstROI(ctx, fbRect);
    return fbRect;
}

bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    //Capability checks
    if(!resourceCheck(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
        return false;
    }

    //Limitations checks
    if(!hwLimitationsCheck(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: HW limitations",__FUNCTION__);
        return false;
    }

    //Configure framebuffer first if applicable
    if(mCurrentFrame.fbZ >= 0) {
        hwc_rect_t fbRect = getUpdatingFBRect(ctx, list);
        if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ))
        {
            ALOGD_IF(isDebug(), "%s configure framebuffer failed",
                    __FUNCTION__);
            return false;
        }
    }

    mCurrentFrame.map();

    if(!allocLayerPipes(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
        return false;
    }

    for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
            index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            int mdpIndex = mCurrentFrame.layerToMDP[index];
            hwc_layer_1_t* layer = &list->hwLayers[index];

            //Leave fbZ for framebuffer. CACHE/GLES layers go here.
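            // e.g. 4 layers with fbZ = 2: the first two MDP layers take
            // z-orders 0 and 1, z-order 2 is skipped for the FB target, and
            // the remaining MDP layer lands on z-order 3.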
            if(mdpNextZOrder == mCurrentFrame.fbZ) {
                mdpNextZOrder++;
            }
            MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            cur_pipe->zOrder = mdpNextZOrder++;

            private_handle_t *hnd = (private_handle_t *)layer->handle;
            if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
                if(configure4k2kYuv(ctx, layer,
                            mCurrentFrame.mdpToLayer[mdpIndex])
                        != 0 ){
                    ALOGD_IF(isDebug(), "%s: Failed to configure split pipes \
                            for layer %d",__FUNCTION__, index);
                    return false;
                }
                else{
                    mdpNextZOrder++;
                }
                continue;
            }
            if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
                ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
                        layer %d",__FUNCTION__, index);
                return false;
            }
        }
    }

    if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) {
        ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d"
                ,__FUNCTION__, mDpy);
        return false;
    }

    setRedraw(ctx, list);
    return true;
}

bool MDPComp::resourceCheck(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    const bool fbUsed = mCurrentFrame.fbCount;
    if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) {
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }
    // Init rotCount to number of rotate sessions used by other displays
    int rotCount = ctx->mRotMgr->getNumActiveSessions();
    // Count the number of rotator sessions required for current display
    for (int index = 0; index < mCurrentFrame.layerCount; index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            hwc_layer_1_t* layer = &list->hwLayers[index];
            private_handle_t *hnd = (private_handle_t *)layer->handle;
            if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
                rotCount++;
            }
        }
    }
    // if number of layers to rotate exceeds max rotator sessions, bail out.
    if(rotCount > RotMgr::MAX_ROT_SESS) {
        ALOGD_IF(isDebug(), "%s: Exceeds max rotator sessions %d",
                __FUNCTION__, mDpy);
        return false;
    }
    return true;
}

bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {

    //A-family hw limitation:
    //If a layer needs alpha scaling, MDP can not support it.
    if(ctx->mMDP.version < qdutils::MDSS_V5) {
        for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
            if(!mCurrentFrame.isFBComposed[i] &&
                    isAlphaScaled( &list->hwLayers[i])) {
                ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",__FUNCTION__);
                return false;
            }
        }
    }

    //On 8x26 & 8974 hw, we have a limitation of downscaling+blending.
    //If multiple layers require downscaling and also overlap,
    //fall back to GPU since MDSS can not handle it.
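    // e.g. two MDP-marked layers that are both downscaled and whose
    // displayFrames intersect would hit this limitation; the check below
    // scans every MDP pair for exactly that combination.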
    if(qdutils::MDPVersion::getInstance().is8x74v2() ||
            qdutils::MDPVersion::getInstance().is8x26()) {
        for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) {
            hwc_layer_1_t* botLayer = &list->hwLayers[i];
            if(!mCurrentFrame.isFBComposed[i] &&
                    isDownscaleRequired(botLayer)) {
                //if layer-i is marked for MDP and needs downscaling
                //check if any MDP layer on top of i overlaps with layer-i
                for(int j = i+1; j < mCurrentFrame.layerCount; ++j) {
                    hwc_layer_1_t* topLayer = &list->hwLayers[j];
                    if(!mCurrentFrame.isFBComposed[j] &&
                            isDownscaleRequired(topLayer)) {
                        hwc_rect_t r = getIntersection(botLayer->displayFrame,
                                topLayer->displayFrame);
                        if(isValidRect(r))
                            return false;
                    }
                }
            }
        }
    }
    return true;
}

int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    int ret = 0;
    char property[PROPERTY_VALUE_MAX];

    if(!ctx || !list) {
        ALOGE("%s: Invalid context or list",__FUNCTION__);
        mCachedFrame.reset();
        return -1;
    }

    const int numLayers = ctx->listStats[mDpy].numAppLayers;

    if(property_get("debug.hwc.simulate", property, NULL) > 0) {
        int currentFlags = atoi(property);
        if(currentFlags != sSimulationFlags) {
            sSimulationFlags = currentFlags;
            ALOGE("%s: Simulation Flag read: 0x%x (%d)", __FUNCTION__,
                    sSimulationFlags, sSimulationFlags);
        }
    }
    // reset PTOR
    if(!mDpy)
        memset(&(ctx->mPtorInfo), 0, sizeof(ctx->mPtorInfo));

    //Do not cache the information for next draw cycle.
    if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
        ALOGI("%s: Unsupported layer count for mdp composition",
                __FUNCTION__);
        mCachedFrame.reset();
        return -1;
    }

    //reset old data
    mCurrentFrame.reset(numLayers);
    memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
    mCurrentFrame.dropCount = 0;

    // Detect the start of animation and fall back to GPU only once to cache
    // all the layers in FB and display FB content until animation completes.
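    // The per-display animation state acts as a one-shot: the first animating
    // frame flips ANIMATION_STOPPED to ANIMATION_STARTED and forces a redraw
    // so every layer lands in the FB; subsequent animating frames reuse the
    // cached FB content without another GPU pass.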
    if(ctx->listStats[mDpy].isDisplayAnimating) {
        mCurrentFrame.needsRedraw = false;
        if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) {
            mCurrentFrame.needsRedraw = true;
            ctx->mAnimationState[mDpy] = ANIMATION_STARTED;
        }
        setMDPCompLayerFlags(ctx, list);
        mCachedFrame.updateCounts(mCurrentFrame);
        ret = -1;
        return ret;
    } else {
        ctx->mAnimationState[mDpy] = ANIMATION_STOPPED;
    }

    //Hard conditions, if not met, cannot do MDP comp
    if(isFrameDoable(ctx)) {
        generateROI(ctx, list);

        mModeOn = tryFullFrame(ctx, list) || tryVideoOnly(ctx, list);
        if(mModeOn) {
            setMDPCompLayerFlags(ctx, list);
        } else {
            resetROI(ctx, mDpy);
            reset(ctx);
            memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
            mCurrentFrame.dropCount = 0;
            ret = -1;
        }
    } else {
        ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
                __FUNCTION__);
        ret = -1;
    }

    if(isDebug()) {
        ALOGD("GEOMETRY change: %d",
                (list->flags & HWC_GEOMETRY_CHANGED));
        android::String8 sDump("");
        dump(sDump, ctx);
        ALOGD("%s",sDump.string());
    }

    mCachedFrame.cacheAll(list);
    mCachedFrame.updateCounts(mCurrentFrame);
    return ret;
}

bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) {

    bool bRet = true;
    int mdpIndex = mCurrentFrame.layerToMDP[index];
    PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
    info.pipeInfo = new MdpYUVPipeInfo;
    info.rot = NULL;
    MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo;

    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = Overlay::FORMAT_YUV;
    pipeSpecs.needsScaling = true;
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID){
        bRet = false;
        ALOGD_IF(isDebug(),"%s: allocating first VG pipe failed",
                __FUNCTION__);
    }
    pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.rIndex == ovutils::OV_INVALID){
        bRet = false;
        ALOGD_IF(isDebug(),"%s: allocating second VG pipe failed",
                __FUNCTION__);
    }
    return bRet;
}

int MDPComp::drawOverlap(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    int fd = -1;
    if (ctx->mPtorInfo.isActive()) {
        fd = ctx->mCopyBit[mDpy]->drawOverlap(ctx, list);
        if (fd < 0) {
            ALOGD_IF(isDebug(),"%s: failed", __FUNCTION__);
        }
    }
    return fd;
}
//=============MDPCompNonSplit==================================================

void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    //If 4k2k Yuv layer split is possible, and if
    //fbz is above 4k2k layer, increment fb zorder by 1
    //as we split 4k2k layer and increment zorder for right half
    //of the layer
    if(!ctx)
        return;
    if(mCurrentFrame.fbZ >= 0) {
        for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {

//=============MDPCompNonSplit=================================================

void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    //If a 4k2k YUV layer can be split and the FB z-order lies above that
    //layer, bump the FB z-order by 1, since the split right half of the
    //layer takes an extra z-order slot.
    if(!ctx)
        return;
    if(mCurrentFrame.fbZ >= 0) {
        for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                    //As we split the 4kx2k YUV layer across 2 VG pipes
                    //(if available), increase mdpCount by 1.
                    mCurrentFrame.mdpCount++;
                }
            }
        }
    }
}
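
/*
 * Worked example for the z-order adjustment above, with a hypothetical list:
 * layer 0 is a 4kx2k MDP layer and layer 1 is FB-composed with fbZ == 1.
 * At index 0 the loop advances mdpNextZOrder to 1 for the layer itself;
 * since mdpNextZOrder (1) <= fbZ (1), the right-half pipe would collide with
 * the FB stage, so fbZ is bumped to 2 and mdpCount grows by one.
 */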

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoNonSplit& mdp_info =
        *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo));
    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest dest = mdp_info.index;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d",
            __FUNCTION__, layer, zOrder, dest);

    return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest,
            &PipeLayerPair.rot);
}

bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if(allocSplitVGPipesfor4k2k(ctx, index)){
                continue;
            }
        }

        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoNonSplit;
        info.rot = NULL;
        MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo;

        Overlay::PipeSpecs pipeSpecs;
        pipeSpecs.formatClass = isYuvBuffer(hnd) ?
                Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
        pipeSpecs.needsScaling = qhwc::needsScaling(layer) or
                (qdutils::MDPVersion::getInstance().is8x26() and
                ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024);
        pipeSpecs.dpy = mDpy;
        pipeSpecs.fb = false;
        pipeSpecs.numActiveDisplays = ctx->numActiveDisplays;

        pipe_info.index = ctx->mOverlay->getPipe(pipeSpecs);

        if(pipe_info.index == ovutils::OV_INVALID) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe", __FUNCTION__);
            return false;
        }
    }
    return true;
}

int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpYUVPipeInfo& mdp_info =
        *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;

    return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
            lDest, rDest, &PipeLayerPair.rot);
}
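
/*
 * In the draw path below, note that when a Rotator is attached to a layer,
 * the source buffer is queued to the rotator first and the rotator output
 * (getDstMemId()/getDstOffset()) is what gets queued to the overlay pipes,
 * so the MDP always scans out pre-rotated content.
 */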

bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled() or !mModeOn) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__);
        return true;
    }

    // Set the handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            if (!(layer->flags & HWC_COLOR_FILL)) {
                ALOGE("%s handle null", __FUNCTION__);
                return false;
            }
            // No PLAY for color-fill layers
            layerProp[i].mFlags &= ~HWC_MDPCOMP;
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }
        } else {
            MdpPipeInfoNonSplit& pipe_info =
                *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            ovutils::eDest dest = pipe_info.index;
            if(dest == ovutils::OV_INVALID) {
                ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
                return false;
            }

            if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
                continue;
            }

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            int index = ctx->mPtorInfo.getPTORArrayIndex(i);
            if (!mDpy && (index != -1)) {
                hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
                fd = hnd->fd;
                // Use the offset of the render buffer
                offset = ctx->mPtorInfo.mRenderBuffOffset[index];
            }

            ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                    "using pipe: %d", __FUNCTION__, layer, hnd, dest );

            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            if(rot) {
                if(!rot->queueBuffer(fd, offset))
                    return false;
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            if (!ov.queueBuffer(fd, offset, dest)) {
                ALOGE("%s: queueBuffer failed for display:%d ",
                        __FUNCTION__, mDpy);
                return false;
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }
    return true;
}
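
/*
 * The split variant below drives displays wider than a single MDP mixer.
 * getLeftSplit() returns the boundary between the left and right mixers; a
 * layer needs a pipe on each mixer whose half its displayFrame touches, so
 * a layer crossing the boundary consumes two pipes.
 */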

//=============MDPCompSplit====================================================

void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    //If a 4kx2k YUV layer lies entirely within either the left half or the
    //right half, try splitting the YUV layer to avoid decimation.
    const int lSplit = getLeftSplit(ctx, mDpy);
    if(mCurrentFrame.fbZ >= 0) {
        for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    hwc_rect_t dst = layer->displayFrame;
                    if((dst.left > lSplit) || (dst.right < lSplit)) {
                        mCurrentFrame.mdpCount += 1;
                    }
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                }
            }
        }
    }
}

bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {

    const int lSplit = getLeftSplit(ctx, mDpy);
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScalingWithSplit(ctx, layer, mDpy);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.mixer = Overlay::MIXER_LEFT;
    pipeSpecs.fb = false;

    // Acquire a pipe only for the updating half
    hwc_rect_t l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect_t r_roi = ctx->listStats[mDpy].rRoi;

    if (dst.left < lSplit && isValidRect(getIntersection(dst, l_roi))) {
        pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.lIndex == ovutils::OV_INVALID)
            return false;
    }

    if(dst.right > lSplit && isValidRect(getIntersection(dst, r_roi))) {
        pipeSpecs.mixer = Overlay::MIXER_RIGHT;
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID)
            return false;
    }

    return true;
}
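
/*
 * Example of the ROI-based acquisition above, with hypothetical values: on a
 * 2560-wide panel with lSplit == 1280, a layer at {100, 0, 800, 600} only
 * satisfies dst.left < lSplit, so at most a left-mixer pipe is requested,
 * and even that is skipped when the left ROI of a partial update does not
 * intersect the layer's frame.
 */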

bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        hwc_rect_t dst = layer->displayFrame;
        const int lSplit = getLeftSplit(ctx, mDpy);
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if((dst.left > lSplit) || (dst.right < lSplit)){
                if(allocSplitVGPipesfor4k2k(ctx, index)){
                    continue;
                }
            }
        }
        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoSplit;
        info.rot = NULL;
        MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo;

        if(!acquireMDPPipes(ctx, layer, pipe_info)) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe for type",
                    __FUNCTION__);
            return false;
        }
    }
    return true;
}

int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    const int lSplit = getLeftSplit(ctx, mDpy);
    hwc_rect_t dst = layer->displayFrame;
    if((dst.left > lSplit) || (dst.right < lSplit)) {
        MdpYUVPipeInfo& mdp_info =
            *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
        eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
        eIsFg isFg = IS_FG_OFF;
        eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
        eDest lDest = mdp_info.lIndex;
        eDest rDest = mdp_info.rIndex;

        return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
                lDest, rDest, &PipeLayerPair.rot);
    } else {
        return configure(ctx, layer, PipeLayerPair);
    }
}

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
            "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest);

    return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest,
            rDest, &PipeLayerPair.rot);
}
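
/*
 * In the split draw path below, AD (assertive display) may post-process the
 * buffer: when mAD->draw() succeeds, the AD output fd/offset replace the
 * layer's, so the mixers scan out the processed buffer.
 */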

bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled() or !mModeOn) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not enabled/configured", __FUNCTION__);
        return true;
    }

    // Set the handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            ALOGE("%s handle null", __FUNCTION__);
            return false;
        }

        if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }
        } else {
            MdpPipeInfoSplit& pipe_info =
                *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;

            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            int index = ctx->mPtorInfo.getPTORArrayIndex(i);
            if (!mDpy && (index != -1)) {
                hnd = ctx->mCopyBit[mDpy]->getCurrentRenderBuffer();
                fd = hnd->fd;
                offset = ctx->mPtorInfo.mRenderBuffOffset[index];
            }

            if(ctx->mAD->draw(ctx, fd, offset)) {
                fd = ctx->mAD->getDstFd();
                offset = ctx->mAD->getDstOffset();
            }

            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            //************* play left mixer **********
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for left mixer",
                            __FUNCTION__);
                    return false;
                }
            }

            //************* play right mixer **********
            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p "
                        "using pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for right mixer",
                            __FUNCTION__);
                    return false;
                }
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }

    return true;
}

//================MDPCompSrcSplit==============================================
bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    //If 2 pipes are staged at a single mixer stage, the left pipe should
    //have higher priority than the right one. Pipe priorities start with
    //VG0, VG1 ..., then RGB0 ..., then DMA1.

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScaling(layer);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    //1 pipe by default for a layer
    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID) {
        return false;
    }

    /* Use 2 pipes if
     * a) the layer's crop width is > 2048, or
     * b) the layer's dest width is > 2048, or
     * c) on primary, the driver has indicated via caps to always split. This
     *    is based on an empirically derived panel-height value and applies
     *    only if the layer's width is > the mixer's width.
     */

    bool primarySplitAlways = (mDpy == HWC_DISPLAY_PRIMARY) and
            qdutils::MDPVersion::getInstance().isSrcSplitAlways();
    int lSplit = getLeftSplit(ctx, mDpy);
    int dstWidth = dst.right - dst.left;
    int cropWidth = crop.right - crop.left;

    if(dstWidth > qdutils::MAX_DISPLAY_DIM or
            cropWidth > qdutils::MAX_DISPLAY_DIM or
            (primarySplitAlways and (cropWidth > lSplit))) {
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID) {
            return false;
        }

        // comparePipePriority() return values:
        //  1 Left pipe has higher priority, do nothing.
        //  0 Pipes have the same priority.
        // -1 Right pipe has higher priority, needs swap.
        if(ctx->mOverlay->comparePipePriority(pipe_info.lIndex,
                pipe_info.rIndex) == -1) {
            qhwc::swap(pipe_info.lIndex, pipe_info.rIndex);
        }
    }

    return true;
}
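
/*
 * Worked example for the dual-pipe split in configure() below, with
 * hypothetical rects: crop = {0, 0, 4096, 2160}, dst = {0, 0, 2560, 1440}.
 * The midpoint math gives
 *   cropL = {0, 0, 2048, 2160}, cropR = {2048, 0, 4096, 2160}
 *   dstL  = {0, 0, 1280, 1440}, dstR  = {1280, 0, 2560, 1440}
 * so each pipe handles a 2048-wide half, within the per-pipe
 * MAX_DISPLAY_DIM limit checked in acquireMDPPipes().
 */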

int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if(!hnd) {
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return -1;
    }
    MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    Rotator **rot = &PipeLayerPair.rot;
    eZorder z = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int transform = layer->transform;
    eTransform orient = static_cast<eTransform>(transform);
    const int downscale = 0;
    int rotFlags = ROT_FLAGS_NONE;
    uint32_t format = ovutils::getMdpFormat(hnd->format, isTileRendered(hnd));
    Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size);

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
            "dest_pipeR: %d",__FUNCTION__, layer, z, lDest, rDest);

    // Handle R/B swap
    if (layer->flags & HWC_FORMAT_RB_SWAP) {
        if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
        else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
    }

    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    setMdpFlags(ctx, layer, mdpFlags, 0, transform);

    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        //Enable overfetch
        setMdpFlags(mdpFlags, OV_MDSS_MDP_DUAL_PIPE);
    }

    if(has90Transform(layer) && isRotationDoable(ctx, hnd)) {
        (*rot) = ctx->mRotMgr->getNext();
        if((*rot) == NULL) return -1;
        ctx->mLayerRotMap[mDpy]->add(layer, *rot);
        //If the video is using a single pipe, enable BWC
        if(rDest == OV_INVALID) {
            BwcPM::setBwc(crop, dst, transform, mdpFlags);
        }
        //Configure rotator for pre-rotation
        if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) {
            ALOGE("%s: configRotator failed!", __FUNCTION__);
            return -1;
        }
        updateSource(orient, whf, crop, *rot);
        rotFlags |= ROT_PREROTATED;
    }

    //If 2 pipes are being used, split the layer's crop and dst in half
    hwc_rect_t cropL = crop;
    hwc_rect_t cropR = crop;
    hwc_rect_t dstL = dst;
    hwc_rect_t dstR = dst;
    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        cropL.right = (crop.right + crop.left) / 2;
        cropR.left = cropL.right;
        sanitizeSourceCrop(cropL, cropR, hnd);

        //Swap crops on H flip since 2 pipes are being used
        if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) {
            hwc_rect_t tmp = cropL;
            cropL = cropR;
            cropR = tmp;
        }

        dstL.right = (dst.right + dst.left) / 2;
        dstR.left = dstL.right;
    }

    //The MDP pipes see no transform: we either pre-rotated via the rotator
    //or the MDP handles the flips itself.
    orient = OVERLAY_TRANSFORM_0;
    transform = 0;

    //Configure left pipe
    if(lDest != OV_INVALID) {
        PipeArgs pargL(mdpFlags, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags), layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));

        if(configMdp(ctx->mOverlay, pargL, orient,
                cropL, dstL, metadata, lDest) < 0) {
            ALOGE("%s: commit failed for left mixer config", __FUNCTION__);
            return -1;
        }
    }

    //Configure right pipe
    if(rDest != OV_INVALID) {
        PipeArgs pargR(mdpFlags, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags),
                layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));
        if(configMdp(ctx->mOverlay, pargR, orient,
                cropR, dstR, metadata, rDest) < 0) {
            ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
            return -1;
        }
    }

    return 0;
}

}; //namespace qhwc