/*
 * Copyright (C) 2012-2014, The Linux Foundation. All rights reserved.
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <math.h>
#include "hwc_mdpcomp.h"
#include <sys/ioctl.h>
#include "external.h"
#include "virtual.h"
#include "qdMetaData.h"
#include "mdp_version.h"
#include "hwc_fbupdate.h"
#include "hwc_ad.h"
#include <overlayRotator.h>

using namespace overlay;
using namespace qdutils;
using namespace overlay::utils;
namespace ovutils = overlay::utils;

namespace qhwc {

//==============MDPComp========================================================

IdleInvalidator *MDPComp::idleInvalidator = NULL;
bool MDPComp::sIdleFallBack = false;
bool MDPComp::sHandleTimeout = false;
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
int MDPComp::sSimulationFlags = 0;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
bool MDPComp::sEnable4k2kYUVSplit = false;
bool MDPComp::sSrcSplitEnabled = false;

MDPComp* MDPComp::getObject(hwc_context_t *ctx, const int& dpy) {
    if(qdutils::MDPVersion::getInstance().isSrcSplit()) {
        sSrcSplitEnabled = true;
        return new MDPCompSrcSplit(dpy);
    } else if(isDisplaySplit(ctx, dpy)) {
        return new MDPCompSplit(dpy);
    }
    return new MDPCompNonSplit(dpy);
}

MDPComp::MDPComp(int dpy) : mDpy(dpy) {}

void MDPComp::dump(android::String8& buf, hwc_context_t *ctx)
{
    if(mCurrentFrame.layerCount > MAX_NUM_APP_LAYERS)
        return;

    dumpsys_log(buf, "HWC Map for Dpy: %s \n",
                (mDpy == 0) ? "\"PRIMARY\"" :
                (mDpy == 1) ? "\"EXTERNAL\"" : "\"VIRTUAL\"");
    dumpsys_log(buf, "CURR_FRAME: layerCount:%2d mdpCount:%2d "
                "fbCount:%2d \n", mCurrentFrame.layerCount,
                mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf, "needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n",
                (mCurrentFrame.needsRedraw ? "YES" : "NO"),
                mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    if(isDisplaySplit(ctx, mDpy)) {
        dumpsys_log(buf, "Programmed ROI's: Left: [%d, %d, %d, %d] "
                "Right: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom,
                ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top,
                ctx->listStats[mDpy].rRoi.right,
                ctx->listStats[mDpy].rRoi.bottom);
    } else {
        dumpsys_log(buf, "Programmed ROI: [%d, %d, %d, %d] \n",
                ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
                ctx->listStats[mDpy].lRoi.right,
                ctx->listStats[mDpy].lRoi.bottom);
    }
    dumpsys_log(buf, " ---------------------------------------------  \n");
    dumpsys_log(buf, " listIdx | cached? | mdpIndex | comptype  |  Z  \n");
    dumpsys_log(buf, " ---------------------------------------------  \n");
    for(int index = 0; index < mCurrentFrame.layerCount; index++)
        dumpsys_log(buf, " %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                    mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                     (mCurrentFrame.drop[index] ? "DROP" :
                      (mCurrentFrame.needsRedraw ? "GLES" : "CACHE")) : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
        mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
    dumpsys_log(buf, "\n");
}
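/* Illustrative only: for a three-layer frame whose middle layer is cached in
 * the FB, the table produced by dump() above would look roughly like:
 *      listIdx | cached? | mdpIndex | comptype  |  Z
 *            0 |      NO |        0 |       MDP |  0
 *            1 |     YES |       -1 |     CACHE |  1
 *            2 |      NO |        1 |       MDP |  2
 * (values are hypothetical; mdpIndex is -1 for FB-composed layers since they
 * occupy no MDP pipe). */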
bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!", __FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX];

    sEnabled = false;
    if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX) ||
        (!strncasecmp(property, "true", PROPERTY_VALUE_MAX)))) {
        sEnabled = true;
    }

    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX) ||
        (!strncasecmp(property, "true", PROPERTY_VALUE_MAX)))) {
        sEnableMixedMode = false;
    }

    if(property_get("debug.mdpcomp.logs", property, NULL) > 0) {
        if(atoi(property) != 0)
            sDebugLogs = true;
    }

    sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
    if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
    }

    if(ctx->mMDP.panel != MIPI_CMD_PANEL) {
        // Idle invalidation is not necessary on command mode panels
        long idle_timeout = DEFAULT_IDLE_TIME;
        if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) {
            if(atoi(property) != 0)
                idle_timeout = atoi(property);
        }

        //create the Idle Invalidator only when not disabled via property
        if(idle_timeout != -1)
            idleInvalidator = IdleInvalidator::getInstance();

        if(idleInvalidator == NULL) {
            ALOGE("%s: failed to instantiate idleInvalidator object",
                  __FUNCTION__);
        } else {
            idleInvalidator->init(timeout_handler, ctx,
                                  (unsigned int)idle_timeout);
        }
    }

    if(!qdutils::MDPVersion::getInstance().isSrcSplit() &&
            property_get("persist.mdpcomp.4k2kSplit", property, "0") > 0 &&
            (!strncmp(property, "1", PROPERTY_VALUE_MAX) ||
             !strncasecmp(property, "true", PROPERTY_VALUE_MAX))) {
        sEnable4k2kYUVSplit = true;
    }
    return true;
}

void MDPComp::reset(hwc_context_t *ctx) {
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numLayers);
    ctx->mOverlay->clear(mDpy);
    ctx->mLayerRotMap[mDpy]->clear();
}
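/* Idle-fallback flow, as wired up in init() above: when the IdleInvalidator
 * timer expires, timeout_handler() below sets sIdleFallBack and asks
 * SurfaceFlinger to redraw. On that redraw, tryFullFrame() sees sIdleFallBack
 * and declines MDP composition, so the idle frame is composed by the GPU into
 * the FB and the MDP pipes can be released. */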
void MDPComp::timeout_handler(void *udata) {
    struct hwc_context_t* ctx = (struct hwc_context_t*)(udata);

    if(!ctx) {
        ALOGE("%s: received empty data in timer callback", __FUNCTION__);
        return;
    }
    Locker::Autolock _l(ctx->mDrawLock);
    // Handle the timeout event only if the previous composition is MDP or
    // MIXED.
    if(!sHandleTimeout) {
        ALOGD_IF(isDebug(), "%s:Do not handle this timeout", __FUNCTION__);
        return;
    }
    if(!ctx->proc) {
        ALOGE("%s: HWC proc not registered", __FUNCTION__);
        return;
    }
    sIdleFallBack = true;
    /* Trigger SF to redraw the current frame */
    ctx->proc->invalidate(ctx->proc);
}

void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    LayerProp *layerProp = ctx->layerProp[mDpy];

    for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) {
        hwc_layer_1_t* layer = &(list->hwLayers[index]);
        if(!mCurrentFrame.isFBComposed[index]) {
            layerProp[index].mFlags |= HWC_MDPCOMP;
            layer->compositionType = HWC_OVERLAY;
            layer->hints |= HWC_HINT_CLEAR_FB;
        } else {
            /* Drop the layer when it's already present in the FB OR when it
             * lies outside the frame's ROI */
            if(!mCurrentFrame.needsRedraw || mCurrentFrame.drop[index]) {
                layer->compositionType = HWC_OVERLAY;
            }
        }
    }
}

void MDPComp::setRedraw(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    mCurrentFrame.needsRedraw = false;
    if(!mCachedFrame.isSameFrame(mCurrentFrame, list) ||
            (list->flags & HWC_GEOMETRY_CHANGED) ||
            isSkipPresent(ctx, mDpy)) {
        mCurrentFrame.needsRedraw = true;
    }
}

MDPComp::FrameInfo::FrameInfo() {
    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    reset(0);
}

void MDPComp::FrameInfo::reset(const int& numLayers) {
    for(int i = 0; i < MAX_PIPES_PER_MIXER; i++) {
        if(mdpToLayer[i].pipeInfo) {
            delete mdpToLayer[i].pipeInfo;
            mdpToLayer[i].pipeInfo = NULL;
            //We don't own the rotator
            mdpToLayer[i].rot = NULL;
        }
    }

    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    memset(&layerToMDP, -1, sizeof(layerToMDP));
    memset(&isFBComposed, 1, sizeof(isFBComposed));

    layerCount = numLayers;
    fbCount = numLayers;
    mdpCount = 0;
    needsRedraw = true;
    fbZ = -1;
}

void MDPComp::FrameInfo::map() {
    // populate the layer and MDP maps
    int mdpIdx = 0;
    for(int idx = 0; idx < layerCount; idx++) {
        if(!isFBComposed[idx]) {
            mdpToLayer[mdpIdx].listIndex = idx;
            layerToMDP[idx] = mdpIdx++;
        }
    }
}

MDPComp::LayerCache::LayerCache() {
    reset();
}

void MDPComp::LayerCache::reset() {
    memset(&hnd, 0, sizeof(hnd));
    memset(&isFBComposed, true, sizeof(isFBComposed));
    memset(&drop, false, sizeof(drop));
    layerCount = 0;
}

void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) {
    const int numAppLayers = (int)list->numHwLayers - 1;
    for(int i = 0; i < numAppLayers; i++) {
        hnd[i] = list->hwLayers[i].handle;
    }
}

void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
    layerCount = curFrame.layerCount;
    memcpy(&isFBComposed, &curFrame.isFBComposed, sizeof(isFBComposed));
    memcpy(&drop, &curFrame.drop, sizeof(drop));
}

bool MDPComp::LayerCache::isSameFrame(const FrameInfo& curFrame,
        hwc_display_contents_1_t* list) {
    if(layerCount != curFrame.layerCount)
        return false;
    for(int i = 0; i < curFrame.layerCount; i++) {
        if((curFrame.isFBComposed[i] != isFBComposed[i]) ||
           (curFrame.drop[i] != drop[i])) {
            return false;
        }
        if(curFrame.isFBComposed[i] &&
           (hnd[i] != list->hwLayers[i].handle)) {
            return false;
        }
    }
    return true;
}
bool MDPComp::isSupportedForMDPComp(hwc_context_t *ctx, hwc_layer_1_t* layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if((not isYuvBuffer(hnd) and has90Transform(layer)) or
       (not isValidDimension(ctx, layer))
       //More conditions here, SKIP, sRGB+Blend etc
       ) {
        return false;
    }
    return true;
}

bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;

    if(!hnd) {
        if(layer->flags & HWC_COLOR_FILL) {
            // Color layer
            return true;
        }
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return false;
    }

    //XXX: Investigate doing this with pixel phase on MDSS
    if(!isSecureBuffer(hnd) && isNonIntegralSourceCrop(layer->sourceCropf))
        return false;

    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int crop_w = crop.right - crop.left;
    int crop_h = crop.bottom - crop.top;
    int dst_w = dst.right - dst.left;
    int dst_h = dst.bottom - dst.top;
    float w_scale = ((float)crop_w / (float)dst_w);
    float h_scale = ((float)crop_h / (float)dst_h);

    /* Workaround for an MDP HW limitation in DSI command mode panels where
     * FPS will not go beyond 30 if buffers on RGB pipes are of width or
     * height less than 5 pixels.
     * There is also a HW limitation in MDP: the minimum block size is 2x2,
     * so fall back to GPU if the height is less than 2.
     */
    if(qdutils::MDPVersion::getInstance().hasMinCropWidthLimitation() and
       (crop_w < 5 or crop_h < 5))
        return false;

    if((w_scale > 1.0f) || (h_scale > 1.0f)) {
        const uint32_t maxMDPDownscale =
            qdutils::MDPVersion::getInstance().getMaxMDPDownscale();
        const float w_dscale = w_scale;
        const float h_dscale = h_scale;

        if(ctx->mMDP.version >= qdutils::MDSS_V5) {

            if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
                /* On targets that don't support decimation (e.g. 8x26), the
                 * maximum downscale supported is the overlay pipe downscale.
                 */
                if(crop_w > MAX_DISPLAY_DIM || w_dscale > maxMDPDownscale ||
                   h_dscale > maxMDPDownscale)
                    return false;
            } else {
                // Decimation on macrotile format layers is not supported.
                if(isTileRendered(hnd)) {
                    /* MDP can read a maximum width of MAX_DISPLAY_DIM.
                     * Bail out if
                     *      1. Src crop > MAX_DISPLAY_DIM on nonsplit MDPComp
                     *      2. it exceeds the maximum downscale limit
                     */
                    if(((crop_w > MAX_DISPLAY_DIM) && !sSrcSplitEnabled) ||
                       w_dscale > maxMDPDownscale ||
                       h_dscale > maxMDPDownscale) {
                        return false;
                    }
                } else if(w_dscale > 64 || h_dscale > 64)
                    return false;
            }
        } else { //A-family
            if(w_dscale > maxMDPDownscale || h_dscale > maxMDPDownscale)
                return false;
        }
    }

    if((w_scale < 1.0f) || (h_scale < 1.0f)) {
        const uint32_t upscale =
            qdutils::MDPVersion::getInstance().getMaxMDPUpscale();
        const float w_uscale = 1.0f / w_scale;
        const float h_uscale = 1.0f / h_scale;

        if(w_uscale > upscale || h_uscale > upscale)
            return false;
    }

    return true;
}
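/* Worked example for the scale checks in isValidDimension() above, assuming a
 * target whose getMaxMDPDownscale() returns 4: a 1920x1080 crop rendered into
 * a 480x270 destination gives w_scale = h_scale = 4.0, which is accepted just
 * at the limit; shrinking the destination any further exceeds maxMDPDownscale
 * and pushes the layer back to GPU composition. Upscaling is checked
 * symmetrically via 1/scale against getMaxMDPUpscale(). */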
bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
    bool ret = true;

    if(!isEnabled()) {
        ALOGD_IF(isDebug(), "%s: MDP Comp. not enabled.", __FUNCTION__);
        ret = false;
    } else if(qdutils::MDPVersion::getInstance().is8x26() &&
              ctx->mVideoTransFlag &&
              isSecondaryConnected(ctx)) {
        //1 padding round to shift pipes across mixers
        ALOGD_IF(isDebug(), "%s: MDP Comp. video transition padding round",
                 __FUNCTION__);
        ret = false;
    } else if(isSecondaryConfiguring(ctx)) {
        ALOGD_IF(isDebug(), "%s: External Display connection is pending",
                 __FUNCTION__);
        ret = false;
    } else if(ctx->isPaddingRound) {
        ALOGD_IF(isDebug(), "%s: padding round invoked for dpy %d",
                 __FUNCTION__, mDpy);
        ret = false;
    }
    return ret;
}

void MDPCompNonSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect_t roi = ctx->listStats[mDpy].lRoi;
    fbRect = getIntersection(fbRect, roi);
}

/* 1) Identify layers that are not visible or that lie outside the updating
 *    ROI and drop them from composition.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompNonSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    hwc_rect_t visibleRect = ctx->listStats[mDpy].lRoi;

    for(int i = numAppLayers - 1; i >= 0; i--) {
        if(!isValidRect(visibleRect)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;
        hwc_rect_t res = getIntersection(visibleRect, dstRect);

        if(!isValidRect(res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset the frame ROI when any layer which needs scaling also
             * needs ROI cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                ALOGI("%s: Resetting ROI due to scaling", __FUNCTION__);
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            /* deduct any opaque region from visibleRect */
            if(layer->blending == HWC_BLENDING_NONE)
                visibleRect = deductRect(visibleRect, res);
        }
    }
    return true;
}
/* Calculate the ROI for the frame by accounting for the displayFrame of every
 * updating layer. If a dirty region is applicable, calculate it from the
 * changing layers' dirtyRect instead. */
void MDPCompNonSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    if(!canPartialUpdate(ctx, list))
        return;

    struct hwc_rect roi = (struct hwc_rect){0, 0, 0, 0};
    hwc_rect fullFrame = (struct hwc_rect) {0, 0, (int)ctx->dpyAttr[mDpy].xres,
        (int)ctx->dpyAttr[mDpy].yres};

    for(int index = 0; index < numAppLayers; index++) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        if((mCachedFrame.hnd[index] != layer->handle) ||
           isYuvBuffer((private_handle_t *)layer->handle)) {
            hwc_rect_t dst = layer->displayFrame;
            hwc_rect_t updatingRect = dst;

#ifdef QCOM_BSP
            if(!needsScaling(layer) && !layer->transform)
            {
                hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf);
                int x_off = dst.left - src.left;
                int y_off = dst.top - src.top;
                updatingRect = moveRect(layer->dirtyRect, x_off, y_off);
            }
#endif

            roi = getUnion(roi, updatingRect);
        }
    }

    /* No layer is updating, but SF still wants a refresh. */
    if(!isValidRect(roi))
        return;

    // Align ROI coordinates to panel restrictions
    roi = getSanitizeROI(roi, fullFrame);

    ctx->listStats[mDpy].lRoi = roi;
    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(), "%s: generated ROI: [%d, %d, %d, %d]", __FUNCTION__,
             ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
             ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom);
}

void MDPCompSplit::trimAgainstROI(hwc_context_t *ctx, hwc_rect_t& fbRect) {
    hwc_rect l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect r_roi = ctx->listStats[mDpy].rRoi;

    hwc_rect_t l_fbRect = getIntersection(fbRect, l_roi);
    hwc_rect_t r_fbRect = getIntersection(fbRect, r_roi);
    fbRect = getUnion(l_fbRect, r_fbRect);
}

/* 1) Identify layers that are not visible or that lie outside BOTH of the
 *    updating ROI's and drop them from composition. If a layer spans both
 *    halves of the screen but is needed by only one ROI, the non-contributing
 *    half will not be programmed for MDP.
 * 2) If we have a scaling layer which needs cropping against the generated
 *    ROI, reset the ROI to full resolution. */
bool MDPCompSplit::validateAndApplyROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    hwc_rect_t visibleRectL = ctx->listStats[mDpy].lRoi;
    hwc_rect_t visibleRectR = ctx->listStats[mDpy].rRoi;

    for(int i = numAppLayers - 1; i >= 0; i--) {
        if(!isValidRect(visibleRectL) && !isValidRect(visibleRectR))
        {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
            continue;
        }

        const hwc_layer_1_t* layer = &list->hwLayers[i];
        hwc_rect_t dstRect = layer->displayFrame;

        hwc_rect_t l_res = getIntersection(visibleRectL, dstRect);
        hwc_rect_t r_res = getIntersection(visibleRectR, dstRect);
        hwc_rect_t res = getUnion(l_res, r_res);

        if(!isValidRect(l_res) && !isValidRect(r_res)) {
            mCurrentFrame.drop[i] = true;
            mCurrentFrame.dropCount++;
        } else {
            /* Reset the frame ROI when any layer which needs scaling also
             * needs ROI cropping */
            if(!isSameRect(res, dstRect) && needsScaling(layer)) {
                memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
                mCurrentFrame.dropCount = 0;
                return false;
            }

            if(layer->blending == HWC_BLENDING_NONE) {
                visibleRectL = deductRect(visibleRectL, l_res);
                visibleRectR = deductRect(visibleRectR, r_res);
            }
        }
    }
    return true;
}
/* Calculate the ROI for the frame by accounting for the displayFrame of every
 * updating layer. If a dirty region is applicable, calculate it from the
 * changing layers' dirtyRect instead. */
void MDPCompSplit::generateROI(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(!canPartialUpdate(ctx, list))
        return;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int lSplit = getLeftSplit(ctx, mDpy);

    int hw_h = (int)ctx->dpyAttr[mDpy].yres;
    int hw_w = (int)ctx->dpyAttr[mDpy].xres;

    struct hwc_rect l_frame = (struct hwc_rect){0, 0, lSplit, hw_h};
    struct hwc_rect r_frame = (struct hwc_rect){lSplit, 0, hw_w, hw_h};

    struct hwc_rect l_roi = (struct hwc_rect){0, 0, 0, 0};
    struct hwc_rect r_roi = (struct hwc_rect){0, 0, 0, 0};

    for(int index = 0; index < numAppLayers; index++) {
        hwc_layer_1_t* layer = &list->hwLayers[index];
        if((mCachedFrame.hnd[index] != layer->handle) ||
           isYuvBuffer((private_handle_t *)layer->handle)) {
            hwc_rect_t dst = layer->displayFrame;
            hwc_rect_t updatingRect = dst;

#ifdef QCOM_BSP
            if(!needsScaling(layer) && !layer->transform)
            {
                hwc_rect_t src = integerizeSourceCrop(layer->sourceCropf);
                int x_off = dst.left - src.left;
                int y_off = dst.top - src.top;
                updatingRect = moveRect(layer->dirtyRect, x_off, y_off);
            }
#endif

            hwc_rect_t l_dst = getIntersection(l_frame, updatingRect);
            if(isValidRect(l_dst))
                l_roi = getUnion(l_roi, l_dst);

            hwc_rect_t r_dst = getIntersection(r_frame, updatingRect);
            if(isValidRect(r_dst))
                r_roi = getUnion(r_roi, r_dst);
        }
    }

    /* For panels that cannot accept commands on both interfaces, we cannot
     * send two ROI's (one for each half). We merge them into a single ROI and
     * split that across lSplit for MDP mixer use. The ROI's are merged again
     * before updating the panel in the driver. */
    if(qdutils::MDPVersion::getInstance().needsROIMerge()) {
        hwc_rect_t temp_roi = getUnion(l_roi, r_roi);
        l_roi = getIntersection(temp_roi, l_frame);
        r_roi = getIntersection(temp_roi, r_frame);
    }

    /* No layer is updating, but SF still wants a refresh. */
    if(!isValidRect(l_roi) && !isValidRect(r_roi))
        return;

    l_roi = getSanitizeROI(l_roi, l_frame);
    r_roi = getSanitizeROI(r_roi, r_frame);

    ctx->listStats[mDpy].lRoi = l_roi;
    ctx->listStats[mDpy].rRoi = r_roi;

    if(!validateAndApplyROI(ctx, list))
        resetROI(ctx, mDpy);

    ALOGD_IF(isDebug(), "%s: generated L_ROI: [%d, %d, %d, %d] "
             "R_ROI: [%d, %d, %d, %d]", __FUNCTION__,
             ctx->listStats[mDpy].lRoi.left, ctx->listStats[mDpy].lRoi.top,
             ctx->listStats[mDpy].lRoi.right, ctx->listStats[mDpy].lRoi.bottom,
             ctx->listStats[mDpy].rRoi.left, ctx->listStats[mDpy].rRoi.top,
             ctx->listStats[mDpy].rRoi.right, ctx->listStats[mDpy].rRoi.bottom);
}
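/* Example of the split-ROI generation above (coordinates hypothetical): on a
 * 1080x1920 panel with lSplit = 540, a layer updating only [100, 100, 300,
 * 200] contributes solely to l_roi, so the right mixer programs nothing for
 * it and r_roi stays empty, unless another layer updates there or the panel
 * needsROIMerge(), in which case both halves receive the merged union. */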
/* Checks for conditions where all the layers marked for MDP comp cannot be
 * bypassed. Under such conditions we try to bypass at least the YUV layers */
bool MDPComp::tryFullFrame(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int priDispW = ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres;

    if(sIdleFallBack && !ctx->listStats[mDpy].secureUI) {
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d", __FUNCTION__, mDpy);
        return false;
    }

    if(isSkipPresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(), "%s: SKIP present: %d",
                 __FUNCTION__,
                 isSkipPresent(ctx, mDpy));
        return false;
    }

    if(mDpy > HWC_DISPLAY_PRIMARY && (priDispW > MAX_DISPLAY_DIM) &&
       (ctx->dpyAttr[mDpy].xres < MAX_DISPLAY_DIM)) {
        // Disable MDP comp on the secondary when the primary is a high-res
        // panel and the secondary is a normal 1080p display: in such a use
        // case MDP comp on the secondary would use decimation for the
        // downscale, causing a quality mismatch whenever we fall back to GPU
        // composition.
        ALOGD_IF(isDebug(), "%s: Disable MDP Composition for Secondary Disp",
                 __FUNCTION__);
        return false;
    }

    // check for the action safe flag and downscale mode, both of which
    // require scaling.
    if(ctx->dpyAttr[mDpy].mActionSafePresent
       || ctx->dpyAttr[mDpy].mDownScaleMode) {
        ALOGD_IF(isDebug(), "%s: Scaling needed for this frame", __FUNCTION__);
        return false;
    }

    for(int i = 0; i < numAppLayers; ++i) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;

        if(isYuvBuffer(hnd) && has90Transform(layer)) {
            if(!canUseRotator(ctx, mDpy)) {
                ALOGD_IF(isDebug(), "%s: Can't use rotator for dpy %d",
                         __FUNCTION__, mDpy);
                return false;
            }
        }

        //For 8x26 with panel width > 1k, fail MDP comp if an RGB layer needs
        //HFLIP. This may not be needed if Gfx pre-rotation can handle all
        //flips & rotations.
        if(qdutils::MDPVersion::getInstance().is8x26() &&
           (ctx->dpyAttr[mDpy].xres > 1024) &&
           (layer->transform & HWC_TRANSFORM_FLIP_H) &&
           (!isYuvBuffer(hnd)))
            return false;
    }

    if(ctx->mAD->isDoable()) {
        return false;
    }

    //If all the above hard conditions are met we can do full or partial MDP
    //comp.
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }

    return ret;
}
bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(sSimulationFlags & MDPCOMP_AVOID_FULL_MDP)
        return false;

    //Will benefit presentation / secondary-only layers.
    if((mDpy > HWC_DISPLAY_PRIMARY) &&
       (list->numHwLayers - 1) > MAX_SEC_LAYERS) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes", __FUNCTION__);
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numAppLayers; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not mCurrentFrame.drop[i] and
           not isSupportedForMDPComp(ctx, layer)) {
            ALOGD_IF(isDebug(), "%s: Unsupported layer in list", __FUNCTION__);
            return false;
        }

        //For 8x26: if only the secondary display has a layer that needs
        //scaling while the primary needs none, the DMA pipe is occupied by
        //the primary. If we then need to fall back to GLES composition, the
        //virtual display lacks a DMA pipe and an error is reported.
        if(qdutils::MDPVersion::getInstance().is8x26() &&
           mDpy >= HWC_DISPLAY_EXTERNAL &&
           qhwc::needsScaling(layer))
            return false;
    }

    mCurrentFrame.fbCount = 0;
    memcpy(&mCurrentFrame.isFBComposed, &mCurrentFrame.drop,
           sizeof(mCurrentFrame.isFBComposed));
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount -
        mCurrentFrame.dropCount;

    if(sEnable4k2kYUVSplit) {
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristics handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags, "%s: FULL_MDP_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}

bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
    if(!sEnableMixedMode) {
        //Mixed mode is disabled. No need to even try caching.
        return false;
    }

    bool ret = false;
    if(list->flags & HWC_GEOMETRY_CHANGED) { //Try load based first
        ret = loadBasedComp(ctx, list) or
            cacheBasedComp(ctx, list);
    } else {
        ret = cacheBasedComp(ctx, list) or
            loadBasedComp(ctx, list);
    }

    return ret;
}
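/* Ordering rationale for partialMDPComp() above: when geometry changed, the
 * layer cache carries no useful history, so loadBasedComp() (which simply
 * fills the available MDP stages from the bottom of the z-order and leaves
 * the rest to the FB) is tried first. On stable geometry cacheBasedComp() is
 * preferred, since it keeps already-cached, non-updating layers in the FB
 * target and avoids re-rendering them through GLES every frame. */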
bool MDPComp::cacheBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_CACHE_MDP)
        return false;

    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list);

    //If an MDP-marked layer is unsupported, we cannot do partial MDP comp.
    for(int i = 0; i < numAppLayers; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            if(not isSupportedForMDPComp(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: Unsupported layer in list",
                         __FUNCTION__);
                reset(ctx);
                return false;
            }
        }
    }

    updateYUV(ctx, list, false /*secure only*/);
    bool ret = markLayersForCaching(ctx, list); //sets up fbZ also
    if(!ret) {
        ALOGD_IF(isDebug(), "%s: batching failed, dpy %d", __FUNCTION__, mDpy);
        reset(ctx);
        return false;
    }

    int mdpCount = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit) {
        adjustForSourceSplit(ctx, list);
    }

    //Will benefit cases where a video has a non-updating background.
    if((mDpy > HWC_DISPLAY_PRIMARY) and
       (mdpCount > MAX_SEC_LAYERS)) {
        ALOGD_IF(isDebug(), "%s: Exceeds max secondary pipes", __FUNCTION__);
        reset(ctx);
        return false;
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristics handling failed");
        reset(ctx);
        return false;
    }
    ALOGD_IF(sSimulationFlags, "%s: CACHE_MDP_COMP SUCCEEDED",
             __FUNCTION__);

    return true;
}

bool MDPComp::loadBasedComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(sSimulationFlags & MDPCOMP_AVOID_LOAD_MDP)
        return false;

    if(not isLoadBasedCompDoable(ctx)) {
        return false;
    }

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    const int numNonDroppedLayers = numAppLayers - mCurrentFrame.dropCount;
    const int stagesForMDP = min(sMaxPipesPerMixer,
            ctx->mOverlay->availablePipes(mDpy, Overlay::MIXER_DEFAULT));

    int mdpBatchSize = stagesForMDP - 1; //1 stage for the FB
    int fbBatchSize = numNonDroppedLayers - mdpBatchSize;
    int lastMDPSupportedIndex = numAppLayers;
    int dropCount = 0;

    //Find the minimum MDP batch size
    for(int i = 0; i < numAppLayers; i++) {
        if(mCurrentFrame.drop[i]) {
            dropCount++;
            continue;
        }
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if(not isSupportedForMDPComp(ctx, layer)) {
            lastMDPSupportedIndex = i;
            mdpBatchSize = min(i - dropCount, stagesForMDP - 1);
            fbBatchSize = numNonDroppedLayers - mdpBatchSize;
            break;
        }
    }

    ALOGD_IF(isDebug(), "%s:Before optimizing fbBatch, mdpbatch %d, fbbatch %d "
             "dropped %d", __FUNCTION__, mdpBatchSize, fbBatchSize,
             mCurrentFrame.dropCount);

    //Start at a point where the fb batch has at least 2 layers, for this mode
    //to be justified.
    while(fbBatchSize < 2) {
        ++fbBatchSize;
        --mdpBatchSize;
    }
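    /* Worked example of the batch sizing above (numbers hypothetical): with 6
     * non-dropped app layers and stagesForMDP = 4, mdpBatchSize starts at 3
     * (one stage is reserved for the FB) and fbBatchSize at 3, already >= 2,
     * so the loop does nothing. With only 3 app layers the initial
     * fbBatchSize would be 0, and the loop shrinks the MDP batch to 1 so that
     * 2 layers remain for the FB. */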
    //If there are no layers for MDP, this mode doesn't make sense.
    if(mdpBatchSize < 1) {
        ALOGD_IF(isDebug(), "%s: No MDP layers after optimizing for fbBatch",
                 __FUNCTION__);
        return false;
    }

    mCurrentFrame.reset(numAppLayers);

    //Try successively smaller mdp batch sizes until we succeed or reach 1
    while(mdpBatchSize > 0) {
        //Mark layers for MDP comp
        int mdpBatchLeft = mdpBatchSize;
        for(int i = 0; i < lastMDPSupportedIndex and mdpBatchLeft; i++) {
            if(mCurrentFrame.drop[i]) {
                continue;
            }
            mCurrentFrame.isFBComposed[i] = false;
            --mdpBatchLeft;
        }

        mCurrentFrame.fbZ = mdpBatchSize;
        mCurrentFrame.fbCount = fbBatchSize;
        mCurrentFrame.mdpCount = mdpBatchSize;

        ALOGD_IF(isDebug(), "%s:Trying with: mdpbatch %d fbbatch %d dropped %d",
                 __FUNCTION__, mdpBatchSize, fbBatchSize,
                 mCurrentFrame.dropCount);

        if(postHeuristicsHandling(ctx, list)) {
            ALOGD_IF(isDebug(), "%s: Postheuristics handling succeeded",
                     __FUNCTION__);
            ALOGD_IF(sSimulationFlags, "%s: LOAD_MDP_COMP SUCCEEDED",
                     __FUNCTION__);
            return true;
        }

        reset(ctx);
        --mdpBatchSize;
        ++fbBatchSize;
    }

    return false;
}

bool MDPComp::isLoadBasedCompDoable(hwc_context_t *ctx) {
    if(mDpy or isSecurePresent(ctx, mDpy) or
       isYuvPresent(ctx, mDpy)) {
        return false;
    }
    return true;
}

bool MDPComp::canPartialUpdate(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    if(!qdutils::MDPVersion::getInstance().isPartialUpdateEnabled() ||
       isSkipPresent(ctx, mDpy) || (list->flags & HWC_GEOMETRY_CHANGED) ||
       mDpy) {
        return false;
    }
    return true;
}

bool MDPComp::tryVideoOnly(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    const bool secureOnly = true;
    return videoOnlyComp(ctx, list, not secureOnly) or
        videoOnlyComp(ctx, list, secureOnly);
}

bool MDPComp::videoOnlyComp(hwc_context_t *ctx,
        hwc_display_contents_1_t* list, bool secureOnly) {
    if(sSimulationFlags & MDPCOMP_AVOID_VIDEO_ONLY)
        return false;
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    mCurrentFrame.reset(numAppLayers);
    mCurrentFrame.fbCount -= mCurrentFrame.dropCount;
    updateYUV(ctx, list, secureOnly);
    int mdpCount = mCurrentFrame.mdpCount;

    if(!isYuvPresent(ctx, mDpy) or (mdpCount == 0)) {
        reset(ctx);
        return false;
    }

    /* Bail out if we are processing only secured video layers
     * and we don't have any */
    if(!isSecurePresent(ctx, mDpy) && secureOnly) {
        reset(ctx);
        return false;
    }

    if(mCurrentFrame.fbCount)
        mCurrentFrame.fbZ = mCurrentFrame.mdpCount;

    if(sEnable4k2kYUVSplit) {
        adjustForSourceSplit(ctx, list);
    }

    if(!postHeuristicsHandling(ctx, list)) {
        ALOGD_IF(isDebug(), "post heuristics handling failed");
        reset(ctx);
        return false;
    }

    ALOGD_IF(sSimulationFlags, "%s: VIDEO_ONLY_COMP SUCCEEDED",
             __FUNCTION__);
    return true;
}
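/* Note on the two videoOnlyComp() passes driven by tryVideoOnly() above: the
 * first pass (secureOnly == false) tries to put every eligible YUV layer on
 * MDP; only if that fails is the frame retried with just the secure YUV
 * layers, since secure buffers typically cannot be read by the GPU and must
 * stay on MDP pipes. */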
/* Checks for conditions where YUV layers cannot be bypassed */
bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
    if(isSkipLayer(layer)) {
        ALOGD_IF(isDebug(), "%s: Video marked SKIP dpy %d", __FUNCTION__, mDpy);
        return false;
    }

    if(layer->transform & HWC_TRANSFORM_ROT_90 && !canUseRotator(ctx, mDpy)) {
        ALOGD_IF(isDebug(), "%s: no free DMA pipe", __FUNCTION__);
        return false;
    }

    if(isSecuring(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
        return false;
    }

    if(!isValidDimension(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
                 __FUNCTION__);
        return false;
    }

    if(layer->planeAlpha < 0xFF) {
        ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha \
                 in video only mode",
                 __FUNCTION__);
        return false;
    }

    return true;
}

/* Starts at fromIndex and checks each layer to see whether it overlaps any
 * updating layer above it in z-order, up to the end of the batch. Returns
 * false if it finds any intersection, true otherwise. */
bool MDPComp::canPushBatchToTop(const hwc_display_contents_1_t* list,
        int fromIndex, int toIndex) {
    for(int i = fromIndex; i < toIndex; i++) {
        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
            if(intersectingUpdatingLayers(list, i+1, toIndex, i)) {
                return false;
            }
        }
    }
    return true;
}

/* Checks if the layer at targetLayerIndex intersects any updating layer
 * between fromIndex and toIndex. Returns true if it finds an intersection. */
bool MDPComp::intersectingUpdatingLayers(const hwc_display_contents_1_t* list,
        int fromIndex, int toIndex, int targetLayerIndex) {
    for(int i = fromIndex; i <= toIndex; i++) {
        if(!mCurrentFrame.isFBComposed[i]) {
            if(areLayersIntersecting(&list->hwLayers[i],
                                     &list->hwLayers[targetLayerIndex])) {
                return true;
            }
        }
    }
    return false;
}
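/* Batching example for getBatch() below (hypothetical 5-layer stack, bottom
 * to top: U C C U C, where U = updating/MDP and C = cached/FB): the maximal
 * contiguous cached batch starts at index 1 with fbZ = 1. The cached layer at
 * index 4 can join that batch only if it does not overlap the updating layer
 * at index 3, or if the whole batch can safely be pushed above it, in which
 * case fbZ is bumped past the updating layer. */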
int MDPComp::getBatch(hwc_display_contents_1_t* list,
        int& maxBatchStart, int& maxBatchEnd,
        int& maxBatchCount) {
    int i = 0;
    int fbZOrder = -1;
    int droppedLayerCt = 0;
    while (i < mCurrentFrame.layerCount) {
        int batchCount = 0;
        int batchStart = i;
        int batchEnd = i;
        /* Adjust the batch Z order for the layers dropped so far */
        int fbZ = batchStart - droppedLayerCt;
        int firstZReverseIndex = -1;
        int updatingLayersAbove = 0; //Updating layer count in middle of batch
        while(i < mCurrentFrame.layerCount) {
            if(!mCurrentFrame.isFBComposed[i]) {
                if(!batchCount) {
                    i++;
                    break;
                }
                updatingLayersAbove++;
                i++;
                continue;
            } else {
                if(mCurrentFrame.drop[i]) {
                    i++;
                    droppedLayerCt++;
                    continue;
                } else if(updatingLayersAbove <= 0) {
                    batchCount++;
                    batchEnd = i;
                    i++;
                    continue;
                } else { //Layer is FBComposed, not a drop & updatingLayer > 0

                    // We already have a valid updating layer. If layer i does
                    // not overlap any updating layer between batch-start and
                    // i, we can add layer i to the batch.
                    if(!intersectingUpdatingLayers(list, batchStart, i-1, i)) {
                        batchCount++;
                        batchEnd = i;
                        i++;
                        continue;
                    } else if(canPushBatchToTop(list, batchStart, i)) {
                        //If none of the non-updating layers within this batch
                        //intersect the updating layers above them in z-order,
                        //we can safely move the batch to a higher z-order.
                        //Increment fbZ as it moves up.
                        if(firstZReverseIndex < 0) {
                            firstZReverseIndex = i;
                        }
                        batchCount++;
                        batchEnd = i;
                        fbZ += updatingLayersAbove;
                        i++;
                        updatingLayersAbove = 0;
                        continue;
                    } else {
                        //Both checks failed; restart the loop from here.
                        if(firstZReverseIndex >= 0) {
                            i = firstZReverseIndex;
                        }
                        break;
                    }
                }
            }
        }
        if(batchCount > maxBatchCount) {
            maxBatchCount = batchCount;
            maxBatchStart = batchStart;
            maxBatchEnd = batchEnd;
            fbZOrder = fbZ;
        }
    }
    return fbZOrder;
}

bool MDPComp::markLayersForCaching(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    /* The idea is to keep as many non-updating (cached) layers in the FB as
     * possible and send the rest through MDP. This is done in 2 steps.
     *   1. Find the maximum contiguous batch of non-updating layers.
     *   2. See if we can improve this batch size for caching by adding opaque
     *      layers around the batch, provided they don't overlap any updating
     *      layer in between.
     * NEVER mark an updating layer for caching.
     * But cached ones can be marked for MDP. */

    int maxBatchStart = -1;
    int maxBatchEnd = -1;
    int maxBatchCount = 0;
    int fbZ = -1;

    /* Nothing is cached. No batching needed */
    if(mCurrentFrame.fbCount == 0) {
        return true;
    }

    /* No MDP comp layers, try to use other comp modes */
    if(mCurrentFrame.mdpCount == 0) {
        return false;
    }

    fbZ = getBatch(list, maxBatchStart, maxBatchEnd, maxBatchCount);

    /* reset the rest of the layers lying inside the ROI for MDP comp */
    for(int i = 0; i < mCurrentFrame.layerCount; i++) {
        hwc_layer_1_t* layer = &list->hwLayers[i];
        if((i < maxBatchStart || i > maxBatchEnd) &&
           mCurrentFrame.isFBComposed[i]) {
            if(!mCurrentFrame.drop[i]) {
                //If an unsupported layer would be pulled out for MDP, we must
                //fail.
                if(not isSupportedForMDPComp(ctx, layer)) {
                    return false;
                }
                mCurrentFrame.isFBComposed[i] = false;
            }
        }
    }

    // update the frame data
    mCurrentFrame.fbZ = fbZ;
    mCurrentFrame.fbCount = maxBatchCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
        mCurrentFrame.fbCount - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(), "%s: cached count: %d", __FUNCTION__,
             mCurrentFrame.fbCount);

    return true;
}
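/* updateLayerCache() below treats a layer as non-updating purely by buffer
 * handle equality: if the handle SurfaceFlinger passed this frame matches the
 * one remembered in mCachedFrame, the content is assumed unchanged and the
 * layer stays marked for FB composition; a fresh handle marks it as updating
 * and therefore an MDP candidate. */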
void MDPComp::updateLayerCache(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    int fbCount = 0;

    for(int i = 0; i < numAppLayers; i++) {
        if(mCachedFrame.hnd[i] == list->hwLayers[i].handle) {
            if(!mCurrentFrame.drop[i])
                fbCount++;
            mCurrentFrame.isFBComposed[i] = true;
        } else {
            mCurrentFrame.isFBComposed[i] = false;
        }
    }

    mCurrentFrame.fbCount = fbCount;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount - mCurrentFrame.fbCount
        - mCurrentFrame.dropCount;

    ALOGD_IF(isDebug(), "%s: MDP count: %d FB count %d drop count: %d",
             __FUNCTION__, mCurrentFrame.mdpCount, mCurrentFrame.fbCount,
             mCurrentFrame.dropCount);
}

void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list,
        bool secureOnly) {
    int nYuvCount = ctx->listStats[mDpy].yuvCount;
    for(int index = 0; index < nYuvCount; index++) {
        int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
        hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];

        if(!isYUVDoable(ctx, layer)) {
            if(!mCurrentFrame.isFBComposed[nYuvIndex]) {
                mCurrentFrame.isFBComposed[nYuvIndex] = true;
                mCurrentFrame.fbCount++;
            }
        } else {
            if(mCurrentFrame.isFBComposed[nYuvIndex]) {
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(!secureOnly || isSecureBuffer(hnd)) {
                    mCurrentFrame.isFBComposed[nYuvIndex] = false;
                    mCurrentFrame.fbCount--;
                }
            }
        }
    }

    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
        mCurrentFrame.fbCount - mCurrentFrame.dropCount;
    ALOGD_IF(isDebug(), "%s: fb count: %d", __FUNCTION__,
             mCurrentFrame.fbCount);
}

hwc_rect_t MDPComp::getUpdatingFBRect(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    hwc_rect_t fbRect = (struct hwc_rect){0, 0, 0, 0};

    /* Update only the region of the FB needed for composition */
    for(int i = 0; i < mCurrentFrame.layerCount; i++) {
        if(mCurrentFrame.isFBComposed[i] && !mCurrentFrame.drop[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            hwc_rect_t dst = layer->displayFrame;
            fbRect = getUnion(fbRect, dst);
        }
    }
    trimAgainstROI(ctx, fbRect);
    return fbRect;
}

bool MDPComp::postHeuristicsHandling(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {

    //Capability checks
    if(!resourceCheck()) {
        ALOGD_IF(isDebug(), "%s: resource check failed", __FUNCTION__);
        return false;
    }

    //Limitations checks
    if(!hwLimitationsCheck(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: HW limitations", __FUNCTION__);
        return false;
    }

    //Configure the framebuffer first if applicable
    if(mCurrentFrame.fbZ >= 0) {
        hwc_rect_t fbRect = getUpdatingFBRect(ctx, list);
        if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, fbRect, mCurrentFrame.fbZ))
        {
            ALOGD_IF(isDebug(), "%s configure framebuffer failed",
                     __FUNCTION__);
            return false;
        }
    }

    mCurrentFrame.map();

    if(!allocLayerPipes(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
        return false;
    }
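    /* The z-order assignment below interleaves the MDP layers around the FB
     * target: e.g. (hypothetically) with fbZ == 2 and three MDP layers, the
     * pipes receive stage z-orders 0, 1 and 3 while stage 2 is left for the
     * framebuffer. A 4k2k layer that was split consumes one extra stage for
     * its right half. */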
    for(int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
            index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            int mdpIndex = mCurrentFrame.layerToMDP[index];
            hwc_layer_1_t* layer = &list->hwLayers[index];

            //Leave fbZ for the framebuffer. CACHE/GLES layers go there.
            if(mdpNextZOrder == mCurrentFrame.fbZ) {
                mdpNextZOrder++;
            }
            MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            cur_pipe->zOrder = mdpNextZOrder++;

            private_handle_t *hnd = (private_handle_t *)layer->handle;
            if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit) {
                if(configure4k2kYuv(ctx, layer,
                        mCurrentFrame.mdpToLayer[mdpIndex]) != 0) {
                    ALOGD_IF(isDebug(), "%s: Failed to configure split pipes \
                             for layer %d", __FUNCTION__, index);
                    return false;
                } else {
                    mdpNextZOrder++;
                }
                continue;
            }
            if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0) {
                ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
                         layer %d", __FUNCTION__, index);
                return false;
            }
        }
    }

    if(!ctx->mOverlay->validateAndSet(mDpy, ctx->dpyAttr[mDpy].fd)) {
        ALOGD_IF(isDebug(), "%s: Failed to validate and set overlay for dpy %d",
                 __FUNCTION__, mDpy);
        return false;
    }

    setRedraw(ctx, list);
    return true;
}

bool MDPComp::resourceCheck() {
    const bool fbUsed = mCurrentFrame.fbCount;
    if(mCurrentFrame.mdpCount > sMaxPipesPerMixer - fbUsed) {
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER", __FUNCTION__);
        return false;
    }
    return true;
}

bool MDPComp::hwLimitationsCheck(hwc_context_t* ctx,
        hwc_display_contents_1_t* list) {

    //A-family hw limitation:
    //MDP cannot support a layer that needs alpha scaling.
    if(ctx->mMDP.version < qdutils::MDSS_V5) {
        for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
            if(!mCurrentFrame.isFBComposed[i] &&
               isAlphaScaled(&list->hwLayers[i])) {
                ALOGD_IF(isDebug(), "%s:frame needs alphaScaling",
                         __FUNCTION__);
                return false;
            }
        }
    }

    //On 8x26 & 8974 hw we have a limitation with downscaling+blending: if
    //multiple layers require downscaling and they overlap, fall back to GPU
    //since MDSS cannot handle it.
    if(qdutils::MDPVersion::getInstance().is8x74v2() ||
       qdutils::MDPVersion::getInstance().is8x26()) {
        for(int i = 0; i < mCurrentFrame.layerCount-1; ++i) {
            hwc_layer_1_t* botLayer = &list->hwLayers[i];
            if(!mCurrentFrame.isFBComposed[i] &&
               isDownscaleRequired(botLayer)) {
                //if layer-i is marked for MDP and needs downscaling, check
                //whether any MDP layer on top of i overlaps with layer-i
                for(int j = i+1; j < mCurrentFrame.layerCount; ++j) {
                    hwc_layer_1_t* topLayer = &list->hwLayers[j];
                    if(!mCurrentFrame.isFBComposed[j] &&
                       isDownscaleRequired(topLayer)) {
                        hwc_rect_t r = getIntersection(botLayer->displayFrame,
                                                       topLayer->displayFrame);
                        if(isValidRect(r))
                            return false;
                    }
                }
            }
        }
    }
    return true;
}

int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    int ret = 0;
    const int numLayers = ctx->listStats[mDpy].numAppLayers;
    char property[PROPERTY_VALUE_MAX];

    if(property_get("debug.hwc.simulate", property, NULL) > 0) {
        int currentFlags = atoi(property);
        if(currentFlags != sSimulationFlags) {
            sSimulationFlags = currentFlags;
            ALOGE("%s: Simulation Flag read: 0x%x (%d)", __FUNCTION__,
                  sSimulationFlags, sSimulationFlags);
        }
    }
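    /* The simulation property lets individual strategies be vetoed at runtime
     * for debugging: bits in debug.hwc.simulate map to the MDPCOMP_AVOID_*
     * flags checked at the top of fullMDPComp(), cacheBasedComp(),
     * loadBasedComp() and videoOnlyComp(), forcing composition down the
     * remaining paths. (Which bit maps to which flag depends on how those
     * flags are defined in the header.) */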
    //Do not cache the information for the next draw cycle.
    if(numLayers > MAX_NUM_APP_LAYERS or (!numLayers)) {
        ALOGI("%s: Unsupported layer count for mdp composition",
              __FUNCTION__);
        mCachedFrame.reset();
        return -1;
    }

    //reset old data
    mCurrentFrame.reset(numLayers);
    memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
    mCurrentFrame.dropCount = 0;

    //Detect the start of an animation and fall back to GPU only once, to
    //cache all the layers in the FB and display the FB content until the
    //animation completes.
    if(ctx->listStats[mDpy].isDisplayAnimating) {
        mCurrentFrame.needsRedraw = false;
        if(ctx->mAnimationState[mDpy] == ANIMATION_STOPPED) {
            mCurrentFrame.needsRedraw = true;
            ctx->mAnimationState[mDpy] = ANIMATION_STARTED;
        }
        setMDPCompLayerFlags(ctx, list);
        mCachedFrame.updateCounts(mCurrentFrame);
        ret = -1;
        return ret;
    } else {
        ctx->mAnimationState[mDpy] = ANIMATION_STOPPED;
    }

    //Hard conditions: if not met, cannot do MDP comp
    if(isFrameDoable(ctx)) {
        generateROI(ctx, list);

        if(tryFullFrame(ctx, list) || tryVideoOnly(ctx, list)) {
            setMDPCompLayerFlags(ctx, list);
        } else {
            resetROI(ctx, mDpy);
            reset(ctx);
            memset(&mCurrentFrame.drop, 0, sizeof(mCurrentFrame.drop));
            mCurrentFrame.dropCount = 0;
            ret = -1;
        }
    } else {
        ALOGD_IF(isDebug(), "%s: MDP Comp not possible for this frame",
                 __FUNCTION__);
        ret = -1;
    }

    if(isDebug()) {
        ALOGD("GEOMETRY change: %d",
              (list->flags & HWC_GEOMETRY_CHANGED));
        android::String8 sDump("");
        dump(sDump, ctx);
        ALOGD("%s", sDump.string());
    }

    mCachedFrame.cacheAll(list);
    mCachedFrame.updateCounts(mCurrentFrame);
    return ret;
}

bool MDPComp::allocSplitVGPipesfor4k2k(hwc_context_t *ctx, int index) {

    bool bRet = true;
    int mdpIndex = mCurrentFrame.layerToMDP[index];
    PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
    info.pipeInfo = new MdpYUVPipeInfo;
    info.rot = NULL;
    MdpYUVPipeInfo& pipe_info = *(MdpYUVPipeInfo*)info.pipeInfo;

    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = Overlay::FORMAT_YUV;
    pipeSpecs.needsScaling = true;
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID) {
        bRet = false;
        ALOGD_IF(isDebug(), "%s: allocating first VG pipe failed",
                 __FUNCTION__);
    }
    pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.rIndex == ovutils::OV_INVALID) {
        bRet = false;
        ALOGD_IF(isDebug(), "%s: allocating second VG pipe failed",
                 __FUNCTION__);
    }
    return bRet;
}

//=============MDPCompNonSplit==================================================
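/* When sEnable4k2kYUVSplit is set, a 4k2k YUV layer is fed through two VG
 * pipes, one per half (see allocSplitVGPipesfor4k2k() above), since the
 * source is wider than a single MDP pipe can fetch without decimation. The
 * adjustForSourceSplit() helpers below patch up mdpCount and the FB z-order
 * to account for the extra pipe. */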
void MDPCompNonSplit::adjustForSourceSplit(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    //If a 4k2k YUV layer split is possible and fbZ is above the 4k2k layer,
    //increment the FB z-order by 1, since we split the 4k2k layer and use an
    //extra z-order for its right half.
    if(mCurrentFrame.fbZ >= 0) {
        for(int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                    //As we split the 4kx2k YUV layer and program it to 2 VG
                    //pipes (if available), increase the mdp count by 1.
                    mCurrentFrame.mdpCount++;
                }
            }
        }
    }
}

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompNonSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoNonSplit& mdp_info =
        *(static_cast<MdpPipeInfoNonSplit*>(PipeLayerPair.pipeInfo));
    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest dest = mdp_info.index;

    ALOGD_IF(isDebug(), "%s: configuring: layer: %p z_order: %d dest_pipe: %d",
             __FUNCTION__, layer, zOrder, dest);

    return configureNonSplit(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest,
                             &PipeLayerPair.rot);
}
bool MDPCompNonSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit) {
            if(allocSplitVGPipesfor4k2k(ctx, index)) {
                continue;
            }
        }

        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoNonSplit;
        info.rot = NULL;
        MdpPipeInfoNonSplit& pipe_info = *(MdpPipeInfoNonSplit*)info.pipeInfo;

        Overlay::PipeSpecs pipeSpecs;
        pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
        pipeSpecs.needsScaling = qhwc::needsScaling(layer) or
            (qdutils::MDPVersion::getInstance().is8x26() and
             ctx->dpyAttr[HWC_DISPLAY_PRIMARY].xres > 1024);
        pipeSpecs.dpy = mDpy;
        pipeSpecs.fb = false;

        pipe_info.index = ctx->mOverlay->getPipe(pipeSpecs);

        if(pipe_info.index == ovutils::OV_INVALID) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe", __FUNCTION__);
            return false;
        }
    }
    return true;
}

int MDPCompNonSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpYUVPipeInfo& mdp_info =
        *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;

    return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
                                lDest, rDest, &PipeLayerPair.rot);
}
bool MDPCompNonSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled()) {
        ALOGD_IF(isDebug(), "%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid context or list", __FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(), "%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    // Set the handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++)
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            if(!(layer->flags & HWC_COLOR_FILL)) {
                ALOGE("%s handle null", __FUNCTION__);
                return false;
            }
            // No PLAY for Color layer
            layerProp[i].mFlags &= ~HWC_MDPCOMP;
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(), "%s: MDP Comp: Drawing layer: %p hnd: %p \
                         using pipe: %d", __FUNCTION__, layer, hnd, indexL);
                if(!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                          __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(), "%s: MDP Comp: Drawing layer: %p hnd: %p \
                         using pipe: %d", __FUNCTION__, layer, hnd, indexR);
                if(!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                          __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        else {
            MdpPipeInfoNonSplit& pipe_info =
                *(MdpPipeInfoNonSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            ovutils::eDest dest = pipe_info.index;
            if(dest == ovutils::OV_INVALID) {
                ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
                return false;
            }

            if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
                continue;
            }

            ALOGD_IF(isDebug(), "%s: MDP Comp: Drawing layer: %p hnd: %p \
                     using pipe: %d", __FUNCTION__, layer,
                     hnd, dest);

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;

            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            if(rot) {
                if(!rot->queueBuffer(fd, offset))
                    return false;
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            if(!ov.queueBuffer(fd, offset, dest)) {
                ALOGE("%s: queueBuffer failed for display:%d ",
                      __FUNCTION__, mDpy);
                return false;
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }
    return true;
}

//=============MDPCompSplit===================================================

void MDPCompSplit::adjustForSourceSplit(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    //If a 4kx2k YUV layer lies entirely within either the left or the right
    //half, try splitting the YUV layer to avoid decimation.
    const int lSplit = getLeftSplit(ctx, mDpy);
    if(mCurrentFrame.fbZ >= 0) {
        for(int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
                index++) {
            if(!mCurrentFrame.isFBComposed[index]) {
                if(mdpNextZOrder == mCurrentFrame.fbZ) {
                    mdpNextZOrder++;
                }
                mdpNextZOrder++;
                hwc_layer_1_t* layer = &list->hwLayers[index];
                private_handle_t *hnd = (private_handle_t *)layer->handle;
                if(is4kx2kYuvBuffer(hnd)) {
                    hwc_rect_t dst = layer->displayFrame;
                    if((dst.left > lSplit) || (dst.right < lSplit)) {
                        mCurrentFrame.mdpCount += 1;
                    }
                    if(mdpNextZOrder <= mCurrentFrame.fbZ)
                        mCurrentFrame.fbZ += 1;
                    mdpNextZOrder++;
                }
            }
        }
    }
}
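/* On split displays each mixer drives one half of the panel, so a layer
 * needs a left pipe, a right pipe, or both, depending on where its
 * displayFrame falls relative to lSplit and on whether that half's ROI is
 * actually updating; acquireMDPPipes() below requests only what is needed. */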
bool MDPCompSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {

    const int lSplit = getLeftSplit(ctx, mDpy);
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScalingWithSplit(ctx, layer, mDpy);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.mixer = Overlay::MIXER_LEFT;
    pipeSpecs.fb = false;

    // Acquire a pipe only for the updating half
    hwc_rect_t l_roi = ctx->listStats[mDpy].lRoi;
    hwc_rect_t r_roi = ctx->listStats[mDpy].rRoi;

    if (dst.left < lSplit && isValidRect(getIntersection(dst, l_roi))) {
        pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.lIndex == ovutils::OV_INVALID)
            return false;
    }

    if(dst.right > lSplit && isValidRect(getIntersection(dst, r_roi))) {
        pipeSpecs.mixer = Overlay::MIXER_RIGHT;
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID)
            return false;
    }

    return true;
}

bool MDPCompSplit::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        hwc_rect_t dst = layer->displayFrame;
        const int lSplit = getLeftSplit(ctx, mDpy);
        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit){
            if((dst.left > lSplit)||(dst.right < lSplit)){
                if(allocSplitVGPipesfor4k2k(ctx, index)){
                    continue;
                }
            }
        }
        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoSplit;
        info.rot = NULL;
        MdpPipeInfoSplit& pipe_info = *(MdpPipeInfoSplit*)info.pipeInfo;

        if(!acquireMDPPipes(ctx, layer, pipe_info)) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe for type",
                    __FUNCTION__);
            return false;
        }
    }
    return true;
}

int MDPCompSplit::configure4k2kYuv(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    const int lSplit = getLeftSplit(ctx, mDpy);
    hwc_rect_t dst = layer->displayFrame;
    if((dst.left > lSplit)||(dst.right < lSplit)){
        MdpYUVPipeInfo& mdp_info =
                *(static_cast<MdpYUVPipeInfo*>(PipeLayerPair.pipeInfo));
        eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
        eIsFg isFg = IS_FG_OFF;
        eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
        eDest lDest = mdp_info.lIndex;
        eDest rDest = mdp_info.rIndex;

        return configureSourceSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg,
                lDest, rDest, &PipeLayerPair.rot);
    }
    else{
        return configure(ctx, layer, PipeLayerPair);
    }
}

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
            "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest);

    return configureSplit(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest,
            rDest, &PipeLayerPair.rot);
}
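
/*
 * Draws the MDP-composed layers of the current frame on a split target. A
 * layer may hold a left pipe, a right pipe, or both; the same buffer,
 * optionally rerouted through assertive display (mAD) and/or the rotator,
 * is queued to every valid destination.
 */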
bool MDPCompSplit::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid context or list",__FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    // Set the handle timeout to true for MDP or MIXED composition.
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) {
        sHandleTimeout = true;
    }

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            ALOGE("%s handle null", __FUNCTION__);
            return false;
        }

        if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
            continue;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        if(is4kx2kYuvBuffer(hnd) && sEnable4k2kYUVSplit)
        {
            MdpYUVPipeInfo& pipe_info =
                *(MdpYUVPipeInfo*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;
            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;
            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }

            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for display:%d",
                            __FUNCTION__, mDpy);
                    return false;
                }
            }
        }
        else{
            MdpPipeInfoSplit& pipe_info =
                *(MdpPipeInfoSplit*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;

            ovutils::eDest indexL = pipe_info.lIndex;
            ovutils::eDest indexR = pipe_info.rIndex;

            int fd = hnd->fd;
            uint32_t offset = (uint32_t)hnd->offset;

            if(ctx->mAD->isModeOn()) {
                if(ctx->mAD->draw(ctx, fd, offset)) {
                    fd = ctx->mAD->getDstFd();
                    offset = ctx->mAD->getDstOffset();
                }
            }

            if(rot) {
                rot->queueBuffer(fd, offset);
                fd = rot->getDstMemId();
                offset = rot->getDstOffset();
            }

            //************* play left mixer **********
            if(indexL != ovutils::OV_INVALID) {
                ovutils::eDest destL = (ovutils::eDest)indexL;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using pipe: %d", __FUNCTION__, layer, hnd, indexL );
                if (!ov.queueBuffer(fd, offset, destL)) {
                    ALOGE("%s: queueBuffer failed for left mixer",
                            __FUNCTION__);
                    return false;
                }
            }

            //************* play right mixer **********
            if(indexR != ovutils::OV_INVALID) {
                ovutils::eDest destR = (ovutils::eDest)indexR;
                ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                        using pipe: %d", __FUNCTION__, layer, hnd, indexR );
                if (!ov.queueBuffer(fd, offset, destR)) {
                    ALOGE("%s: queueBuffer failed for right mixer",
                            __FUNCTION__);
                    return false;
                }
            }
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }

    return true;
}

//================MDPCompSrcSplit==============================================
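/*
 * Acquires pipes for a source-split layer. One pipe is the default; a
 * second is requested only for the wide-layer cases listed below. Both
 * pipes stage on the same mixer, which is why no mixer is forced in the
 * pipe specs and why the relative priority of the two pipes is checked
 * after acquisition.
 */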
bool MDPCompSrcSplit::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoSplit& pipe_info) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    hwc_rect_t dst = layer->displayFrame;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    //If 2 pipes are staged on a single stage of a mixer, the left pipe
    //must have a higher priority than the right one. Pipe priorities run
    //in the order VG0, VG1 ..., RGB0 ..., DMA1.

    Overlay::PipeSpecs pipeSpecs;
    pipeSpecs.formatClass = isYuvBuffer(hnd) ?
            Overlay::FORMAT_YUV : Overlay::FORMAT_RGB;
    pipeSpecs.needsScaling = qhwc::needsScaling(layer);
    pipeSpecs.dpy = mDpy;
    pipeSpecs.fb = false;

    //1 pipe by default for a layer
    pipe_info.lIndex = ctx->mOverlay->getPipe(pipeSpecs);
    if(pipe_info.lIndex == ovutils::OV_INVALID) {
        return false;
    }

    /* Use 2 pipes if:
     * a) the layer's crop width is > 2048, or
     * b) the layer's dest width is > 2048, or
     * c) on primary, the driver has indicated via caps to always split.
     *    This is based on an empirically derived panel-height value, and
     *    applies only when the layer's width exceeds the mixer's width.
     */

    bool primarySplitAlways = (mDpy == HWC_DISPLAY_PRIMARY) and
            qdutils::MDPVersion::getInstance().isSrcSplitAlways();
    int lSplit = getLeftSplit(ctx, mDpy);
    int dstWidth = dst.right - dst.left;
    int cropWidth = has90Transform(layer) ? crop.bottom - crop.top :
            crop.right - crop.left;

    if(dstWidth > qdutils::MAX_DISPLAY_DIM or
            cropWidth > qdutils::MAX_DISPLAY_DIM or
            (primarySplitAlways and (cropWidth > lSplit))) {
        pipe_info.rIndex = ctx->mOverlay->getPipe(pipeSpecs);
        if(pipe_info.rIndex == ovutils::OV_INVALID) {
            return false;
        }

        // comparePipePriority() return values:
        //  1  Left pipe is higher priority, do nothing.
        //  0  Pipes are of the same priority.
        // -1  Right pipe is higher priority, swap needed.
        if(ctx->mOverlay->comparePipePriority(pipe_info.lIndex,
                pipe_info.rIndex) == -1) {
            qhwc::swap(pipe_info.lIndex, pipe_info.rIndex);
        }
    }

    return true;
}
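
/*
 * Configures one or two pipes for a source-split layer. When two pipes are
 * in use, the source crop and dest rect are each halved at their horizontal
 * midpoints. Illustrative numbers: a crop of [0, 0, 4096, 2160] becomes
 * cropL [0, 0, 2048, 2160] and cropR [2048, 0, 4096, 2160], with the halves
 * swapped on an H-flip that the rotator has not already absorbed.
 */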
int MDPCompSrcSplit::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    if(!hnd) {
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return -1;
    }
    MetaData_t *metadata = (MetaData_t *)hnd->base_metadata;
    MdpPipeInfoSplit& mdp_info =
        *(static_cast<MdpPipeInfoSplit*>(PipeLayerPair.pipeInfo));
    Rotator **rot = &PipeLayerPair.rot;
    eZorder z = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest lDest = mdp_info.lIndex;
    eDest rDest = mdp_info.rIndex;
    hwc_rect_t crop = integerizeSourceCrop(layer->sourceCropf);
    hwc_rect_t dst = layer->displayFrame;
    int transform = layer->transform;
    eTransform orient = static_cast<eTransform>(transform);
    const int downscale = 0;
    int rotFlags = ROT_FLAGS_NONE;
    uint32_t format = ovutils::getMdpFormat(hnd->format, isTileRendered(hnd));
    Whf whf(getWidth(hnd), getHeight(hnd), format, hnd->size);

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d "
            "dest_pipeR: %d",__FUNCTION__, layer, z, lDest, rDest);

    // Handle R/B swap
    if (layer->flags & HWC_FORMAT_RB_SWAP) {
        if (hnd->format == HAL_PIXEL_FORMAT_RGBA_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRA_8888);
        else if (hnd->format == HAL_PIXEL_FORMAT_RGBX_8888)
            whf.format = getMdpFormat(HAL_PIXEL_FORMAT_BGRX_8888);
    }

    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    setMdpFlags(layer, mdpFlags, 0, transform);

    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        //Enable overfetch
        setMdpFlags(mdpFlags, OV_MDSS_MDP_DUAL_PIPE);
    }

    if(isYuvBuffer(hnd) && (transform & HWC_TRANSFORM_ROT_90)) {
        (*rot) = ctx->mRotMgr->getNext();
        if((*rot) == NULL) return -1;
        ctx->mLayerRotMap[mDpy]->add(layer, *rot);
        //If the video is using a single pipe, enable BWC
        if(rDest == OV_INVALID) {
            BwcPM::setBwc(crop, dst, transform, mdpFlags);
        }
        //Configure rotator for pre-rotation
        if(configRotator(*rot, whf, crop, mdpFlags, orient, downscale) < 0) {
            ALOGE("%s: configRotator failed!", __FUNCTION__);
            return -1;
        }
        whf.format = (*rot)->getDstFormat();
        updateSource(orient, whf, crop);
        rotFlags |= ROT_PREROTATED;
    }

    //If 2 pipes are being used, divide the layer's crop and dst in half
    hwc_rect_t cropL = crop;
    hwc_rect_t cropR = crop;
    hwc_rect_t dstL = dst;
    hwc_rect_t dstR = dst;
    if(lDest != OV_INVALID && rDest != OV_INVALID) {
        cropL.right = (crop.right + crop.left) / 2;
        cropR.left = cropL.right;
        sanitizeSourceCrop(cropL, cropR, hnd);

        //Swap crops on H flip since 2 pipes are being used
        if((orient & OVERLAY_TRANSFORM_FLIP_H) && (*rot) == NULL) {
            hwc_rect_t tmp = cropL;
            cropL = cropR;
            cropR = tmp;
        }

        dstL.right = (dst.right + dst.left) / 2;
        dstR.left = dstL.right;
    }

    //Reset the transform for MDP: the buffer is either pre-rotated by the
    //rotator or the flips are handled by the MDP pipes themselves.
    orient = OVERLAY_TRANSFORM_0;
    transform = 0;
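
    // Each valid destination below gets its own PipeArgs and configMdp()
    // call; the two halves share format, z-order, rotation flags, and
    // blending, and differ only in their crop and dest rects.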
    //configure left pipe
    if(lDest != OV_INVALID) {
        PipeArgs pargL(mdpFlags, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags), layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));

        if(configMdp(ctx->mOverlay, pargL, orient,
                cropL, dstL, metadata, lDest) < 0) {
            ALOGE("%s: commit failed for left mixer config", __FUNCTION__);
            return -1;
        }
    }

    //configure right pipe
    if(rDest != OV_INVALID) {
        PipeArgs pargR(mdpFlags, whf, z, isFg,
                static_cast<eRotFlags>(rotFlags),
                layer->planeAlpha,
                (ovutils::eBlending) getBlending(layer->blending));
        if(configMdp(ctx->mOverlay, pargR, orient,
                cropR, dstR, metadata, rDest) < 0) {
            ALOGE("%s: commit failed for right mixer config", __FUNCTION__);
            return -1;
        }
    }

    return 0;
}

}; //namespace