hwc_mdpcomp.cpp revision 6df35c53ea402da873bcc837184b132cb45d97d6
/*
 * Copyright (C) 2012-2013, The Linux Foundation. All rights reserved.
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <math.h>
#include "hwc_mdpcomp.h"
#include <sys/ioctl.h>
#include "external.h"
#include "qdMetaData.h"
#include "mdp_version.h"
#include "hwc_fbupdate.h"
#include "hwc_ad.h"
#include <overlayRotator.h>

using namespace overlay;
using namespace qdutils;
using namespace overlay::utils;
namespace ovutils = overlay::utils;

namespace qhwc {

//==============MDPComp========================================================

// Static state shared by all MDPComp instances (one instance per display).
IdleInvalidator *MDPComp::idleInvalidator = NULL;
bool MDPComp::sIdleFallBack = false;
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;

// Factory: panels wider than MAX_DISPLAY_DIM, or with an explicit right
// split, need the dual-mixer (HighRes) strategy; everything else uses the
// single-mixer (LowRes) strategy.
MDPComp* MDPComp::getObject(const int& width, const int& rightSplit,
        const int& dpy) {
    if(width > MAX_DISPLAY_DIM || rightSplit) {
        return new MDPCompHighRes(dpy);
    }
    return new MDPCompLowRes(dpy);
}

MDPComp::MDPComp(int dpy):mDpy(dpy){};

// Appends a human-readable table of the cached (previous) and current frame
// state to buf, one row per layer. Takes mMdpCompLock; callers must not hold
// it (see note at the end of prepare()).
void MDPComp::dump(android::String8& buf)
{
    Locker::Autolock _l(mMdpCompLock);
    dumpsys_log(buf,"HWC Map for Dpy: %s \n",
                mDpy ? "\"EXTERNAL\"" : "\"PRIMARY\"");
    dumpsys_log(buf,"PREV_FRAME: layerCount:%2d mdpCount:%2d \
                cacheCount:%2d \n", mCachedFrame.layerCount,
                mCachedFrame.mdpCount, mCachedFrame.cacheCount);
    dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d \
                fbCount:%2d \n", mCurrentFrame.layerCount,
                mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n",
                (mCurrentFrame.needsRedraw? "YES" : "NO"),
                mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    dumpsys_log(buf," --------------------------------------------- \n");
    dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype | Z \n");
    dumpsys_log(buf," --------------------------------------------- \n");
    for(int index = 0; index < mCurrentFrame.layerCount; index++ )
        dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                    mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                     (mCurrentFrame.needsRedraw ? "GLES" : "CACHE") : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
    mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
    dumpsys_log(buf,"\n");
}

// One-time initialization of the static configuration from system properties.
// Returns false only when ctx is NULL. Also arms the idle invalidator on
// video-mode panels (command-mode panels self-refresh, so idle fallback to
// GPU composition is unnecessary there).
bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!",__FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX];

    // MDP composition is opt-in via persist.hwc.mdpcomp.enable ("1"/"true").
    sEnabled = false;
    if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = true;
    }

    // Mixed (MDP + cached-FB) mode is on by default; the property disables it.
    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnableMixedMode = false;
    }

    sDebugLogs = false;
    if(property_get("debug.mdpcomp.logs", property, NULL) > 0) {
        if(atoi(property) != 0)
            sDebugLogs = true;
    }

    // Optional cap on pipes consumed per mixer; clamped to the HW maximum.
    sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
    if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
    }

    if(ctx->mMDP.panel != MIPI_CMD_PANEL) {
        // Idle invalidation is not necessary on command mode panels
        long idle_timeout = DEFAULT_IDLE_TIME;
        if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) {
            if(atoi(property) != 0)
                idle_timeout = atoi(property);
        }

        //create Idle Invalidator only when not disabled through property
        if(idle_timeout != -1)
            idleInvalidator = IdleInvalidator::getInstance();

        if(idleInvalidator == NULL) {
            ALOGE("%s: failed to instantiate idleInvalidator object",
                  __FUNCTION__);
        } else {
            idleInvalidator->init(timeout_handler, ctx, idle_timeout);
        }
    }
    return true;
}

// Abandons the current frame: clears per-frame state, then marks every layer
// in the list as cached so the next cycle re-evaluates from scratch.
void MDPComp::reset(const int& numLayers, hwc_display_contents_1_t* list) {
    mCurrentFrame.reset(numLayers);
mCachedFrame.cacheAll(list); 145 mCachedFrame.updateCounts(mCurrentFrame); 146} 147 148void MDPComp::timeout_handler(void *udata) { 149 struct hwc_context_t* ctx = (struct hwc_context_t*)(udata); 150 151 if(!ctx) { 152 ALOGE("%s: received empty data in timer callback", __FUNCTION__); 153 return; 154 } 155 156 if(!ctx->proc) { 157 ALOGE("%s: HWC proc not registered", __FUNCTION__); 158 return; 159 } 160 sIdleFallBack = true; 161 /* Trigger SF to redraw the current frame */ 162 ctx->proc->invalidate(ctx->proc); 163} 164 165void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx, 166 hwc_display_contents_1_t* list) { 167 LayerProp *layerProp = ctx->layerProp[mDpy]; 168 169 for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) { 170 hwc_layer_1_t* layer = &(list->hwLayers[index]); 171 if(!mCurrentFrame.isFBComposed[index]) { 172 layerProp[index].mFlags |= HWC_MDPCOMP; 173 layer->compositionType = HWC_OVERLAY; 174 layer->hints |= HWC_HINT_CLEAR_FB; 175 mCachedFrame.hnd[index] = NULL; 176 } else { 177 if(!mCurrentFrame.needsRedraw) 178 layer->compositionType = HWC_OVERLAY; 179 } 180 } 181} 182 183MDPComp::FrameInfo::FrameInfo() { 184 reset(0); 185} 186 187void MDPComp::FrameInfo::reset(const int& numLayers) { 188 for(int i = 0 ; i < MAX_PIPES_PER_MIXER && numLayers; i++ ) { 189 if(mdpToLayer[i].pipeInfo) { 190 delete mdpToLayer[i].pipeInfo; 191 mdpToLayer[i].pipeInfo = NULL; 192 //We dont own the rotator 193 mdpToLayer[i].rot = NULL; 194 } 195 } 196 197 memset(&mdpToLayer, 0, sizeof(mdpToLayer)); 198 memset(&layerToMDP, -1, sizeof(layerToMDP)); 199 memset(&isFBComposed, 1, sizeof(isFBComposed)); 200 201 layerCount = numLayers; 202 fbCount = numLayers; 203 mdpCount = 0; 204 needsRedraw = true; 205 fbZ = 0; 206} 207 208void MDPComp::FrameInfo::map() { 209 // populate layer and MDP maps 210 int mdpIdx = 0; 211 for(int idx = 0; idx < layerCount; idx++) { 212 if(!isFBComposed[idx]) { 213 mdpToLayer[mdpIdx].listIndex = idx; 214 layerToMDP[idx] = mdpIdx++; 215 } 
216 } 217} 218 219MDPComp::LayerCache::LayerCache() { 220 reset(); 221} 222 223void MDPComp::LayerCache::reset() { 224 memset(&hnd, 0, sizeof(hnd)); 225 mdpCount = 0; 226 cacheCount = 0; 227 layerCount = 0; 228 fbZ = -1; 229} 230 231void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) { 232 const int numAppLayers = list->numHwLayers - 1; 233 for(int i = 0; i < numAppLayers; i++) { 234 hnd[i] = list->hwLayers[i].handle; 235 } 236} 237 238void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) { 239 mdpCount = curFrame.mdpCount; 240 cacheCount = curFrame.fbCount; 241 layerCount = curFrame.layerCount; 242 fbZ = curFrame.fbZ; 243} 244 245bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) { 246 const int dpy = HWC_DISPLAY_PRIMARY; 247 private_handle_t *hnd = (private_handle_t *)layer->handle; 248 249 if(!hnd) { 250 ALOGE("%s: layer handle is NULL", __FUNCTION__); 251 return false; 252 } 253 254 int hw_w = ctx->dpyAttr[mDpy].xres; 255 int hw_h = ctx->dpyAttr[mDpy].yres; 256 257 hwc_rect_t crop = layer->sourceCrop; 258 hwc_rect_t dst = layer->displayFrame; 259 260 if(dst.left < 0 || dst.top < 0 || dst.right > hw_w || dst.bottom > hw_h) { 261 hwc_rect_t scissor = {0, 0, hw_w, hw_h }; 262 qhwc::calculate_crop_rects(crop, dst, scissor, layer->transform); 263 } 264 265 int crop_w = crop.right - crop.left; 266 int crop_h = crop.bottom - crop.top; 267 int dst_w = dst.right - dst.left; 268 int dst_h = dst.bottom - dst.top; 269 float w_dscale = ceilf((float)crop_w / (float)dst_w); 270 float h_dscale = ceilf((float)crop_h / (float)dst_h); 271 272 //Workaround for MDP HW limitation in DSI command mode panels where 273 //FPS will not go beyond 30 if buffers on RGB pipes are of width < 5 274 275 if((crop_w < 5)||(crop_h < 5)) 276 return false; 277 278 const uint32_t downscale = 279 qdutils::MDPVersion::getInstance().getMaxMDPDownscale(); 280 if(ctx->mMDP.version >= qdutils::MDSS_V5) { 281 
        // MDSS: without decimation the pipe itself must handle the full
        // downscale (and fetch width is capped); with decimation much larger
        // ratios are acceptable.
        if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
            if(crop_w > MAX_DISPLAY_DIM || w_dscale > downscale ||
                    h_dscale > downscale)
                return false;
        } else if(w_dscale > 64 || h_dscale > 64) {
            return false;
        }
    } else { //A-family
        if(w_dscale > downscale || h_dscale > downscale)
            return false;
    }

    return true;
}

// Acquires the next free pipe of the requested type on the given mixer.
// Falls back along DMA -> RGB -> VG for the generic request types; returns
// OV_INVALID when nothing suitable is free.
ovutils::eDest MDPComp::getMdpPipe(hwc_context_t *ctx, ePipeType type,
        int mixer) {
    overlay::Overlay& ov = *ctx->mOverlay;
    ovutils::eDest mdp_pipe = ovutils::OV_INVALID;

    switch(type) {
    case MDPCOMP_OV_DMA:
        mdp_pipe = ov.nextPipe(ovutils::OV_MDP_PIPE_DMA, mDpy, mixer);
        if(mdp_pipe != ovutils::OV_INVALID) {
            return mdp_pipe;
        }
        // intentional fall-through: no DMA pipe free, try an RGB pipe
    case MDPCOMP_OV_ANY:
    case MDPCOMP_OV_RGB:
        mdp_pipe = ov.nextPipe(ovutils::OV_MDP_PIPE_RGB, mDpy, mixer);
        if(mdp_pipe != ovutils::OV_INVALID) {
            return mdp_pipe;
        }

        if(type == MDPCOMP_OV_RGB) {
            //Requested only for RGB pipe
            break;
        }
        // intentional fall-through (ANY / downgraded DMA): last resort is VG
    case MDPCOMP_OV_VG:
        return ov.nextPipe(ovutils::OV_MDP_PIPE_VG, mDpy, mixer);
    default:
        ALOGE("%s: Invalid pipe type",__FUNCTION__);
        return ovutils::OV_INVALID;
    };
    return ovutils::OV_INVALID;
}

// Hard per-frame conditions: if any fails, MDP composition (full, partial or
// video-only) is off the table for this cycle.
bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
    bool ret = true;
    // NOTE(review): numAppLayers is unused here; kept to avoid touching code
    // in a comment-only pass.
    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
        ret = false;
    } else if(qdutils::MDPVersion::getInstance().is8x26() &&
            ctx->mVideoTransFlag &&
            ctx->mExtDisplay->isExternalConnected()) {
        //1 Padding round to shift pipes across mixers
        ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round",
                __FUNCTION__);
        ret = false;
    } else if(isSecondaryConfiguring(ctx)) {
        ALOGD_IF( isDebug(),"%s: External Display connection is pending",
                __FUNCTION__);
        ret = false;
    } else if(ctx->isPaddingRound) {
        // One-shot flag: consume it and skip MDP comp for this frame only.
        ctx->isPaddingRound = false;
        ALOGD_IF(isDebug(), "%s: padding round",__FUNCTION__);
        ret = false;
    }
    return ret;
}

/* Checks for conditions where all the layers marked for MDP comp cannot be
 * bypassed. On such conditions we try to bypass atleast YUV layers */
bool MDPComp::isFullFrameDoable(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    if(sIdleFallBack) {
        // Idle timer fired: let GPU compose so MDP pipes can power-collapse.
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
        return false;
    }

    if(mDpy > HWC_DISPLAY_PRIMARY){
        ALOGD_IF(isDebug(), "%s: Cannot support External display(s)",
                __FUNCTION__);
        return false;
    }

    if(isSkipPresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(),"%s: SKIP present: %d",
                __FUNCTION__,
                isSkipPresent(ctx, mDpy));
        return false;
    }

    if(ctx->listStats[mDpy].needsAlphaScale
            && ctx->mMDP.version < qdutils::MDSS_V5) {
        ALOGD_IF(isDebug(), "%s: frame needs alpha downscaling",__FUNCTION__);
        return false;
    }

    //MDP composition is not efficient if layer needs rotator.
    for(int i = 0; i < numAppLayers; ++i) {
        // As MDP h/w supports flip operation, use MDP comp only for
        // 180 transforms. Fail for any transform involving 90 (90, 270).
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(isYuvBuffer(hnd) ) {
            // YUV 90/270 goes through the rotator, so only the securing
            // transition blocks it here.
            if(isSecuring(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
                return false;
            }
        } else if(layer->transform & HWC_TRANSFORM_ROT_90) {
            ALOGD_IF(isDebug(), "%s: orientation involved",__FUNCTION__);
            return false;
        }

        if(!isValidDimension(ctx,layer)) {
            ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
                __FUNCTION__);
            return false;
        }

        //For 8x26 with panel width>1k, if RGB layer needs HFLIP fail mdp comp
        // may not need it if Gfx pre-rotation can handle all flips & rotations
        if(qdutils::MDPVersion::getInstance().is8x26() &&
                                (ctx->dpyAttr[mDpy].xres > 1024) &&
                                (layer->transform & HWC_TRANSFORM_FLIP_H) &&
                                (!isYuvBuffer(hnd)))
            return false;
    }

    // Assertive Display takes over the content path; skip MDP comp.
    if(ctx->mAD->isDoable()) {
        return false;
    }

    //If all above hard conditions are met we can do full or partial MDP comp.
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }
    return ret;
}

// Tries to place every layer on an MDP pipe (no FB involvement at all).
// Only mutates mCurrentFrame counters/flags; pipes are acquired later in
// programMDP().
bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    //Setup mCurrentFrame
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount;
    mCurrentFrame.fbCount = 0;
    mCurrentFrame.fbZ = -1;      // -1: FB target not part of the stack
    memset(&mCurrentFrame.isFBComposed, 0, sizeof(mCurrentFrame.isFBComposed));

    int mdpCount = mCurrentFrame.mdpCount;
    if(mdpCount > sMaxPipesPerMixer) {
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }

    if(!arePipesAvailable(ctx, list)) {
        return false;
    }

    return true;
}

// Mixed mode: non-updating (cached) layers stay in the FB while updating
// layers get MDP pipes. One mixer stage is reserved for the FB target.
bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    if(!sEnableMixedMode) {
        //Mixed mode is disabled. No need to even try caching.
        return false;
    }

    //Setup mCurrentFrame
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list);
    updateYUV(ctx, list);
    batchLayers(); //sets up fbZ also

    int mdpCount = mCurrentFrame.mdpCount;
    if(mdpCount > (sMaxPipesPerMixer - 1)) { // -1 since FB is used
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }

    if(!arePipesAvailable(ctx, list)) {
        return false;
    }

    return true;
}

// Last resort: route only the YUV layers through MDP (GPU composes every RGB
// layer into the FB). Requires full opacity on the video layers since the FB
// is blended on top ("destination over").
bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateYUV(ctx, list);
    int mdpCount = mCurrentFrame.mdpCount;
    int fbNeeded = int(mCurrentFrame.fbCount != 0);

    if(!isYuvPresent(ctx, mDpy)) {
        return false;
    }

    if(!mdpCount)
        return false;

    if(mdpCount > (sMaxPipesPerMixer - fbNeeded)) {
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }

    if(!arePipesAvailable(ctx, list)) {
        return false;
    }

    int nYuvCount = ctx->listStats[mDpy].yuvCount;
    for(int index = 0; index < nYuvCount ; index ++) {
        int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
        hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];
        if(layer->planeAlpha < 0xFF) {
            ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha\
                    in video only mode",
                    __FUNCTION__);
            return false;
        }
    }

    return true;
}

/* Checks for conditions where YUV layers cannot be bypassed */
bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
    if(isSkipLayer(layer)) {
        ALOGE("%s: Unable to bypass skipped YUV", __FUNCTION__);
        return false;
    }

    if(isSecuring(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
        return false;
    }

if(!isValidDimension(ctx, layer)) { 529 ALOGD_IF(isDebug(), "%s: Buffer is of invalid width", 530 __FUNCTION__); 531 return false; 532 } 533 534 return true; 535} 536 537void MDPComp::batchLayers() { 538 /* Idea is to keep as many contiguous non-updating(cached) layers in FB and 539 * send rest of them through MDP. NEVER mark an updating layer for caching. 540 * But cached ones can be marked for MDP*/ 541 542 int maxBatchStart = -1; 543 int maxBatchCount = 0; 544 545 /* All or Nothing is cached. No batching needed */ 546 if(!mCurrentFrame.fbCount) { 547 mCurrentFrame.fbZ = -1; 548 return; 549 } 550 if(!mCurrentFrame.mdpCount) { 551 mCurrentFrame.fbZ = 0; 552 return; 553 } 554 555 /* Search for max number of contiguous (cached) layers */ 556 int i = 0; 557 while (i < mCurrentFrame.layerCount) { 558 int count = 0; 559 while(mCurrentFrame.isFBComposed[i] && i < mCurrentFrame.layerCount) { 560 count++; i++; 561 } 562 if(count > maxBatchCount) { 563 maxBatchCount = count; 564 maxBatchStart = i - count; 565 mCurrentFrame.fbZ = maxBatchStart; 566 } 567 if(i < mCurrentFrame.layerCount) i++; 568 } 569 570 /* reset rest of the layers for MDP comp */ 571 for(int i = 0; i < mCurrentFrame.layerCount; i++) { 572 if(i != maxBatchStart){ 573 mCurrentFrame.isFBComposed[i] = false; 574 } else { 575 i += maxBatchCount; 576 } 577 } 578 579 mCurrentFrame.fbCount = maxBatchCount; 580 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 581 mCurrentFrame.fbCount; 582 583 ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, 584 mCurrentFrame.fbCount); 585} 586 587void MDPComp::updateLayerCache(hwc_context_t* ctx, 588 hwc_display_contents_1_t* list) { 589 590 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 591 int numCacheableLayers = 0; 592 593 for(int i = 0; i < numAppLayers; i++) { 594 if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) { 595 numCacheableLayers++; 596 mCurrentFrame.isFBComposed[i] = true; 597 } else { 598 mCurrentFrame.isFBComposed[i] = false; 599 
mCachedFrame.hnd[i] = list->hwLayers[i].handle; 600 } 601 } 602 603 mCurrentFrame.fbCount = numCacheableLayers; 604 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 605 mCurrentFrame.fbCount; 606 ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, numCacheableLayers); 607} 608 609void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list) { 610 611 int nYuvCount = ctx->listStats[mDpy].yuvCount; 612 for(int index = 0;index < nYuvCount; index++){ 613 int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index]; 614 hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex]; 615 616 if(!isYUVDoable(ctx, layer)) { 617 if(!mCurrentFrame.isFBComposed[nYuvIndex]) { 618 mCurrentFrame.isFBComposed[nYuvIndex] = true; 619 mCurrentFrame.fbCount++; 620 } 621 } else { 622 if(mCurrentFrame.isFBComposed[nYuvIndex]) { 623 mCurrentFrame.isFBComposed[nYuvIndex] = false; 624 mCurrentFrame.fbCount--; 625 } 626 } 627 } 628 629 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 630 mCurrentFrame.fbCount; 631 ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, 632 mCurrentFrame.fbCount); 633} 634 635bool MDPComp::programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 636 if(!allocLayerPipes(ctx, list)) { 637 ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__); 638 return false; 639 } 640 641 bool fbBatch = false; 642 for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount; 643 index++) { 644 if(!mCurrentFrame.isFBComposed[index]) { 645 int mdpIndex = mCurrentFrame.layerToMDP[index]; 646 hwc_layer_1_t* layer = &list->hwLayers[index]; 647 648 MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 649 cur_pipe->zOrder = mdpNextZOrder++; 650 651 if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){ 652 ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \ 653 layer %d",__FUNCTION__, index); 654 return false; 655 } 656 } else if(fbBatch == false) { 657 mdpNextZOrder++; 658 fbBatch = true; 659 } 660 } 661 
    return true;
}

// Allocates pipes and assigns z-orders for the video-only path. The FB (all
// RGB content) sits on top of the video pipes; fbZ was set to mdpCount by
// prepare() before this runs.
bool MDPComp::programYUV(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    if(!allocLayerPipes(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
        return false;
    }
    //If we are in this block, it means we have yuv + rgb layers both
    int mdpIdx = 0;
    for (int index = 0; index < mCurrentFrame.layerCount; index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            hwc_layer_1_t* layer = &list->hwLayers[index];
            int mdpIndex = mCurrentFrame.layerToMDP[index];
            MdpPipeInfo* cur_pipe =
                    mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            cur_pipe->zOrder = mdpIdx++;

            if(configure(ctx, layer,
                        mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
                ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
                        layer %d",__FUNCTION__, index);
                return false;
            }
        }
    }
    return true;
}

// Entry point for the prepare() HWC callback on this display. Decides the
// composition strategy for the frame (full MDP -> mixed -> video-only ->
// GPU fallback), programs the pipes, and publishes layer flags.
// Returns 0 on any successful MDP strategy, -1 when falling back to GPU.
int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    const int numLayers = ctx->listStats[mDpy].numAppLayers;

    { //LOCK SCOPE BEGIN
        Locker::Autolock _l(mMdpCompLock);

        //reset old data
        mCurrentFrame.reset(numLayers);

        //number of app layers exceeds MAX_NUM_APP_LAYERS fall back to GPU
        //do not cache the information for next draw cycle.
        if(numLayers > MAX_NUM_APP_LAYERS) {
            mCachedFrame.updateCounts(mCurrentFrame);
            ALOGD_IF(isDebug(), "%s: Number of App layers exceeded the limit ",
                    __FUNCTION__);
            return -1;
        }

        //Hard conditions, if not met, cannot do MDP comp
        if(!isFrameDoable(ctx)) {
            ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
                    __FUNCTION__);
            reset(numLayers, list);
            return -1;
        }

        //Check whether layers marked for MDP Composition is actually doable.
        if(isFullFrameDoable(ctx, list)){
            mCurrentFrame.map();
            //Configure framebuffer first if applicable
            if(mCurrentFrame.fbZ >= 0) {
                if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list,
                            mCurrentFrame.fbZ)) {
                    ALOGE("%s configure framebuffer failed", __func__);
                    reset(numLayers, list);
                    return -1;
                }
            }
            //Acquire and Program MDP pipes
            if(!programMDP(ctx, list)) {
                reset(numLayers, list);
                return -1;
            } else { //Success
                //Any change in composition types needs an FB refresh
                mCurrentFrame.needsRedraw = false;
                if(mCurrentFrame.fbCount &&
                        ((mCurrentFrame.mdpCount != mCachedFrame.mdpCount) ||
                         (mCurrentFrame.fbCount != mCachedFrame.cacheCount) ||
                         (mCurrentFrame.fbZ != mCachedFrame.fbZ) ||
                         (!mCurrentFrame.mdpCount) ||
                         (list->flags & HWC_GEOMETRY_CHANGED) ||
                         isSkipPresent(ctx, mDpy) ||
                         (mDpy > HWC_DISPLAY_PRIMARY))) {
                    mCurrentFrame.needsRedraw = true;
                }
            }
        } else if(isOnlyVideoDoable(ctx, list)) {
            //All layers marked for MDP comp cannot be bypassed.
            //Try to compose atleast YUV layers through MDP comp and let
            //all the RGB layers compose in FB
            //Destination over
            mCurrentFrame.fbZ = -1;
            if(mCurrentFrame.fbCount)
                mCurrentFrame.fbZ = mCurrentFrame.mdpCount;

            mCurrentFrame.map();

            //Configure framebuffer first if applicable
            if(mCurrentFrame.fbZ >= 0) {
                if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list,
                            mCurrentFrame.fbZ)) {
                    ALOGE("%s configure framebuffer failed", __func__);
                    reset(numLayers, list);
                    return -1;
                }
            }
            if(!programYUV(ctx, list)) {
                reset(numLayers, list);
                return -1;
            }
        } else {
            // No MDP strategy applies; GPU composes everything.
            reset(numLayers, list);
            return -1;
        }

        //UpdateLayerFlags
        setMDPCompLayerFlags(ctx, list);
        mCachedFrame.updateCounts(mCurrentFrame);

    } //LOCK SCOPE END. dump also need this lock.
    // unlock it before calling dump function to avoid deadlock
    if(isDebug()) {
        ALOGD("GEOMETRY change: %d", (list->flags & HWC_GEOMETRY_CHANGED));
        android::String8 sDump("");
        dump(sDump);
        ALOGE("%s",sDump.string());
    }

    return 0;
}

//=============MDPCompLowRes===================================================

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompLowRes::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoLowRes& mdp_info =
            *(static_cast<MdpPipeInfoLowRes*>(PipeLayerPair.pipeInfo));
    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest dest = mdp_info.index;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d",
            __FUNCTION__, layer, zOrder, dest);

    return configureLowRes(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest,
            &PipeLayerPair.rot);
}

// Single-mixer availability check: mdpCount pipes plus one reserved for the
// FB target (when any layer is FB-composed) must fit the default mixer.
bool MDPCompLowRes::arePipesAvailable(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    overlay::Overlay& ov = *ctx->mOverlay;
    int numPipesNeeded = mCurrentFrame.mdpCount;
    int availPipes = ov.availablePipes(mDpy, Overlay::MIXER_DEFAULT);

    //Reserve pipe for FB
    if(mCurrentFrame.fbCount)
        availPipes -= 1;

    if(numPipesNeeded > availPipes) {
        ALOGD_IF(isDebug(), "%s: Insufficient pipes, dpy %d needed %d, avail %d",
                __FUNCTION__, mDpy, numPipesNeeded, availPipes);
        return false;
    }

    return true;
}

// Allocates one pipe per MDP-composed layer. Pipe type preference: VG for
// YUV, DMA for unscaled RGB on MDSS (when DMA is not in block mode), else
// any pipe. pipeInfo ownership passes to mCurrentFrame (freed in
// FrameInfo::reset).
bool MDPCompLowRes::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoLowRes;
        info.rot = NULL;
        MdpPipeInfoLowRes& pipe_info = *(MdpPipeInfoLowRes*)info.pipeInfo;
        ePipeType type = MDPCOMP_OV_ANY;

        if(isYuvBuffer(hnd)) {
            type = MDPCOMP_OV_VG;
        } else if(!qhwc::needsScaling(ctx, layer, mDpy)
            && Overlay::getDMAMode() != Overlay::DMA_BLOCK_MODE
            && ctx->mMDP.version >= qdutils::MDSS_V5) {
            type = MDPCOMP_OV_DMA;
        }

        pipe_info.index = getMdpPipe(ctx, type, Overlay::MIXER_DEFAULT);
        if(pipe_info.index == ovutils::OV_INVALID) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe type = %d",
                __FUNCTION__, (int) type);
            return false;
        }
    }
    return true;
}

// set()-phase handler: queues every MDP-composed layer's buffer (via AD
// and/or rotator when active) to its pipe. Returns false on any queue
// failure so the caller can recover.
bool MDPCompLowRes::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid contxt or list",__FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    Locker::Autolock _l(mMdpCompLock);

    /* reset Invalidator */
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount)
        idleInvalidator->markForSleep();

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            ALOGE("%s handle null", __FUNCTION__);
            return false;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        MdpPipeInfoLowRes& pipe_info =
                *(MdpPipeInfoLowRes*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
        ovutils::eDest dest = pipe_info.index;
        if(dest == ovutils::OV_INVALID) {
            ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
            return false;
        }

        if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
            continue;
        }

        ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                using  pipe: %d", __FUNCTION__, layer,
                hnd, dest );

        int fd = hnd->fd;
        uint32_t offset = hnd->offset;

        // Assertive Display, when active, substitutes its output buffer.
        if(ctx->mAD->isModeOn()) {
            if(ctx->mAD->draw(ctx, fd, offset)) {
                fd = ctx->mAD->getDstFd(ctx);
                offset = ctx->mAD->getDstOffset(ctx);
            }
        }

        // Layers that were rotated consume the rotator's output buffer.
        Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
        if(rot) {
            if(!rot->queueBuffer(fd, offset))
                return false;
            fd = rot->getDstMemId();
            offset = rot->getDstOffset();
        }

        if (!ov.queueBuffer(fd, offset, dest)) {
            ALOGE("%s: queueBuffer failed for display:%d ", __FUNCTION__, mDpy);
            return false;
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }
    return true;
}

//=============MDPCompHighRes===================================================

// Counts pipes the given mixer needs: a layer straddling the left/right
// split (lSplit) consumes a pipe on BOTH mixers.
int MDPCompHighRes::pipesNeeded(hwc_context_t *ctx,
        hwc_display_contents_1_t* list,
        int mixer) {
    int pipesNeeded = 0;
    // NOTE(review): xres is unused here; kept in a comment-only pass.
    const int xres = ctx->dpyAttr[mDpy].xres;

    const int lSplit = getLeftSplit(ctx, mDpy);

    for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            hwc_rect_t dst = layer->displayFrame;
            if(mixer == Overlay::MIXER_LEFT && dst.left < lSplit) {
                pipesNeeded++;
            } else if(mixer == Overlay::MIXER_RIGHT && dst.right > lSplit) {
                pipesNeeded++;
            }
        }
    }
    return pipesNeeded;
}

// Dual-mixer availability check: each mixer must fit its share of layers
// plus one pipe reserved for the FB target when FB composition is used.
bool MDPCompHighRes::arePipesAvailable(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    overlay::Overlay& ov = *ctx->mOverlay;

    for(int i = 0; i < Overlay::MIXER_MAX; i++) {
        int numPipesNeeded = pipesNeeded(ctx, list, i);
        int availPipes = ov.availablePipes(mDpy, i);

        //Reserve pipe(s)for FB
        if(mCurrentFrame.fbCount)
            availPipes -= 1;

        if(numPipesNeeded > availPipes) {
            ALOGD_IF(isDebug(), "%s: Insufficient pipes for "
                    "dpy %d mixer %d needed %d, avail %d",
                    __FUNCTION__, mDpy, i, numPipesNeeded, availPipes);
            return false;
        }
    }
    return true;
}

// Grabs a left-mixer and/or right-mixer pipe for the layer depending on
// which side(s) of the split its destination rect touches.
bool MDPCompHighRes::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoHighRes& pipe_info,
        ePipeType type) {
    // NOTE(review): xres is unused here; kept in a comment-only pass.
    const int xres = ctx->dpyAttr[mDpy].xres;
    const int lSplit = getLeftSplit(ctx, mDpy);

    hwc_rect_t dst = layer->displayFrame;
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    if (dst.left < lSplit) {
        pipe_info.lIndex = getMdpPipe(ctx, type, Overlay::MIXER_LEFT);
        if(pipe_info.lIndex == ovutils::OV_INVALID)
            return false;
    }

    if(dst.right > lSplit) {
        pipe_info.rIndex = getMdpPipe(ctx, type, Overlay::MIXER_RIGHT);
        if(pipe_info.rIndex == ovutils::OV_INVALID)
            return false;
    }

    return true;
}

// Allocates pipe pair(s) per MDP-composed layer; same type preference as the
// LowRes variant (VG for YUV, DMA for unscaled RGB on MDSS, else any).
bool MDPCompHighRes::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoHighRes;
        info.rot = NULL;
        MdpPipeInfoHighRes& pipe_info = *(MdpPipeInfoHighRes*)info.pipeInfo;
        ePipeType type = MDPCOMP_OV_ANY;

        if(isYuvBuffer(hnd)) {
            type = MDPCOMP_OV_VG;
        } else if(!qhwc::needsScaling(ctx, layer, mDpy)
            &&
Overlay::getDMAMode() != Overlay::DMA_BLOCK_MODE 1038 && ctx->mMDP.version >= qdutils::MDSS_V5) { 1039 type = MDPCOMP_OV_DMA; 1040 } 1041 1042 if(!acquireMDPPipes(ctx, layer, pipe_info, type)) { 1043 ALOGD_IF(isDebug(), "%s: Unable to get pipe for type = %d", 1044 __FUNCTION__, (int) type); 1045 return false; 1046 } 1047 } 1048 return true; 1049} 1050 1051/* 1052 * Configures pipe(s) for MDP composition 1053 */ 1054int MDPCompHighRes::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 1055 PipeLayerPair& PipeLayerPair) { 1056 MdpPipeInfoHighRes& mdp_info = 1057 *(static_cast<MdpPipeInfoHighRes*>(PipeLayerPair.pipeInfo)); 1058 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 1059 eIsFg isFg = IS_FG_OFF; 1060 eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION; 1061 eDest lDest = mdp_info.lIndex; 1062 eDest rDest = mdp_info.rIndex; 1063 1064 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d" 1065 "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest); 1066 1067 return configureHighRes(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest, 1068 rDest, &PipeLayerPair.rot); 1069} 1070 1071bool MDPCompHighRes::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 1072 1073 if(!isEnabled()) { 1074 ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__); 1075 return true; 1076 } 1077 1078 if(!ctx || !list) { 1079 ALOGE("%s: invalid contxt or list",__FUNCTION__); 1080 return false; 1081 } 1082 1083 if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) { 1084 ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__); 1085 return true; 1086 } 1087 1088 Locker::Autolock _l(mMdpCompLock); 1089 1090 /* reset Invalidator */ 1091 if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) 1092 idleInvalidator->markForSleep(); 1093 1094 overlay::Overlay& ov = *ctx->mOverlay; 1095 LayerProp *layerProp = ctx->layerProp[mDpy]; 1096 1097 int numHwLayers = ctx->listStats[mDpy].numAppLayers; 1098 for(int i = 0; i < numHwLayers && 
mCurrentFrame.mdpCount; i++ ) 1099 { 1100 if(mCurrentFrame.isFBComposed[i]) continue; 1101 1102 hwc_layer_1_t *layer = &list->hwLayers[i]; 1103 private_handle_t *hnd = (private_handle_t *)layer->handle; 1104 if(!hnd) { 1105 ALOGE("%s handle null", __FUNCTION__); 1106 return false; 1107 } 1108 1109 if(!(layerProp[i].mFlags & HWC_MDPCOMP)) { 1110 continue; 1111 } 1112 1113 int mdpIndex = mCurrentFrame.layerToMDP[i]; 1114 1115 MdpPipeInfoHighRes& pipe_info = 1116 *(MdpPipeInfoHighRes*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 1117 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 1118 1119 ovutils::eDest indexL = pipe_info.lIndex; 1120 ovutils::eDest indexR = pipe_info.rIndex; 1121 1122 int fd = hnd->fd; 1123 int offset = hnd->offset; 1124 1125 if(ctx->mAD->isModeOn()) { 1126 if(ctx->mAD->draw(ctx, fd, offset)) { 1127 fd = ctx->mAD->getDstFd(ctx); 1128 offset = ctx->mAD->getDstOffset(ctx); 1129 } 1130 } 1131 1132 if(rot) { 1133 rot->queueBuffer(fd, offset); 1134 fd = rot->getDstMemId(); 1135 offset = rot->getDstOffset(); 1136 } 1137 1138 //************* play left mixer ********** 1139 if(indexL != ovutils::OV_INVALID) { 1140 ovutils::eDest destL = (ovutils::eDest)indexL; 1141 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1142 using pipe: %d", __FUNCTION__, layer, hnd, indexL ); 1143 if (!ov.queueBuffer(fd, offset, destL)) { 1144 ALOGE("%s: queueBuffer failed for left mixer", __FUNCTION__); 1145 return false; 1146 } 1147 } 1148 1149 //************* play right mixer ********** 1150 if(indexR != ovutils::OV_INVALID) { 1151 ovutils::eDest destR = (ovutils::eDest)indexR; 1152 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1153 using pipe: %d", __FUNCTION__, layer, hnd, indexR ); 1154 if (!ov.queueBuffer(fd, offset, destR)) { 1155 ALOGE("%s: queueBuffer failed for right mixer", __FUNCTION__); 1156 return false; 1157 } 1158 } 1159 1160 layerProp[i].mFlags &= ~HWC_MDPCOMP; 1161 } 1162 1163 return true; 1164} 1165}; //namespace 1166 1167