// hwc_mdpcomp.cpp revision 0276ce984ec32143bc54428cefe0a8a1a8853e3f
1/* 2 * Copyright (C) 2012-2013, The Linux Foundation. All rights reserved. 3 * Not a Contribution, Apache license notifications and license are retained 4 * for attribution purposes only. 5 * 6 * Licensed under the Apache License, Version 2.0 (the "License"); 7 * you may not use this file except in compliance with the License. 8 * You may obtain a copy of the License at 9 * 10 * http://www.apache.org/licenses/LICENSE-2.0 11 * 12 * Unless required by applicable law or agreed to in writing, software 13 * distributed under the License is distributed on an "AS IS" BASIS, 14 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 15 * See the License for the specific language governing permissions and 16 * limitations under the License. 17 */ 18 19#include <math.h> 20#include "hwc_mdpcomp.h" 21#include <sys/ioctl.h> 22#include "external.h" 23#include "qdMetaData.h" 24#include "mdp_version.h" 25#include <overlayRotator.h> 26 27using namespace overlay; 28using namespace qdutils; 29using namespace overlay::utils; 30namespace ovutils = overlay::utils; 31 32namespace qhwc { 33 34//==============MDPComp======================================================== 35 36IdleInvalidator *MDPComp::idleInvalidator = NULL; 37bool MDPComp::sIdleFallBack = false; 38bool MDPComp::sDebugLogs = false; 39bool MDPComp::sEnabled = false; 40bool MDPComp::sEnableMixedMode = true; 41int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER; 42 43MDPComp* MDPComp::getObject(const int& width, int dpy) { 44 if(width <= MAX_DISPLAY_DIM) { 45 return new MDPCompLowRes(dpy); 46 } else { 47 return new MDPCompHighRes(dpy); 48 } 49} 50 51MDPComp::MDPComp(int dpy):mDpy(dpy){}; 52 53void MDPComp::dump(android::String8& buf) 54{ 55 dumpsys_log(buf,"HWC Map for Dpy: %s \n", 56 mDpy ? 
"\"EXTERNAL\"" : "\"PRIMARY\""); 57 dumpsys_log(buf,"PREV_FRAME: layerCount:%2d mdpCount:%2d \ 58 cacheCount:%2d \n", mCachedFrame.layerCount, 59 mCachedFrame.mdpCount, mCachedFrame.cacheCount); 60 dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d \ 61 fbCount:%2d \n", mCurrentFrame.layerCount, 62 mCurrentFrame.mdpCount, mCurrentFrame.fbCount); 63 dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n", 64 (mCurrentFrame.needsRedraw? "YES" : "NO"), 65 mCurrentFrame.mdpCount, sMaxPipesPerMixer); 66 dumpsys_log(buf," --------------------------------------------- \n"); 67 dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype | Z \n"); 68 dumpsys_log(buf," --------------------------------------------- \n"); 69 for(int index = 0; index < mCurrentFrame.layerCount; index++ ) 70 dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n", 71 index, 72 (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"), 73 mCurrentFrame.layerToMDP[index], 74 (mCurrentFrame.isFBComposed[index] ? 75 (mCurrentFrame.needsRedraw ? "GLES" : "CACHE") : "MDP"), 76 (mCurrentFrame.isFBComposed[index] ? 
mCurrentFrame.fbZ : 77 mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder)); 78 dumpsys_log(buf,"\n"); 79} 80 81bool MDPComp::init(hwc_context_t *ctx) { 82 83 if(!ctx) { 84 ALOGE("%s: Invalid hwc context!!",__FUNCTION__); 85 return false; 86 } 87 88 char property[PROPERTY_VALUE_MAX]; 89 90 sEnabled = false; 91 if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) && 92 (!strncmp(property, "1", PROPERTY_VALUE_MAX ) || 93 (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) { 94 sEnabled = true; 95 if(!setupBasePipe(ctx)) { 96 ALOGE("%s: Failed to setup primary base pipe", __FUNCTION__); 97 return false; 98 } 99 } 100 101 sEnableMixedMode = true; 102 if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) && 103 (!strncmp(property, "1", PROPERTY_VALUE_MAX ) || 104 (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) { 105 sEnableMixedMode = false; 106 } 107 108 sDebugLogs = false; 109 if(property_get("debug.mdpcomp.logs", property, NULL) > 0) { 110 if(atoi(property) != 0) 111 sDebugLogs = true; 112 } 113 114 sMaxPipesPerMixer = MAX_PIPES_PER_MIXER; 115 if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) { 116 int val = atoi(property); 117 if(val >= 0) 118 sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER); 119 } 120 121 unsigned long idle_timeout = DEFAULT_IDLE_TIME; 122 if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) { 123 if(atoi(property) != 0) 124 idle_timeout = atoi(property); 125 } 126 127 //create Idle Invalidator 128 idleInvalidator = IdleInvalidator::getInstance(); 129 130 if(idleInvalidator == NULL) { 131 ALOGE("%s: failed to instantiate idleInvalidator object", __FUNCTION__); 132 } else { 133 idleInvalidator->init(timeout_handler, ctx, idle_timeout); 134 } 135 return true; 136} 137 138void MDPComp::timeout_handler(void *udata) { 139 struct hwc_context_t* ctx = (struct hwc_context_t*)(udata); 140 141 if(!ctx) { 142 ALOGE("%s: received empty data in timer callback", 
__FUNCTION__); 143 return; 144 } 145 146 if(!ctx->proc) { 147 ALOGE("%s: HWC proc not registered", __FUNCTION__); 148 return; 149 } 150 sIdleFallBack = true; 151 /* Trigger SF to redraw the current frame */ 152 ctx->proc->invalidate(ctx->proc); 153} 154 155void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx, 156 hwc_display_contents_1_t* list) { 157 LayerProp *layerProp = ctx->layerProp[mDpy]; 158 159 for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) { 160 hwc_layer_1_t* layer = &(list->hwLayers[index]); 161 if(!mCurrentFrame.isFBComposed[index]) { 162 layerProp[index].mFlags |= HWC_MDPCOMP; 163 layer->compositionType = HWC_OVERLAY; 164 layer->hints |= HWC_HINT_CLEAR_FB; 165 mCachedFrame.hnd[index] = NULL; 166 } else { 167 if(!mCurrentFrame.needsRedraw) 168 layer->compositionType = HWC_OVERLAY; 169 } 170 } 171} 172 173/* 174 * Sets up BORDERFILL as default base pipe and detaches RGB0. 175 * Framebuffer is always updated using PLAY ioctl. 176 */ 177bool MDPComp::setupBasePipe(hwc_context_t *ctx) { 178 const int dpy = HWC_DISPLAY_PRIMARY; 179 int fb_stride = ctx->dpyAttr[dpy].stride; 180 int fb_width = ctx->dpyAttr[dpy].xres; 181 int fb_height = ctx->dpyAttr[dpy].yres; 182 int fb_fd = ctx->dpyAttr[dpy].fd; 183 184 mdp_overlay ovInfo; 185 msmfb_overlay_data ovData; 186 memset(&ovInfo, 0, sizeof(mdp_overlay)); 187 memset(&ovData, 0, sizeof(msmfb_overlay_data)); 188 189 ovInfo.src.format = MDP_RGB_BORDERFILL; 190 ovInfo.src.width = fb_width; 191 ovInfo.src.height = fb_height; 192 ovInfo.src_rect.w = fb_width; 193 ovInfo.src_rect.h = fb_height; 194 ovInfo.dst_rect.w = fb_width; 195 ovInfo.dst_rect.h = fb_height; 196 ovInfo.id = MSMFB_NEW_REQUEST; 197 198 if (ioctl(fb_fd, MSMFB_OVERLAY_SET, &ovInfo) < 0) { 199 ALOGE("Failed to call ioctl MSMFB_OVERLAY_SET err=%s", 200 strerror(errno)); 201 return false; 202 } 203 204 ovData.id = ovInfo.id; 205 if (ioctl(fb_fd, MSMFB_OVERLAY_PLAY, &ovData) < 0) { 206 ALOGE("Failed to call ioctl MSMFB_OVERLAY_PLAY 
err=%s", 207 strerror(errno)); 208 return false; 209 } 210 return true; 211} 212 213MDPComp::FrameInfo::FrameInfo() { 214 reset(0); 215} 216 217void MDPComp::FrameInfo::reset(const int& numLayers) { 218 for(int i = 0 ; i < MAX_PIPES_PER_MIXER && numLayers; i++ ) { 219 if(mdpToLayer[i].pipeInfo) { 220 delete mdpToLayer[i].pipeInfo; 221 mdpToLayer[i].pipeInfo = NULL; 222 //We dont own the rotator 223 mdpToLayer[i].rot = NULL; 224 } 225 } 226 227 memset(&mdpToLayer, 0, sizeof(mdpToLayer)); 228 memset(&layerToMDP, -1, sizeof(layerToMDP)); 229 memset(&isFBComposed, 1, sizeof(isFBComposed)); 230 231 layerCount = numLayers; 232 fbCount = numLayers; 233 mdpCount = 0; 234 needsRedraw = true; 235 fbZ = 0; 236} 237 238void MDPComp::FrameInfo::map() { 239 // populate layer and MDP maps 240 int mdpIdx = 0; 241 for(int idx = 0; idx < layerCount; idx++) { 242 if(!isFBComposed[idx]) { 243 mdpToLayer[mdpIdx].listIndex = idx; 244 layerToMDP[idx] = mdpIdx++; 245 } 246 } 247} 248 249MDPComp::LayerCache::LayerCache() { 250 reset(); 251} 252 253void MDPComp::LayerCache::reset() { 254 memset(&hnd, 0, sizeof(hnd)); 255 mdpCount = 0; 256 cacheCount = 0; 257 layerCount = 0; 258 fbZ = -1; 259} 260 261void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) { 262 const int numAppLayers = list->numHwLayers - 1; 263 for(int i = 0; i < numAppLayers; i++) { 264 hnd[i] = list->hwLayers[i].handle; 265 } 266} 267 268void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) { 269 mdpCount = curFrame.mdpCount; 270 cacheCount = curFrame.fbCount; 271 layerCount = curFrame.layerCount; 272 fbZ = curFrame.fbZ; 273} 274 275bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) { 276 const int dpy = HWC_DISPLAY_PRIMARY; 277 private_handle_t *hnd = (private_handle_t *)layer->handle; 278 279 if(!hnd) { 280 ALOGE("%s: layer handle is NULL", __FUNCTION__); 281 return false; 282 } 283 284 int hw_w = ctx->dpyAttr[mDpy].xres; 285 int hw_h = ctx->dpyAttr[mDpy].yres; 286 287 
hwc_rect_t crop = layer->sourceCrop; 288 hwc_rect_t dst = layer->displayFrame; 289 290 if(dst.left < 0 || dst.top < 0 || dst.right > hw_w || dst.bottom > hw_h) { 291 hwc_rect_t scissor = {0, 0, hw_w, hw_h }; 292 qhwc::calculate_crop_rects(crop, dst, scissor, layer->transform); 293 } 294 295 int crop_w = crop.right - crop.left; 296 int crop_h = crop.bottom - crop.top; 297 int dst_w = dst.right - dst.left; 298 int dst_h = dst.bottom - dst.top; 299 float w_dscale = ceilf((float)crop_w / (float)dst_w); 300 float h_dscale = ceilf((float)crop_h / (float)dst_h); 301 302 //Workaround for MDP HW limitation in DSI command mode panels where 303 //FPS will not go beyond 30 if buffers on RGB pipes are of width < 5 304 305 if((crop_w < 5)||(crop_h < 5)) 306 return false; 307 308 const uint32_t downscale = 309 qdutils::MDPVersion::getInstance().getMaxMDPDownscale(); 310 if(ctx->mMDP.version >= qdutils::MDSS_V5) { 311 if(!qdutils::MDPVersion::getInstance().supportsDecimation()) { 312 if(crop_w > MAX_DISPLAY_DIM || w_dscale > downscale || 313 h_dscale > downscale) 314 return false; 315 } else if(w_dscale > 64 || h_dscale > 64) { 316 return false; 317 } 318 } else { //A-family 319 if(w_dscale > downscale || h_dscale > downscale) 320 return false; 321 } 322 323 return true; 324} 325 326ovutils::eDest MDPComp::getMdpPipe(hwc_context_t *ctx, ePipeType type) { 327 overlay::Overlay& ov = *ctx->mOverlay; 328 ovutils::eDest mdp_pipe = ovutils::OV_INVALID; 329 330 switch(type) { 331 case MDPCOMP_OV_DMA: 332 mdp_pipe = ov.nextPipe(ovutils::OV_MDP_PIPE_DMA, mDpy); 333 if(mdp_pipe != ovutils::OV_INVALID) { 334 return mdp_pipe; 335 } 336 case MDPCOMP_OV_ANY: 337 case MDPCOMP_OV_RGB: 338 mdp_pipe = ov.nextPipe(ovutils::OV_MDP_PIPE_RGB, mDpy); 339 if(mdp_pipe != ovutils::OV_INVALID) { 340 return mdp_pipe; 341 } 342 343 if(type == MDPCOMP_OV_RGB) { 344 //Requested only for RGB pipe 345 break; 346 } 347 case MDPCOMP_OV_VG: 348 return ov.nextPipe(ovutils::OV_MDP_PIPE_VG, mDpy); 349 default: 350 
ALOGE("%s: Invalid pipe type",__FUNCTION__); 351 return ovutils::OV_INVALID; 352 }; 353 return ovutils::OV_INVALID; 354} 355 356bool MDPComp::isFrameDoable(hwc_context_t *ctx) { 357 bool ret = true; 358 const int numAppLayers = ctx->listStats[mDpy].numAppLayers; 359 360 if(!isEnabled()) { 361 ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__); 362 ret = false; 363 } else if(ctx->mExtDispConfiguring) { 364 ALOGD_IF( isDebug(),"%s: External Display connection is pending", 365 __FUNCTION__); 366 ret = false; 367 } else if(ctx->isPaddingRound) { 368 ctx->isPaddingRound = false; 369 ALOGD_IF(isDebug(), "%s: padding round",__FUNCTION__); 370 ret = false; 371 } 372 return ret; 373} 374 375/* Checks for conditions where all the layers marked for MDP comp cannot be 376 * bypassed. On such conditions we try to bypass atleast YUV layers */ 377bool MDPComp::isFullFrameDoable(hwc_context_t *ctx, 378 hwc_display_contents_1_t* list){ 379 380 const int numAppLayers = ctx->listStats[mDpy].numAppLayers; 381 382 if(sIdleFallBack) { 383 ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy); 384 return false; 385 } 386 387 if(mDpy > HWC_DISPLAY_PRIMARY){ 388 ALOGD_IF(isDebug(), "%s: Cannot support External display(s)", 389 __FUNCTION__); 390 return false; 391 } 392 393 if(isSkipPresent(ctx, mDpy)) { 394 ALOGD_IF(isDebug(),"%s: SKIP present: %d", 395 __FUNCTION__, 396 isSkipPresent(ctx, mDpy)); 397 return false; 398 } 399 400 if(ctx->listStats[mDpy].planeAlpha 401 && ctx->mMDP.version >= qdutils::MDSS_V5) { 402 ALOGD_IF(isDebug(), "%s: plane alpha not implemented on MDSS", 403 __FUNCTION__); 404 return false; 405 } 406 407 if(ctx->listStats[mDpy].needsAlphaScale 408 && ctx->mMDP.version < qdutils::MDSS_V5) { 409 ALOGD_IF(isDebug(), "%s: frame needs alpha downscaling",__FUNCTION__); 410 return false; 411 } 412 413 //MDP composition is not efficient if layer needs rotator. 
414 for(int i = 0; i < numAppLayers; ++i) { 415 // As MDP h/w supports flip operation, use MDP comp only for 416 // 180 transforms. Fail for any transform involving 90 (90, 270). 417 hwc_layer_1_t* layer = &list->hwLayers[i]; 418 private_handle_t *hnd = (private_handle_t *)layer->handle; 419 if(isYuvBuffer(hnd) ) { 420 if(isSecuring(ctx, layer)) { 421 ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__); 422 return false; 423 } 424 } else if(layer->transform & HWC_TRANSFORM_ROT_90) { 425 ALOGD_IF(isDebug(), "%s: orientation involved",__FUNCTION__); 426 return false; 427 } 428 429 if(!isValidDimension(ctx,layer)) { 430 ALOGD_IF(isDebug(), "%s: Buffer is of invalid width", 431 __FUNCTION__); 432 return false; 433 } 434 } 435 436 //If all above hard conditions are met we can do full or partial MDP comp. 437 bool ret = false; 438 if(fullMDPComp(ctx, list)) { 439 ret = true; 440 } else if(partialMDPComp(ctx, list)) { 441 ret = true; 442 } 443 return ret; 444} 445 446bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 447 //Setup mCurrentFrame 448 mCurrentFrame.mdpCount = mCurrentFrame.layerCount; 449 mCurrentFrame.fbCount = 0; 450 mCurrentFrame.fbZ = -1; 451 memset(&mCurrentFrame.isFBComposed, 0, sizeof(mCurrentFrame.isFBComposed)); 452 453 int mdpCount = mCurrentFrame.mdpCount; 454 if(mdpCount > sMaxPipesPerMixer) { 455 ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__); 456 return false; 457 } 458 459 int numPipesNeeded = pipesNeeded(ctx, list); 460 int availPipes = getAvailablePipes(ctx); 461 462 if(numPipesNeeded > availPipes) { 463 ALOGD_IF(isDebug(), "%s: Insufficient MDP pipes, needed %d, avail %d", 464 __FUNCTION__, numPipesNeeded, availPipes); 465 return false; 466 } 467 468 return true; 469} 470 471bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) 472{ 473 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 474 475 if(!sEnableMixedMode) { 476 //Mixed mode is disabled. 
No need to even try caching. 477 return false; 478 } 479 480 //Setup mCurrentFrame 481 mCurrentFrame.reset(numAppLayers); 482 updateLayerCache(ctx, list); 483 updateYUV(ctx, list); 484 batchLayers(); //sets up fbZ also 485 486 int mdpCount = mCurrentFrame.mdpCount; 487 if(mdpCount > (sMaxPipesPerMixer - 1)) { // -1 since FB is used 488 ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__); 489 return false; 490 } 491 492 int numPipesNeeded = pipesNeeded(ctx, list); 493 int availPipes = getAvailablePipes(ctx); 494 495 if(numPipesNeeded > availPipes) { 496 ALOGD_IF(isDebug(), "%s: Insufficient MDP pipes, needed %d, avail %d", 497 __FUNCTION__, numPipesNeeded, availPipes); 498 return false; 499 } 500 501 return true; 502} 503 504bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx, 505 hwc_display_contents_1_t* list){ 506 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 507 mCurrentFrame.reset(numAppLayers); 508 updateYUV(ctx, list); 509 int mdpCount = mCurrentFrame.mdpCount; 510 int fbNeeded = int(mCurrentFrame.fbCount != 0); 511 512 if(!isYuvPresent(ctx, mDpy)) { 513 return false; 514 } 515 516 if(!mdpCount) 517 return false; 518 519 if(mdpCount > (sMaxPipesPerMixer - fbNeeded)) { 520 ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__); 521 return false; 522 } 523 524 int numPipesNeeded = pipesNeeded(ctx, list); 525 int availPipes = getAvailablePipes(ctx); 526 if(numPipesNeeded > availPipes) { 527 ALOGD_IF(isDebug(), "%s: Insufficient MDP pipes, needed %d, avail %d", 528 __FUNCTION__, numPipesNeeded, availPipes); 529 return false; 530 } 531 532 return true; 533} 534 535/* Checks for conditions where YUV layers cannot be bypassed */ 536bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) { 537 if(isSkipLayer(layer)) { 538 ALOGE("%s: Unable to bypass skipped YUV", __FUNCTION__); 539 return false; 540 } 541 542 if(isSecuring(ctx, layer)) { 543 ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__); 544 return 
false; 545 } 546 547 if(!isValidDimension(ctx, layer)) { 548 ALOGD_IF(isDebug(), "%s: Buffer is of invalid width", 549 __FUNCTION__); 550 return false; 551 } 552 553 return true; 554} 555 556void MDPComp::batchLayers() { 557 /* Idea is to keep as many contiguous non-updating(cached) layers in FB and 558 * send rest of them through MDP. NEVER mark an updating layer for caching. 559 * But cached ones can be marked for MDP*/ 560 561 int maxBatchStart = -1; 562 int maxBatchCount = 0; 563 564 /* All or Nothing is cached. No batching needed */ 565 if(!mCurrentFrame.fbCount) { 566 mCurrentFrame.fbZ = -1; 567 return; 568 } 569 if(!mCurrentFrame.mdpCount) { 570 mCurrentFrame.fbZ = 0; 571 return; 572 } 573 574 /* Search for max number of contiguous (cached) layers */ 575 int i = 0; 576 while (i < mCurrentFrame.layerCount) { 577 int count = 0; 578 while(mCurrentFrame.isFBComposed[i] && i < mCurrentFrame.layerCount) { 579 count++; i++; 580 } 581 if(count > maxBatchCount) { 582 maxBatchCount = count; 583 maxBatchStart = i - count; 584 mCurrentFrame.fbZ = maxBatchStart; 585 } 586 if(i < mCurrentFrame.layerCount) i++; 587 } 588 589 /* reset rest of the layers for MDP comp */ 590 for(int i = 0; i < mCurrentFrame.layerCount; i++) { 591 if(i != maxBatchStart){ 592 mCurrentFrame.isFBComposed[i] = false; 593 } else { 594 i += maxBatchCount; 595 } 596 } 597 598 mCurrentFrame.fbCount = maxBatchCount; 599 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 600 mCurrentFrame.fbCount; 601 602 ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, 603 mCurrentFrame.fbCount); 604} 605 606void MDPComp::updateLayerCache(hwc_context_t* ctx, 607 hwc_display_contents_1_t* list) { 608 609 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 610 int numCacheableLayers = 0; 611 612 for(int i = 0; i < numAppLayers; i++) { 613 if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) { 614 numCacheableLayers++; 615 mCurrentFrame.isFBComposed[i] = true; 616 } else { 617 mCurrentFrame.isFBComposed[i] = 
false; 618 mCachedFrame.hnd[i] = list->hwLayers[i].handle; 619 } 620 } 621 622 mCurrentFrame.fbCount = numCacheableLayers; 623 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 624 mCurrentFrame.fbCount; 625 ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, numCacheableLayers); 626} 627 628int MDPComp::getAvailablePipes(hwc_context_t* ctx) { 629 int numDMAPipes = qdutils::MDPVersion::getInstance().getDMAPipes(); 630 overlay::Overlay& ov = *ctx->mOverlay; 631 632 int numAvailable = ov.availablePipes(mDpy); 633 634 //Reserve DMA for rotator 635 if(Overlay::getDMAMode() == Overlay::DMA_BLOCK_MODE) 636 numAvailable -= numDMAPipes; 637 638 //Reserve pipe(s)for FB 639 if(mCurrentFrame.fbCount) 640 numAvailable -= pipesForFB(); 641 642 return numAvailable; 643} 644 645void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list) { 646 647 int nYuvCount = ctx->listStats[mDpy].yuvCount; 648 for(int index = 0;index < nYuvCount; index++){ 649 int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index]; 650 hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex]; 651 652 if(!isYUVDoable(ctx, layer)) { 653 if(!mCurrentFrame.isFBComposed[nYuvIndex]) { 654 mCurrentFrame.isFBComposed[nYuvIndex] = true; 655 mCurrentFrame.fbCount++; 656 } 657 } else { 658 if(mCurrentFrame.isFBComposed[nYuvIndex]) { 659 mCurrentFrame.isFBComposed[nYuvIndex] = false; 660 mCurrentFrame.fbCount--; 661 } 662 } 663 } 664 665 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 666 mCurrentFrame.fbCount; 667 ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, 668 mCurrentFrame.fbCount); 669} 670 671bool MDPComp::programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 672 if(!allocLayerPipes(ctx, list)) { 673 ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__); 674 return false; 675 } 676 677 bool fbBatch = false; 678 for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount; 679 index++) { 680 if(!mCurrentFrame.isFBComposed[index]) { 681 int mdpIndex = 
mCurrentFrame.layerToMDP[index]; 682 hwc_layer_1_t* layer = &list->hwLayers[index]; 683 684 MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 685 cur_pipe->zOrder = mdpNextZOrder++; 686 687 if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){ 688 ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \ 689 layer %d",__FUNCTION__, index); 690 return false; 691 } 692 } else if(fbBatch == false) { 693 mdpNextZOrder++; 694 fbBatch = true; 695 } 696 } 697 698 return true; 699} 700 701bool MDPComp::programYUV(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 702 if(!allocLayerPipes(ctx, list)) { 703 ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__); 704 return false; 705 } 706 //If we are in this block, it means we have yuv + rgb layers both 707 int mdpIdx = 0; 708 for (int index = 0; index < mCurrentFrame.layerCount; index++) { 709 if(!mCurrentFrame.isFBComposed[index]) { 710 hwc_layer_1_t* layer = &list->hwLayers[index]; 711 int mdpIndex = mCurrentFrame.layerToMDP[index]; 712 MdpPipeInfo* cur_pipe = 713 mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 714 cur_pipe->zOrder = mdpIdx++; 715 716 if(configure(ctx, layer, 717 mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){ 718 ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \ 719 layer %d",__FUNCTION__, index); 720 return false; 721 } 722 } 723 } 724 return true; 725} 726 727int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 728 729 const int numLayers = ctx->listStats[mDpy].numAppLayers; 730 731 //number of app layers exceeds MAX_NUM_APP_LAYERS fall back to GPU 732 //do not cache the information for next draw cycle. 
733 if(numLayers > MAX_NUM_APP_LAYERS) { 734 ALOGD_IF(isDebug(), "%s: Number of App layers exceeded the limit ", 735 __FUNCTION__); 736 return 0; 737 } 738 //reset old data 739 mCurrentFrame.reset(numLayers); 740 741 //Hard conditions, if not met, cannot do MDP comp 742 if(!isFrameDoable(ctx)) { 743 ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame", 744 __FUNCTION__); 745 mCurrentFrame.reset(numLayers); 746 mCachedFrame.cacheAll(list); 747 mCachedFrame.updateCounts(mCurrentFrame); 748 return 0; 749 } 750 751 //Check whether layers marked for MDP Composition is actually doable. 752 if(isFullFrameDoable(ctx, list)){ 753 mCurrentFrame.map(); 754 //Acquire and Program MDP pipes 755 if(!programMDP(ctx, list)) { 756 mCurrentFrame.reset(numLayers); 757 mCachedFrame.cacheAll(list); 758 } else { //Success 759 //Any change in composition types needs an FB refresh 760 mCurrentFrame.needsRedraw = false; 761 if(mCurrentFrame.fbCount && 762 ((mCurrentFrame.mdpCount != mCachedFrame.mdpCount) || 763 (mCurrentFrame.fbCount != mCachedFrame.cacheCount) || 764 (mCurrentFrame.fbZ != mCachedFrame.fbZ) || 765 (!mCurrentFrame.mdpCount) || 766 (list->flags & HWC_GEOMETRY_CHANGED) || 767 isSkipPresent(ctx, mDpy) || 768 (mDpy > HWC_DISPLAY_PRIMARY))) { 769 mCurrentFrame.needsRedraw = true; 770 } 771 } 772 } else if(isOnlyVideoDoable(ctx, list)) { 773 //All layers marked for MDP comp cannot be bypassed. 
774 //Try to compose atleast YUV layers through MDP comp and let 775 //all the RGB layers compose in FB 776 //Destination over 777 mCurrentFrame.fbZ = -1; 778 if(mCurrentFrame.fbCount) 779 mCurrentFrame.fbZ = ctx->listStats[mDpy].yuvCount; 780 781 mCurrentFrame.map(); 782 if(!programYUV(ctx, list)) { 783 mCurrentFrame.reset(numLayers); 784 mCachedFrame.cacheAll(list); 785 } 786 } else { 787 mCurrentFrame.reset(numLayers); 788 mCachedFrame.cacheAll(list); 789 } 790 791 //UpdateLayerFlags 792 setMDPCompLayerFlags(ctx, list); 793 mCachedFrame.updateCounts(mCurrentFrame); 794 795 if(isDebug()) { 796 ALOGD("GEOMETRY change: %d", (list->flags & HWC_GEOMETRY_CHANGED)); 797 android::String8 sDump(""); 798 dump(sDump); 799 ALOGE("%s",sDump.string()); 800 } 801 802 return mCurrentFrame.fbZ; 803} 804 805//=============MDPCompLowRes=================================================== 806 807/* 808 * Configures pipe(s) for MDP composition 809 */ 810int MDPCompLowRes::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 811 PipeLayerPair& PipeLayerPair) { 812 MdpPipeInfoLowRes& mdp_info = 813 *(static_cast<MdpPipeInfoLowRes*>(PipeLayerPair.pipeInfo)); 814 eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION; 815 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 816 eIsFg isFg = IS_FG_OFF; 817 eDest dest = mdp_info.index; 818 819 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d", 820 __FUNCTION__, layer, zOrder, dest); 821 822 return configureLowRes(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest, 823 &PipeLayerPair.rot); 824} 825 826int MDPCompLowRes::pipesNeeded(hwc_context_t *ctx, 827 hwc_display_contents_1_t* list) { 828 return mCurrentFrame.mdpCount; 829} 830 831bool MDPCompLowRes::allocLayerPipes(hwc_context_t *ctx, 832 hwc_display_contents_1_t* list) { 833 for(int index = 0; index < mCurrentFrame.layerCount; index++) { 834 835 if(mCurrentFrame.isFBComposed[index]) continue; 836 837 hwc_layer_1_t* layer = &list->hwLayers[index]; 838 private_handle_t 
*hnd = (private_handle_t *)layer->handle; 839 int mdpIndex = mCurrentFrame.layerToMDP[index]; 840 PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex]; 841 info.pipeInfo = new MdpPipeInfoLowRes; 842 info.rot = NULL; 843 MdpPipeInfoLowRes& pipe_info = *(MdpPipeInfoLowRes*)info.pipeInfo; 844 ePipeType type = MDPCOMP_OV_ANY; 845 846 if(isYuvBuffer(hnd)) { 847 type = MDPCOMP_OV_VG; 848 } else if(!qhwc::needsScaling(ctx, layer, mDpy) 849 && Overlay::getDMAMode() != Overlay::DMA_BLOCK_MODE 850 && ctx->mMDP.version >= qdutils::MDSS_V5) { 851 type = MDPCOMP_OV_DMA; 852 } 853 854 pipe_info.index = getMdpPipe(ctx, type); 855 if(pipe_info.index == ovutils::OV_INVALID) { 856 ALOGD_IF(isDebug(), "%s: Unable to get pipe type = %d", 857 __FUNCTION__, (int) type); 858 return false; 859 } 860 } 861 return true; 862} 863 864bool MDPCompLowRes::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 865 866 if(!isEnabled()) { 867 ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__); 868 return true; 869 } 870 871 if(!ctx || !list) { 872 ALOGE("%s: invalid contxt or list",__FUNCTION__); 873 return false; 874 } 875 876 /* reset Invalidator */ 877 if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) 878 idleInvalidator->markForSleep(); 879 880 overlay::Overlay& ov = *ctx->mOverlay; 881 LayerProp *layerProp = ctx->layerProp[mDpy]; 882 883 int numHwLayers = ctx->listStats[mDpy].numAppLayers; 884 for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ ) 885 { 886 if(mCurrentFrame.isFBComposed[i]) continue; 887 888 hwc_layer_1_t *layer = &list->hwLayers[i]; 889 private_handle_t *hnd = (private_handle_t *)layer->handle; 890 if(!hnd) { 891 ALOGE("%s handle null", __FUNCTION__); 892 return false; 893 } 894 895 int mdpIndex = mCurrentFrame.layerToMDP[i]; 896 897 MdpPipeInfoLowRes& pipe_info = 898 *(MdpPipeInfoLowRes*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 899 ovutils::eDest dest = pipe_info.index; 900 if(dest == ovutils::OV_INVALID) { 901 ALOGE("%s: 
Invalid pipe index (%d)", __FUNCTION__, dest); 902 return false; 903 } 904 905 if(!(layerProp[i].mFlags & HWC_MDPCOMP)) { 906 continue; 907 } 908 909 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 910 using pipe: %d", __FUNCTION__, layer, 911 hnd, dest ); 912 913 int fd = hnd->fd; 914 uint32_t offset = hnd->offset; 915 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 916 if(rot) { 917 if(!rot->queueBuffer(fd, offset)) 918 return false; 919 fd = rot->getDstMemId(); 920 offset = rot->getDstOffset(); 921 } 922 923 if (!ov.queueBuffer(fd, offset, dest)) { 924 ALOGE("%s: queueBuffer failed for external", __FUNCTION__); 925 return false; 926 } 927 928 layerProp[i].mFlags &= ~HWC_MDPCOMP; 929 } 930 return true; 931} 932 933//=============MDPCompHighRes=================================================== 934 935int MDPCompHighRes::pipesNeeded(hwc_context_t *ctx, 936 hwc_display_contents_1_t* list) { 937 int pipesNeeded = 0; 938 int hw_w = ctx->dpyAttr[mDpy].xres; 939 940 for(int i = 0; i < mCurrentFrame.layerCount; ++i) { 941 if(!mCurrentFrame.isFBComposed[i]) { 942 hwc_layer_1_t* layer = &list->hwLayers[i]; 943 hwc_rect_t dst = layer->displayFrame; 944 if(dst.left > hw_w/2) { 945 pipesNeeded++; 946 } else if(dst.right <= hw_w/2) { 947 pipesNeeded++; 948 } else { 949 pipesNeeded += 2; 950 } 951 } 952 } 953 return pipesNeeded; 954} 955 956bool MDPCompHighRes::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer, 957 MdpPipeInfoHighRes& pipe_info, 958 ePipeType type) { 959 int hw_w = ctx->dpyAttr[mDpy].xres; 960 961 hwc_rect_t dst = layer->displayFrame; 962 if(dst.left > hw_w/2) { 963 pipe_info.lIndex = ovutils::OV_INVALID; 964 pipe_info.rIndex = getMdpPipe(ctx, type); 965 if(pipe_info.rIndex == ovutils::OV_INVALID) 966 return false; 967 } else if (dst.right <= hw_w/2) { 968 pipe_info.rIndex = ovutils::OV_INVALID; 969 pipe_info.lIndex = getMdpPipe(ctx, type); 970 if(pipe_info.lIndex == ovutils::OV_INVALID) 971 return false; 972 } else { 973 
pipe_info.rIndex = getMdpPipe(ctx, type); 974 pipe_info.lIndex = getMdpPipe(ctx, type); 975 if(pipe_info.rIndex == ovutils::OV_INVALID || 976 pipe_info.lIndex == ovutils::OV_INVALID) 977 return false; 978 } 979 return true; 980} 981 982bool MDPCompHighRes::allocLayerPipes(hwc_context_t *ctx, 983 hwc_display_contents_1_t* list) { 984 for(int index = 0 ; index < mCurrentFrame.layerCount; index++) { 985 986 if(mCurrentFrame.isFBComposed[index]) continue; 987 988 hwc_layer_1_t* layer = &list->hwLayers[index]; 989 private_handle_t *hnd = (private_handle_t *)layer->handle; 990 int mdpIndex = mCurrentFrame.layerToMDP[index]; 991 PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex]; 992 info.pipeInfo = new MdpPipeInfoHighRes; 993 info.rot = NULL; 994 MdpPipeInfoHighRes& pipe_info = *(MdpPipeInfoHighRes*)info.pipeInfo; 995 ePipeType type = MDPCOMP_OV_ANY; 996 997 if(isYuvBuffer(hnd)) { 998 type = MDPCOMP_OV_VG; 999 } else if(!qhwc::needsScaling(ctx, layer, mDpy) 1000 && Overlay::getDMAMode() != Overlay::DMA_BLOCK_MODE 1001 && ctx->mMDP.version >= qdutils::MDSS_V5) { 1002 type = MDPCOMP_OV_DMA; 1003 } 1004 1005 if(!acquireMDPPipes(ctx, layer, pipe_info, type)) { 1006 ALOGD_IF(isDebug(), "%s: Unable to get pipe for type = %d", 1007 __FUNCTION__, (int) type); 1008 return false; 1009 } 1010 } 1011 return true; 1012} 1013/* 1014 * Configures pipe(s) for MDP composition 1015 */ 1016int MDPCompHighRes::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 1017 PipeLayerPair& PipeLayerPair) { 1018 MdpPipeInfoHighRes& mdp_info = 1019 *(static_cast<MdpPipeInfoHighRes*>(PipeLayerPair.pipeInfo)); 1020 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 1021 eIsFg isFg = IS_FG_OFF; 1022 eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION; 1023 eDest lDest = mdp_info.lIndex; 1024 eDest rDest = mdp_info.rIndex; 1025 1026 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d" 1027 "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest); 1028 1029 return 
configureHighRes(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest, 1030 rDest, &PipeLayerPair.rot); 1031} 1032 1033bool MDPCompHighRes::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 1034 1035 if(!isEnabled()) { 1036 ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__); 1037 return true; 1038 } 1039 1040 if(!ctx || !list) { 1041 ALOGE("%s: invalid contxt or list",__FUNCTION__); 1042 return false; 1043 } 1044 1045 /* reset Invalidator */ 1046 if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) 1047 idleInvalidator->markForSleep(); 1048 1049 overlay::Overlay& ov = *ctx->mOverlay; 1050 LayerProp *layerProp = ctx->layerProp[mDpy]; 1051 1052 int numHwLayers = ctx->listStats[mDpy].numAppLayers; 1053 for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ ) 1054 { 1055 if(mCurrentFrame.isFBComposed[i]) continue; 1056 1057 hwc_layer_1_t *layer = &list->hwLayers[i]; 1058 private_handle_t *hnd = (private_handle_t *)layer->handle; 1059 if(!hnd) { 1060 ALOGE("%s handle null", __FUNCTION__); 1061 return false; 1062 } 1063 1064 if(!(layerProp[i].mFlags & HWC_MDPCOMP)) { 1065 continue; 1066 } 1067 1068 int mdpIndex = mCurrentFrame.layerToMDP[i]; 1069 1070 MdpPipeInfoHighRes& pipe_info = 1071 *(MdpPipeInfoHighRes*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 1072 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 1073 1074 ovutils::eDest indexL = pipe_info.lIndex; 1075 ovutils::eDest indexR = pipe_info.rIndex; 1076 1077 int fd = hnd->fd; 1078 int offset = hnd->offset; 1079 1080 if(rot) { 1081 rot->queueBuffer(fd, offset); 1082 fd = rot->getDstMemId(); 1083 offset = rot->getDstOffset(); 1084 } 1085 1086 //************* play left mixer ********** 1087 if(indexL != ovutils::OV_INVALID) { 1088 ovutils::eDest destL = (ovutils::eDest)indexL; 1089 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1090 using pipe: %d", __FUNCTION__, layer, hnd, indexL ); 1091 if (!ov.queueBuffer(fd, offset, destL)) { 1092 ALOGE("%s: queueBuffer 
failed for left mixer", __FUNCTION__); 1093 return false; 1094 } 1095 } 1096 1097 //************* play right mixer ********** 1098 if(indexR != ovutils::OV_INVALID) { 1099 ovutils::eDest destR = (ovutils::eDest)indexR; 1100 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1101 using pipe: %d", __FUNCTION__, layer, hnd, indexR ); 1102 if (!ov.queueBuffer(fd, offset, destR)) { 1103 ALOGE("%s: queueBuffer failed for right mixer", __FUNCTION__); 1104 return false; 1105 } 1106 } 1107 1108 layerProp[i].mFlags &= ~HWC_MDPCOMP; 1109 } 1110 1111 return true; 1112} 1113}; //namespace 1114 1115