hwc_mdpcomp.cpp revision a8c3d11acf21811ee74589d08dbcc037cd763526
/*
 * Copyright (C) 2012-2013, The Linux Foundation. All rights reserved.
 * Not a Contribution, Apache license notifications and license are retained
 * for attribution purposes only.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <math.h>
#include "hwc_mdpcomp.h"
#include <sys/ioctl.h>
#include "external.h"
#include "qdMetaData.h"
#include "mdp_version.h"
#include "hwc_fbupdate.h"
#include <overlayRotator.h>

using namespace overlay;
using namespace qdutils;
using namespace overlay::utils;
namespace ovutils = overlay::utils;

namespace qhwc {

//==============MDPComp========================================================

IdleInvalidator *MDPComp::idleInvalidator = NULL;
bool MDPComp::sIdleFallBack = false;
bool MDPComp::sDebugLogs = false;
bool MDPComp::sEnabled = false;
bool MDPComp::sEnableMixedMode = true;
int MDPComp::sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;

// Factory: picks the dual-mixer (HighRes) strategy when the panel is wider
// than one mixer can drive, or when a right split is published; otherwise the
// single-mixer (LowRes) strategy.
MDPComp* MDPComp::getObject(const int& width, const int& rightSplit,
        const int& dpy) {
    if(width > MAX_DISPLAY_DIM || rightSplit) {
        return new MDPCompHighRes(dpy);
    }
    return new MDPCompLowRes(dpy);
}

MDPComp::MDPComp(int dpy):mDpy(dpy){};

// Appends a human-readable table of the cached/current frame state to buf for
// dumpsys. Takes mMdpCompLock — callers must not already hold it.
void MDPComp::dump(android::String8& buf)
{
    Locker::Autolock _l(mMdpCompLock);
    dumpsys_log(buf,"HWC Map for Dpy: %s \n",
                mDpy ? "\"EXTERNAL\"" : "\"PRIMARY\"");
    dumpsys_log(buf,"PREV_FRAME: layerCount:%2d mdpCount:%2d \
                cacheCount:%2d \n", mCachedFrame.layerCount,
                mCachedFrame.mdpCount, mCachedFrame.cacheCount);
    dumpsys_log(buf,"CURR_FRAME: layerCount:%2d mdpCount:%2d \
                fbCount:%2d \n", mCurrentFrame.layerCount,
                mCurrentFrame.mdpCount, mCurrentFrame.fbCount);
    dumpsys_log(buf,"needsFBRedraw:%3s pipesUsed:%2d MaxPipesPerMixer: %d \n",
                (mCurrentFrame.needsRedraw? "YES" : "NO"),
                mCurrentFrame.mdpCount, sMaxPipesPerMixer);
    dumpsys_log(buf," --------------------------------------------- \n");
    dumpsys_log(buf," listIdx | cached? | mdpIndex | comptype | Z \n");
    dumpsys_log(buf," --------------------------------------------- \n");
    for(int index = 0; index < mCurrentFrame.layerCount; index++ )
        dumpsys_log(buf," %7d | %7s | %8d | %9s | %2d \n",
                    index,
                    (mCurrentFrame.isFBComposed[index] ? "YES" : "NO"),
                    mCurrentFrame.layerToMDP[index],
                    (mCurrentFrame.isFBComposed[index] ?
                     (mCurrentFrame.needsRedraw ? "GLES" : "CACHE") : "MDP"),
                    (mCurrentFrame.isFBComposed[index] ? mCurrentFrame.fbZ :
        mCurrentFrame.mdpToLayer[mCurrentFrame.layerToMDP[index]].pipeInfo->zOrder));
    dumpsys_log(buf,"\n");
}

// One-time setup: reads enable/debug properties, programs the BORDERFILL
// base pipe on the primary panel (when MDP comp is enabled), and arms the
// idle-fallback invalidator. Returns false on invalid ctx or base-pipe
// failure.
bool MDPComp::init(hwc_context_t *ctx) {

    if(!ctx) {
        ALOGE("%s: Invalid hwc context!!",__FUNCTION__);
        return false;
    }

    char property[PROPERTY_VALUE_MAX];

    sEnabled = false;
    if((property_get("persist.hwc.mdpcomp.enable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnabled = true;
        if(!setupBasePipe(ctx)) {
            ALOGE("%s: Failed to setup primary base pipe", __FUNCTION__);
            return false;
        }
    }

    sEnableMixedMode = true;
    if((property_get("debug.mdpcomp.mixedmode.disable", property, NULL) > 0) &&
       (!strncmp(property, "1", PROPERTY_VALUE_MAX ) ||
        (!strncasecmp(property,"true", PROPERTY_VALUE_MAX )))) {
        sEnableMixedMode = false;
    }

    sDebugLogs = false;
    if(property_get("debug.mdpcomp.logs", property, NULL) > 0) {
        if(atoi(property) != 0)
            sDebugLogs = true;
    }

    sMaxPipesPerMixer = MAX_PIPES_PER_MIXER;
    //Default "-1" keeps the compiled-in maximum (val < 0 skips the override).
    if(property_get("debug.mdpcomp.maxpermixer", property, "-1") > 0) {
        int val = atoi(property);
        if(val >= 0)
            sMaxPipesPerMixer = min(val, MAX_PIPES_PER_MIXER);
    }

    long idle_timeout = DEFAULT_IDLE_TIME;
    if(property_get("debug.mdpcomp.idletime", property, NULL) > 0) {
        if(atoi(property) != 0)
            idle_timeout = atoi(property);
    }

    //create Idle Invalidator only when not disabled through property
    if(idle_timeout != -1)
        idleInvalidator = IdleInvalidator::getInstance();

    if(idleInvalidator == NULL) {
        ALOGE("%s: failed to instantiate idleInvalidator object", __FUNCTION__);
    } else {
        idleInvalidator->init(timeout_handler, ctx, idle_timeout);
    }
    return true;
}

// Drops all per-frame MDP state and records every current handle as cached,
// so the next cycle is treated as a full GLES (FB) composition.
void MDPComp::reset(const int& numLayers, hwc_display_contents_1_t* list) {
    mCurrentFrame.reset(numLayers);
    mCachedFrame.cacheAll(list);
    mCachedFrame.updateCounts(mCurrentFrame);
}

// IdleInvalidator callback: flags idle fallback and asks SurfaceFlinger to
// repaint so the frame can be recomposed by GLES.
void MDPComp::timeout_handler(void *udata) {
    struct hwc_context_t* ctx = (struct hwc_context_t*)(udata);

    if(!ctx) {
        ALOGE("%s: received empty data in timer callback", __FUNCTION__);
        return;
    }

    if(!ctx->proc) {
        ALOGE("%s: HWC proc not registered", __FUNCTION__);
        return;
    }
    sIdleFallBack = true;
    /* Trigger SF to redraw the current frame */
    ctx->proc->invalidate(ctx->proc);
}

// Publishes the composition decision back to the layer list: MDP-composed
// layers become HWC_OVERLAY (and their cache entry is invalidated); cached
// FB layers are marked OVERLAY too when no FB redraw is needed.
void MDPComp::setMDPCompLayerFlags(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    LayerProp *layerProp = ctx->layerProp[mDpy];

    for(int index = 0; index < ctx->listStats[mDpy].numAppLayers; index++) {
        hwc_layer_1_t* layer = &(list->hwLayers[index]);
        if(!mCurrentFrame.isFBComposed[index]) {
            layerProp[index].mFlags |= HWC_MDPCOMP;
            layer->compositionType = HWC_OVERLAY;
            layer->hints |= HWC_HINT_CLEAR_FB;
            mCachedFrame.hnd[index] = NULL;
        } else {
            if(!mCurrentFrame.needsRedraw)
                layer->compositionType = HWC_OVERLAY;
        }
    }
}

/*
 * Sets up BORDERFILL as default base pipe and detaches RGB0.
 * Framebuffer is always updated using PLAY ioctl.
 */
bool MDPComp::setupBasePipe(hwc_context_t *ctx) {
    const int dpy = HWC_DISPLAY_PRIMARY;
    int fb_stride = ctx->dpyAttr[dpy].stride;
    int fb_width = ctx->dpyAttr[dpy].xres;
    int fb_height = ctx->dpyAttr[dpy].yres;
    int fb_fd = ctx->dpyAttr[dpy].fd;

    mdp_overlay ovInfo;
    msmfb_overlay_data ovData;
    memset(&ovInfo, 0, sizeof(mdp_overlay));
    memset(&ovData, 0, sizeof(msmfb_overlay_data));

    //Full-screen borderfill overlay request against the primary fb device.
    ovInfo.src.format = MDP_RGB_BORDERFILL;
    ovInfo.src.width  = fb_width;
    ovInfo.src.height = fb_height;
    ovInfo.src_rect.w = fb_width;
    ovInfo.src_rect.h = fb_height;
    ovInfo.dst_rect.w = fb_width;
    ovInfo.dst_rect.h = fb_height;
    ovInfo.id = MSMFB_NEW_REQUEST;

    if (ioctl(fb_fd, MSMFB_OVERLAY_SET, &ovInfo) < 0) {
        ALOGE("Failed to call ioctl MSMFB_OVERLAY_SET err=%s",
              strerror(errno));
        return false;
    }

    ovData.id = ovInfo.id;
    if (ioctl(fb_fd, MSMFB_OVERLAY_PLAY, &ovData) < 0) {
        ALOGE("Failed to call ioctl MSMFB_OVERLAY_PLAY err=%s",
              strerror(errno));
        return false;
    }
    return true;
}

MDPComp::FrameInfo::FrameInfo() {
    reset(0);
}

// Frees per-layer pipe info (rotators are owned elsewhere) and reinitializes
// all maps/counters: every layer starts out FB-composed.
void MDPComp::FrameInfo::reset(const int& numLayers) {
    for(int i = 0 ; i < MAX_PIPES_PER_MIXER && numLayers; i++ ) {
        if(mdpToLayer[i].pipeInfo) {
            delete mdpToLayer[i].pipeInfo;
            mdpToLayer[i].pipeInfo = NULL;
            //We dont own the rotator
            mdpToLayer[i].rot = NULL;
        }
    }

    memset(&mdpToLayer, 0, sizeof(mdpToLayer));
    memset(&layerToMDP, -1, sizeof(layerToMDP));
    memset(&isFBComposed, 1, sizeof(isFBComposed));

    layerCount = numLayers;
    fbCount = numLayers;
    mdpCount = 0;
    needsRedraw = true;
    fbZ = 0;
}

// Builds the bidirectional list-index <-> MDP-slot maps, assigning MDP slots
// in list order to the layers not composed on the FB.
void MDPComp::FrameInfo::map() {
    // populate layer and MDP maps
    int mdpIdx = 0;
    for(int idx = 0; idx < layerCount; idx++) {
        if(!isFBComposed[idx]) {
            mdpToLayer[mdpIdx].listIndex = idx;
            layerToMDP[idx] = mdpIdx++;
        }
    }
}

MDPComp::LayerCache::LayerCache() {
    reset();
}

void MDPComp::LayerCache::reset() {
    memset(&hnd, 0, sizeof(hnd));
    mdpCount = 0;
    cacheCount = 0;
    layerCount = 0;
    fbZ = -1;
}

// Records every app layer's buffer handle (numHwLayers - 1 excludes the
// trailing framebuffer-target layer).
void MDPComp::LayerCache::cacheAll(hwc_display_contents_1_t* list) {
    const int numAppLayers = list->numHwLayers - 1;
    for(int i = 0; i < numAppLayers; i++) {
        hnd[i] = list->hwLayers[i].handle;
    }
}

// Snapshots the frame's counters; curFrame.fbCount becomes this cache's
// cacheCount for next-frame comparison in prepare().
void MDPComp::LayerCache::updateCounts(const FrameInfo& curFrame) {
    mdpCount = curFrame.mdpCount;
    cacheCount = curFrame.fbCount;
    layerCount = curFrame.layerCount;
    fbZ = curFrame.fbZ;
}

// Returns false when the layer's crop/scale exceeds what the MDP pipes can
// handle (tiny crops, downscale beyond HW limits, overly wide crops without
// decimation support).
bool MDPComp::isValidDimension(hwc_context_t *ctx, hwc_layer_1_t *layer) {
    const int dpy = HWC_DISPLAY_PRIMARY;
    private_handle_t *hnd = (private_handle_t *)layer->handle;

    if(!hnd) {
        ALOGE("%s: layer handle is NULL", __FUNCTION__);
        return false;
    }

    int hw_w = ctx->dpyAttr[mDpy].xres;
    int hw_h = ctx->dpyAttr[mDpy].yres;

    hwc_rect_t crop = layer->sourceCrop;
    hwc_rect_t dst = layer->displayFrame;

    //Clip the crop to the panel when the destination spills off-screen.
    if(dst.left < 0 || dst.top < 0 || dst.right > hw_w || dst.bottom > hw_h) {
        hwc_rect_t scissor = {0, 0, hw_w, hw_h };
        qhwc::calculate_crop_rects(crop, dst, scissor, layer->transform);
    }

    int crop_w = crop.right - crop.left;
    int crop_h = crop.bottom - crop.top;
    int dst_w = dst.right - dst.left;
    int dst_h = dst.bottom - dst.top;
    float w_dscale = ceilf((float)crop_w / (float)dst_w);
    float h_dscale = ceilf((float)crop_h / (float)dst_h);

    //Workaround for MDP HW limitation in DSI command mode panels where
    //FPS will not go beyond 30 if buffers on RGB pipes are of width < 5

    if((crop_w < 5)||(crop_h < 5))
        return false;

    const uint32_t downscale =
            qdutils::MDPVersion::getInstance().getMaxMDPDownscale();
    if(ctx->mMDP.version >= qdutils::MDSS_V5) {
        if(!qdutils::MDPVersion::getInstance().supportsDecimation()) {
            if(crop_w > MAX_DISPLAY_DIM || w_dscale > downscale ||
                    h_dscale > downscale)
                return false;
        } else if(w_dscale > 64 || h_dscale > 64) {
            return false;
        }
    } else { //A-family
        if(w_dscale > downscale || h_dscale > downscale)
            return false;
    }

    return true;
}

// Grabs a free pipe of (at least) the requested class on the given mixer,
// degrading DMA -> RGB -> VG via deliberate case fall-throughs. Returns
// OV_INVALID when nothing suitable is free.
ovutils::eDest MDPComp::getMdpPipe(hwc_context_t *ctx, ePipeType type,
        int mixer) {
    overlay::Overlay& ov = *ctx->mOverlay;
    ovutils::eDest mdp_pipe = ovutils::OV_INVALID;

    switch(type) {
    case MDPCOMP_OV_DMA:
        mdp_pipe = ov.nextPipe(ovutils::OV_MDP_PIPE_DMA, mDpy, mixer);
        if(mdp_pipe != ovutils::OV_INVALID) {
            return mdp_pipe;
        }
        //Intentional fall-through: no DMA pipe free, try RGB next.
    case MDPCOMP_OV_ANY:
    case MDPCOMP_OV_RGB:
        mdp_pipe = ov.nextPipe(ovutils::OV_MDP_PIPE_RGB, mDpy, mixer);
        if(mdp_pipe != ovutils::OV_INVALID) {
            return mdp_pipe;
        }

        if(type == MDPCOMP_OV_RGB) {
            //Requested only for RGB pipe
            break;
        }
        //Intentional fall-through: ANY (or demoted DMA) may use a VG pipe.
    case MDPCOMP_OV_VG:
        return ov.nextPipe(ovutils::OV_MDP_PIPE_VG, mDpy, mixer);
    default:
        ALOGE("%s: Invalid pipe type",__FUNCTION__);
        return ovutils::OV_INVALID;
    };
    return ovutils::OV_INVALID;
}

// Hard preconditions for attempting any MDP composition this cycle
// (enabled, no pending external-display config, no padding round).
bool MDPComp::isFrameDoable(hwc_context_t *ctx) {
    bool ret = true;
    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp. not enabled.", __FUNCTION__);
        ret = false;
    } else if(qdutils::MDPVersion::getInstance().is8x26() &&
            ctx->mVideoTransFlag &&
            ctx->mExtDisplay->isExternalConnected()) {
        //1 Padding round to shift pipes across mixers
        ALOGD_IF(isDebug(),"%s: MDP Comp. video transition padding round",
                __FUNCTION__);
        ret = false;
    } else if(ctx->mExtDispConfiguring) {
        ALOGD_IF( isDebug(),"%s: External Display connection is pending",
                  __FUNCTION__);
        ret = false;
    } else if(ctx->isPaddingRound) {
        //isPaddingRound is consumed (cleared) here.
        ctx->isPaddingRound = false;
        ALOGD_IF(isDebug(), "%s: padding round",__FUNCTION__);
        ret = false;
    }
    return ret;
}

/* Checks for conditions where all the layers marked for MDP comp cannot be
 * bypassed. On such conditions we try to bypass atleast YUV layers */
bool MDPComp::isFullFrameDoable(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){

    const int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    if(sIdleFallBack) {
        ALOGD_IF(isDebug(), "%s: Idle fallback dpy %d",__FUNCTION__, mDpy);
        return false;
    }

    if(mDpy > HWC_DISPLAY_PRIMARY){
        ALOGD_IF(isDebug(), "%s: Cannot support External display(s)",
                 __FUNCTION__);
        return false;
    }

    if(isSkipPresent(ctx, mDpy)) {
        ALOGD_IF(isDebug(),"%s: SKIP present: %d",
                 __FUNCTION__,
                 isSkipPresent(ctx, mDpy));
        return false;
    }

    if(ctx->listStats[mDpy].planeAlpha
       && ctx->mMDP.version >= qdutils::MDSS_V5) {
        ALOGD_IF(isDebug(), "%s: plane alpha not implemented on MDSS",
                 __FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].needsAlphaScale
       && ctx->mMDP.version < qdutils::MDSS_V5) {
        ALOGD_IF(isDebug(), "%s: frame needs alpha downscaling",__FUNCTION__);
        return false;
    }

    //MDP composition is not efficient if layer needs rotator.
    for(int i = 0; i < numAppLayers; ++i) {
        // As MDP h/w supports flip operation, use MDP comp only for
        // 180 transforms. Fail for any transform involving 90 (90, 270).
        hwc_layer_1_t* layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(isYuvBuffer(hnd) ) {
            if(isSecuring(ctx, layer)) {
                ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
                return false;
            }
        } else if(layer->transform & HWC_TRANSFORM_ROT_90) {
            ALOGD_IF(isDebug(), "%s: orientation involved",__FUNCTION__);
            return false;
        }

        if(!isValidDimension(ctx,layer)) {
            ALOGD_IF(isDebug(), "%s: Buffer is of invalid width",
                __FUNCTION__);
            return false;
        }

        //For 8x26 with panel width>1k, if RGB layer needs HFLIP fail mdp comp
        // may not need it if Gfx pre-rotation can handle all flips & rotations
        if(qdutils::MDPVersion::getInstance().is8x26() &&
                (ctx->dpyAttr[mDpy].xres > 1024) &&
                (layer->transform & HWC_TRANSFORM_FLIP_H) &&
                (!isYuvBuffer(hnd)))
            return false;
    }

    //If all above hard conditions are met we can do full or partial MDP comp.
    bool ret = false;
    if(fullMDPComp(ctx, list)) {
        ret = true;
    } else if(partialMDPComp(ctx, list)) {
        ret = true;
    }
    return ret;
}

// Attempts to place EVERY layer on an MDP pipe (no FB involvement).
bool MDPComp::fullMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    //Setup mCurrentFrame
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount;
    mCurrentFrame.fbCount = 0;
    mCurrentFrame.fbZ = -1;
    memset(&mCurrentFrame.isFBComposed, 0, sizeof(mCurrentFrame.isFBComposed));

    int mdpCount = mCurrentFrame.mdpCount;
    if(mdpCount > sMaxPipesPerMixer) {
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }

    if(!arePipesAvailable(ctx, list)) {
        return false;
    }

    return true;
}

// Mixed mode: unchanged layers are batched into the cached FB, the rest go
// through MDP pipes. One pipe is reserved for the FB itself.
bool MDPComp::partialMDPComp(hwc_context_t *ctx, hwc_display_contents_1_t* list)
{
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;

    if(!sEnableMixedMode) {
        //Mixed mode is disabled. No need to even try caching.
        return false;
    }

    //Setup mCurrentFrame
    mCurrentFrame.reset(numAppLayers);
    updateLayerCache(ctx, list);
    updateYUV(ctx, list);
    batchLayers(); //sets up fbZ also

    int mdpCount = mCurrentFrame.mdpCount;
    if(mdpCount > (sMaxPipesPerMixer - 1)) { // -1 since FB is used
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }

    if(!arePipesAvailable(ctx, list)) {
        return false;
    }

    return true;
}

// Fallback mode: only the YUV (video) layers go through MDP; all RGB layers
// are composed by GLES into the FB.
bool MDPComp::isOnlyVideoDoable(hwc_context_t *ctx,
        hwc_display_contents_1_t* list){
    int numAppLayers = ctx->listStats[mDpy].numAppLayers;
    mCurrentFrame.reset(numAppLayers);
    updateYUV(ctx, list);
    int mdpCount = mCurrentFrame.mdpCount;
    int fbNeeded = int(mCurrentFrame.fbCount != 0);

    if(!isYuvPresent(ctx, mDpy)) {
        return false;
    }

    if(!mdpCount)
        return false;

    if(mdpCount > (sMaxPipesPerMixer - fbNeeded)) {
        ALOGD_IF(isDebug(), "%s: Exceeds MAX_PIPES_PER_MIXER",__FUNCTION__);
        return false;
    }

    if(!arePipesAvailable(ctx, list)) {
        return false;
    }

    int nYuvCount = ctx->listStats[mDpy].yuvCount;
    for(int index = 0; index < nYuvCount ; index ++) {
        int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
        hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];
        if(layer->planeAlpha < 0xFF) {
            ALOGD_IF(isDebug(), "%s: Cannot handle YUV layer with plane alpha\
                    in video only mode",
                    __FUNCTION__);
            return false;
        }
    }

    return true;
}

/* Checks for conditions where YUV layers cannot be bypassed */
bool MDPComp::isYUVDoable(hwc_context_t* ctx, hwc_layer_1_t* layer) {
    if(isSkipLayer(layer)) {
        ALOGE("%s: Unable to bypass skipped YUV", __FUNCTION__);
        return false;
    }

    if(isSecuring(ctx, layer)) {
        ALOGD_IF(isDebug(), "%s: MDP securing is active", __FUNCTION__);
        return false;
    }

    if(!isValidDimension(ctx, layer)) {
571 ALOGD_IF(isDebug(), "%s: Buffer is of invalid width", 572 __FUNCTION__); 573 return false; 574 } 575 576 return true; 577} 578 579void MDPComp::batchLayers() { 580 /* Idea is to keep as many contiguous non-updating(cached) layers in FB and 581 * send rest of them through MDP. NEVER mark an updating layer for caching. 582 * But cached ones can be marked for MDP*/ 583 584 int maxBatchStart = -1; 585 int maxBatchCount = 0; 586 587 /* All or Nothing is cached. No batching needed */ 588 if(!mCurrentFrame.fbCount) { 589 mCurrentFrame.fbZ = -1; 590 return; 591 } 592 if(!mCurrentFrame.mdpCount) { 593 mCurrentFrame.fbZ = 0; 594 return; 595 } 596 597 /* Search for max number of contiguous (cached) layers */ 598 int i = 0; 599 while (i < mCurrentFrame.layerCount) { 600 int count = 0; 601 while(mCurrentFrame.isFBComposed[i] && i < mCurrentFrame.layerCount) { 602 count++; i++; 603 } 604 if(count > maxBatchCount) { 605 maxBatchCount = count; 606 maxBatchStart = i - count; 607 mCurrentFrame.fbZ = maxBatchStart; 608 } 609 if(i < mCurrentFrame.layerCount) i++; 610 } 611 612 /* reset rest of the layers for MDP comp */ 613 for(int i = 0; i < mCurrentFrame.layerCount; i++) { 614 if(i != maxBatchStart){ 615 mCurrentFrame.isFBComposed[i] = false; 616 } else { 617 i += maxBatchCount; 618 } 619 } 620 621 mCurrentFrame.fbCount = maxBatchCount; 622 mCurrentFrame.mdpCount = mCurrentFrame.layerCount - 623 mCurrentFrame.fbCount; 624 625 ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, 626 mCurrentFrame.fbCount); 627} 628 629void MDPComp::updateLayerCache(hwc_context_t* ctx, 630 hwc_display_contents_1_t* list) { 631 632 int numAppLayers = ctx->listStats[mDpy].numAppLayers; 633 int numCacheableLayers = 0; 634 635 for(int i = 0; i < numAppLayers; i++) { 636 if (mCachedFrame.hnd[i] == list->hwLayers[i].handle) { 637 numCacheableLayers++; 638 mCurrentFrame.isFBComposed[i] = true; 639 } else { 640 mCurrentFrame.isFBComposed[i] = false; 641 mCachedFrame.hnd[i] = list->hwLayers[i].handle; 
        }
    }

    mCurrentFrame.fbCount = numCacheableLayers;
    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
            mCurrentFrame.fbCount;
    ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__, numCacheableLayers);
}

// Re-evaluates every YUV layer: undoable ones are pushed back to the FB,
// doable ones are pulled onto MDP; fbCount/mdpCount are kept consistent.
void MDPComp::updateYUV(hwc_context_t* ctx, hwc_display_contents_1_t* list) {

    int nYuvCount = ctx->listStats[mDpy].yuvCount;
    for(int index = 0;index < nYuvCount; index++){
        int nYuvIndex = ctx->listStats[mDpy].yuvIndices[index];
        hwc_layer_1_t* layer = &list->hwLayers[nYuvIndex];

        if(!isYUVDoable(ctx, layer)) {
            if(!mCurrentFrame.isFBComposed[nYuvIndex]) {
                mCurrentFrame.isFBComposed[nYuvIndex] = true;
                mCurrentFrame.fbCount++;
            }
        } else {
            if(mCurrentFrame.isFBComposed[nYuvIndex]) {
                mCurrentFrame.isFBComposed[nYuvIndex] = false;
                mCurrentFrame.fbCount--;
            }
        }
    }

    mCurrentFrame.mdpCount = mCurrentFrame.layerCount -
            mCurrentFrame.fbCount;
    ALOGD_IF(isDebug(),"%s: cached count: %d",__FUNCTION__,
             mCurrentFrame.fbCount);
}

// Allocates pipes and assigns z-orders for a full/mixed frame; the cached FB
// batch consumes exactly one z slot (fbBatch latch below).
bool MDPComp::programMDP(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    if(!allocLayerPipes(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
        return false;
    }

    bool fbBatch = false;
    for (int index = 0, mdpNextZOrder = 0; index < mCurrentFrame.layerCount;
            index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            int mdpIndex = mCurrentFrame.layerToMDP[index];
            hwc_layer_1_t* layer = &list->hwLayers[index];

            MdpPipeInfo* cur_pipe = mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            cur_pipe->zOrder = mdpNextZOrder++;

            if(configure(ctx, layer, mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
                ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
                        layer %d",__FUNCTION__, index);
                return false;
            }
        } else if(fbBatch == false) {
            //Reserve one z slot for the whole FB batch (first cached layer).
            mdpNextZOrder++;
            fbBatch = true;
        }
    }

    return true;
}

// Video-only variant of programMDP: z-orders count only MDP layers, since
// the FB (all RGB content) sits above them (fbZ = mdpCount in prepare()).
bool MDPComp::programYUV(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    if(!allocLayerPipes(ctx, list)) {
        ALOGD_IF(isDebug(), "%s: Unable to allocate MDP pipes", __FUNCTION__);
        return false;
    }
    //If we are in this block, it means we have yuv + rgb layers both
    int mdpIdx = 0;
    for (int index = 0; index < mCurrentFrame.layerCount; index++) {
        if(!mCurrentFrame.isFBComposed[index]) {
            hwc_layer_1_t* layer = &list->hwLayers[index];
            int mdpIndex = mCurrentFrame.layerToMDP[index];
            MdpPipeInfo* cur_pipe =
                    mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
            cur_pipe->zOrder = mdpIdx++;

            if(configure(ctx, layer,
                    mCurrentFrame.mdpToLayer[mdpIndex]) != 0 ){
                ALOGD_IF(isDebug(), "%s: Failed to configure overlay for \
                        layer %d",__FUNCTION__, index);
                return false;
            }
        }
    }
    return true;
}

// Per-frame entry point from hwc_prepare(): chooses full MDP comp, mixed
// (MDP + cached FB), video-only MDP, or full GLES fallback, programs the
// pipes, and publishes layer flags. Returns 0 on success, -1 when the frame
// must be composed entirely by GLES.
int MDPComp::prepare(hwc_context_t *ctx, hwc_display_contents_1_t* list) {
    const int numLayers = ctx->listStats[mDpy].numAppLayers;

    { //LOCK SCOPE BEGIN
        Locker::Autolock _l(mMdpCompLock);

        //reset old data
        mCurrentFrame.reset(numLayers);

        //number of app layers exceeds MAX_NUM_APP_LAYERS fall back to GPU
        //do not cache the information for next draw cycle.
        if(numLayers > MAX_NUM_APP_LAYERS) {
            mCachedFrame.updateCounts(mCurrentFrame);
            ALOGD_IF(isDebug(), "%s: Number of App layers exceeded the limit ",
                     __FUNCTION__);
            return -1;
        }

        //Hard conditions, if not met, cannot do MDP comp
        if(!isFrameDoable(ctx)) {
            ALOGD_IF( isDebug(),"%s: MDP Comp not possible for this frame",
                      __FUNCTION__);
            reset(numLayers, list);
            return -1;
        }

        //Check whether layers marked for MDP Composition is actually doable.
        if(isFullFrameDoable(ctx, list)){
            mCurrentFrame.map();
            //Configure framebuffer first if applicable
            if(mCurrentFrame.fbZ >= 0) {
                if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list,
                        mCurrentFrame.fbZ)) {
                    ALOGE("%s configure framebuffer failed", __func__);
                    reset(numLayers, list);
                    return -1;
                }
            }
            //Acquire and Program MDP pipes
            if(!programMDP(ctx, list)) {
                reset(numLayers, list);
                return -1;
            } else { //Success
                //Any change in composition types needs an FB refresh
                mCurrentFrame.needsRedraw = false;
                if(mCurrentFrame.fbCount &&
                        ((mCurrentFrame.mdpCount != mCachedFrame.mdpCount) ||
                         (mCurrentFrame.fbCount != mCachedFrame.cacheCount) ||
                         (mCurrentFrame.fbZ != mCachedFrame.fbZ) ||
                         (!mCurrentFrame.mdpCount) ||
                         (list->flags & HWC_GEOMETRY_CHANGED) ||
                         isSkipPresent(ctx, mDpy) ||
                         (mDpy > HWC_DISPLAY_PRIMARY))) {
                    mCurrentFrame.needsRedraw = true;
                }
            }
        } else if(isOnlyVideoDoable(ctx, list)) {
            //All layers marked for MDP comp cannot be bypassed.
            //Try to compose atleast YUV layers through MDP comp and let
            //all the RGB layers compose in FB
            //Destination over
            mCurrentFrame.fbZ = -1;
            if(mCurrentFrame.fbCount)
                mCurrentFrame.fbZ = mCurrentFrame.mdpCount;

            mCurrentFrame.map();

            //Configure framebuffer first if applicable
            if(mCurrentFrame.fbZ >= 0) {
                if(!ctx->mFBUpdate[mDpy]->prepare(ctx, list, mCurrentFrame.fbZ)) {
                    ALOGE("%s configure framebuffer failed", __func__);
                    reset(numLayers, list);
                    return -1;
                }
            }
            if(!programYUV(ctx, list)) {
                reset(numLayers, list);
                return -1;
            }
        } else {
            reset(numLayers, list);
            return -1;
        }

        //UpdateLayerFlags
        setMDPCompLayerFlags(ctx, list);
        mCachedFrame.updateCounts(mCurrentFrame);

    } //LOCK SCOPE END. dump also need this lock.
    // unlock it before calling dump function to avoid deadlock
    if(isDebug()) {
        ALOGD("GEOMETRY change: %d", (list->flags & HWC_GEOMETRY_CHANGED));
        android::String8 sDump("");
        dump(sDump);
        ALOGE("%s",sDump.string());
    }

    return 0;
}

//=============MDPCompLowRes===================================================

/*
 * Configures pipe(s) for MDP composition
 */
int MDPCompLowRes::configure(hwc_context_t *ctx, hwc_layer_1_t *layer,
        PipeLayerPair& PipeLayerPair) {
    MdpPipeInfoLowRes& mdp_info =
            *(static_cast<MdpPipeInfoLowRes*>(PipeLayerPair.pipeInfo));
    eMdpFlags mdpFlags = OV_MDP_BACKEND_COMPOSITION;
    eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder);
    eIsFg isFg = IS_FG_OFF;
    eDest dest = mdp_info.index;

    ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipe: %d",
             __FUNCTION__, layer, zOrder, dest);

    return configureLowRes(ctx, layer, mDpy, mdpFlags, zOrder, isFg, dest,
                           &PipeLayerPair.rot);
}

// Single-mixer availability check; one pipe is withheld for the FB when the
// frame has cached (FB-composed) layers.
bool MDPCompLowRes::arePipesAvailable(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    overlay::Overlay& ov = *ctx->mOverlay;
    int numPipesNeeded = mCurrentFrame.mdpCount;
    int availPipes = ov.availablePipes(mDpy, Overlay::MIXER_DEFAULT);

    //Reserve pipe for FB
    if(mCurrentFrame.fbCount)
        availPipes -= 1;

    if(numPipesNeeded > availPipes) {
        ALOGD_IF(isDebug(), "%s: Insufficient pipes, dpy %d needed %d, avail %d",
                __FUNCTION__, mDpy, numPipesNeeded, availPipes);
        return false;
    }

    return true;
}

// Allocates one pipe per MDP layer on the default mixer: VG for YUV buffers,
// DMA when no scaling is needed (MDSS only, DMA not in block mode), else any.
bool MDPCompLowRes::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        int mdpIndex = mCurrentFrame.layerToMDP[index];
        PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex];
        info.pipeInfo = new MdpPipeInfoLowRes;
        info.rot = NULL;
        MdpPipeInfoLowRes& pipe_info = *(MdpPipeInfoLowRes*)info.pipeInfo;
        ePipeType type = MDPCOMP_OV_ANY;

        if(isYuvBuffer(hnd)) {
            type = MDPCOMP_OV_VG;
        } else if(!qhwc::needsScaling(ctx, layer, mDpy)
            && Overlay::getDMAMode() != Overlay::DMA_BLOCK_MODE
            && ctx->mMDP.version >= qdutils::MDSS_V5) {
            type = MDPCOMP_OV_DMA;
        }

        pipe_info.index = getMdpPipe(ctx, type, Overlay::MIXER_DEFAULT);
        if(pipe_info.index == ovutils::OV_INVALID) {
            ALOGD_IF(isDebug(), "%s: Unable to get pipe type = %d",
                __FUNCTION__, (int) type);
            return false;
        }
    }
    return true;
}

// Queues the buffers of all MDP-composed layers (via their rotator first,
// when one was assigned). Returns false on any queue failure.
bool MDPCompLowRes::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) {

    if(!isEnabled()) {
        ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__);
        return true;
    }

    if(!ctx || !list) {
        ALOGE("%s: invalid contxt or list",__FUNCTION__);
        return false;
    }

    if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) {
        ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__);
        return true;
    }

    Locker::Autolock _l(mMdpCompLock);

    /* reset Invalidator */
    if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount)
        idleInvalidator->markForSleep();

    overlay::Overlay& ov = *ctx->mOverlay;
    LayerProp *layerProp = ctx->layerProp[mDpy];

    int numHwLayers = ctx->listStats[mDpy].numAppLayers;
    for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ )
    {
        if(mCurrentFrame.isFBComposed[i]) continue;

        hwc_layer_1_t *layer = &list->hwLayers[i];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            ALOGE("%s handle null", __FUNCTION__);
            return false;
        }

        int mdpIndex = mCurrentFrame.layerToMDP[i];

        MdpPipeInfoLowRes& pipe_info =
            *(MdpPipeInfoLowRes*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo;
        ovutils::eDest dest = pipe_info.index;
        if(dest == ovutils::OV_INVALID) {
            ALOGE("%s: Invalid pipe index (%d)", __FUNCTION__, dest);
            return false;
        }

        if(!(layerProp[i].mFlags & HWC_MDPCOMP)) {
            continue;
        }

        ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \
                using  pipe: %d", __FUNCTION__, layer,
                hnd, dest );

        int fd = hnd->fd;
        uint32_t offset = hnd->offset;
        //Rotator output (if any) replaces the layer's own buffer.
        Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot;
        if(rot) {
            if(!rot->queueBuffer(fd, offset))
                return false;
            fd = rot->getDstMemId();
            offset = rot->getDstOffset();
        }

        if (!ov.queueBuffer(fd, offset, dest)) {
            ALOGE("%s: queueBuffer failed for display:%d ", __FUNCTION__, mDpy);
            return false;
        }

        layerProp[i].mFlags &= ~HWC_MDPCOMP;
    }
    return true;
}

//=============MDPCompHighRes===================================================

// Counts pipes needed on one mixer: a layer costs a pipe on the left mixer
// when it starts left of the split, on the right mixer when it crosses it.
int MDPCompHighRes::pipesNeeded(hwc_context_t *ctx,
        hwc_display_contents_1_t* list,
        int mixer) {
    int pipesNeeded = 0;
    const int xres = ctx->dpyAttr[mDpy].xres;
    //Default even split for all displays with high res
    int lSplit = xres / 2;
    if(mDpy == HWC_DISPLAY_PRIMARY &&
            qdutils::MDPVersion::getInstance().getLeftSplit()) {
        //Override if split published by driver for primary
        lSplit = qdutils::MDPVersion::getInstance().getLeftSplit();
    }

    for(int i = 0; i < mCurrentFrame.layerCount; ++i) {
        if(!mCurrentFrame.isFBComposed[i]) {
            hwc_layer_1_t* layer = &list->hwLayers[i];
            hwc_rect_t dst = layer->displayFrame;
            if(mixer == Overlay::MIXER_LEFT && dst.left < lSplit) {
                pipesNeeded++;
            } else if(mixer == Overlay::MIXER_RIGHT && dst.right > lSplit) {
                pipesNeeded++;
            }
        }
    }
    return pipesNeeded;
}

// Checks both mixers; one pipe per mixer is withheld for the FB when the
// frame has cached layers.
bool MDPCompHighRes::arePipesAvailable(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    overlay::Overlay& ov = *ctx->mOverlay;

    for(int i = 0; i < Overlay::MIXER_MAX; i++) {
        int numPipesNeeded = pipesNeeded(ctx, list, i);
        int availPipes = ov.availablePipes(mDpy, i);

        //Reserve pipe(s)for FB
        if(mCurrentFrame.fbCount)
            availPipes -= 1;

        if(numPipesNeeded > availPipes) {
            ALOGD_IF(isDebug(), "%s: Insufficient pipes for "
                     "dpy %d mixer %d needed %d, avail %d",
                     __FUNCTION__, mDpy, i, numPipesNeeded, availPipes);
            return false;
        }
    }
    return true;
}

// Grabs a left and/or right pipe for the layer depending on which side(s) of
// the mixer split its destination rect covers.
bool MDPCompHighRes::acquireMDPPipes(hwc_context_t *ctx, hwc_layer_1_t* layer,
        MdpPipeInfoHighRes& pipe_info,
        ePipeType type) {
    const int xres = ctx->dpyAttr[mDpy].xres;
    //Default even split for all displays with high res
    int lSplit = xres / 2;
    if(mDpy == HWC_DISPLAY_PRIMARY &&
            qdutils::MDPVersion::getInstance().getLeftSplit()) {
        //Override if split published by driver for primary
        lSplit = qdutils::MDPVersion::getInstance().getLeftSplit();
    }

    hwc_rect_t dst = layer->displayFrame;
    pipe_info.lIndex = ovutils::OV_INVALID;
    pipe_info.rIndex = ovutils::OV_INVALID;

    if (dst.left < lSplit) {
        pipe_info.lIndex = getMdpPipe(ctx, type, Overlay::MIXER_LEFT);
        if(pipe_info.lIndex == ovutils::OV_INVALID)
            return false;
    }

    if(dst.right > lSplit) {
        pipe_info.rIndex = getMdpPipe(ctx, type, Overlay::MIXER_RIGHT);
        if(pipe_info.rIndex == ovutils::OV_INVALID)
            return false;
    }

    return true;
}

// Split-mixer pipe allocation: same pipe-type selection as LowRes, but each
// layer may need a pipe on each mixer (acquireMDPPipes).
bool MDPCompHighRes::allocLayerPipes(hwc_context_t *ctx,
        hwc_display_contents_1_t* list) {
    for(int index = 0 ; index < mCurrentFrame.layerCount; index++) {

        if(mCurrentFrame.isFBComposed[index]) continue;

        hwc_layer_1_t* layer = &list->hwLayers[index];
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        int mdpIndex = mCurrentFrame.layerToMDP[index];
PipeLayerPair& info = mCurrentFrame.mdpToLayer[mdpIndex]; 1074 info.pipeInfo = new MdpPipeInfoHighRes; 1075 info.rot = NULL; 1076 MdpPipeInfoHighRes& pipe_info = *(MdpPipeInfoHighRes*)info.pipeInfo; 1077 ePipeType type = MDPCOMP_OV_ANY; 1078 1079 if(isYuvBuffer(hnd)) { 1080 type = MDPCOMP_OV_VG; 1081 } else if(!qhwc::needsScaling(ctx, layer, mDpy) 1082 && Overlay::getDMAMode() != Overlay::DMA_BLOCK_MODE 1083 && ctx->mMDP.version >= qdutils::MDSS_V5) { 1084 type = MDPCOMP_OV_DMA; 1085 } 1086 1087 if(!acquireMDPPipes(ctx, layer, pipe_info, type)) { 1088 ALOGD_IF(isDebug(), "%s: Unable to get pipe for type = %d", 1089 __FUNCTION__, (int) type); 1090 return false; 1091 } 1092 } 1093 return true; 1094} 1095 1096/* 1097 * Configures pipe(s) for MDP composition 1098 */ 1099int MDPCompHighRes::configure(hwc_context_t *ctx, hwc_layer_1_t *layer, 1100 PipeLayerPair& PipeLayerPair) { 1101 MdpPipeInfoHighRes& mdp_info = 1102 *(static_cast<MdpPipeInfoHighRes*>(PipeLayerPair.pipeInfo)); 1103 eZorder zOrder = static_cast<eZorder>(mdp_info.zOrder); 1104 eIsFg isFg = IS_FG_OFF; 1105 eMdpFlags mdpFlagsL = OV_MDP_BACKEND_COMPOSITION; 1106 eDest lDest = mdp_info.lIndex; 1107 eDest rDest = mdp_info.rIndex; 1108 1109 ALOGD_IF(isDebug(),"%s: configuring: layer: %p z_order: %d dest_pipeL: %d" 1110 "dest_pipeR: %d",__FUNCTION__, layer, zOrder, lDest, rDest); 1111 1112 return configureHighRes(ctx, layer, mDpy, mdpFlagsL, zOrder, isFg, lDest, 1113 rDest, &PipeLayerPair.rot); 1114} 1115 1116bool MDPCompHighRes::draw(hwc_context_t *ctx, hwc_display_contents_1_t* list) { 1117 1118 if(!isEnabled()) { 1119 ALOGD_IF(isDebug(),"%s: MDP Comp not configured", __FUNCTION__); 1120 return true; 1121 } 1122 1123 if(!ctx || !list) { 1124 ALOGE("%s: invalid contxt or list",__FUNCTION__); 1125 return false; 1126 } 1127 1128 if(ctx->listStats[mDpy].numAppLayers > MAX_NUM_APP_LAYERS) { 1129 ALOGD_IF(isDebug(),"%s: Exceeding max layer count", __FUNCTION__); 1130 return true; 1131 } 1132 1133 Locker::Autolock 
_l(mMdpCompLock); 1134 1135 /* reset Invalidator */ 1136 if(idleInvalidator && !sIdleFallBack && mCurrentFrame.mdpCount) 1137 idleInvalidator->markForSleep(); 1138 1139 overlay::Overlay& ov = *ctx->mOverlay; 1140 LayerProp *layerProp = ctx->layerProp[mDpy]; 1141 1142 int numHwLayers = ctx->listStats[mDpy].numAppLayers; 1143 for(int i = 0; i < numHwLayers && mCurrentFrame.mdpCount; i++ ) 1144 { 1145 if(mCurrentFrame.isFBComposed[i]) continue; 1146 1147 hwc_layer_1_t *layer = &list->hwLayers[i]; 1148 private_handle_t *hnd = (private_handle_t *)layer->handle; 1149 if(!hnd) { 1150 ALOGE("%s handle null", __FUNCTION__); 1151 return false; 1152 } 1153 1154 if(!(layerProp[i].mFlags & HWC_MDPCOMP)) { 1155 continue; 1156 } 1157 1158 int mdpIndex = mCurrentFrame.layerToMDP[i]; 1159 1160 MdpPipeInfoHighRes& pipe_info = 1161 *(MdpPipeInfoHighRes*)mCurrentFrame.mdpToLayer[mdpIndex].pipeInfo; 1162 Rotator *rot = mCurrentFrame.mdpToLayer[mdpIndex].rot; 1163 1164 ovutils::eDest indexL = pipe_info.lIndex; 1165 ovutils::eDest indexR = pipe_info.rIndex; 1166 1167 int fd = hnd->fd; 1168 int offset = hnd->offset; 1169 1170 if(rot) { 1171 rot->queueBuffer(fd, offset); 1172 fd = rot->getDstMemId(); 1173 offset = rot->getDstOffset(); 1174 } 1175 1176 //************* play left mixer ********** 1177 if(indexL != ovutils::OV_INVALID) { 1178 ovutils::eDest destL = (ovutils::eDest)indexL; 1179 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1180 using pipe: %d", __FUNCTION__, layer, hnd, indexL ); 1181 if (!ov.queueBuffer(fd, offset, destL)) { 1182 ALOGE("%s: queueBuffer failed for left mixer", __FUNCTION__); 1183 return false; 1184 } 1185 } 1186 1187 //************* play right mixer ********** 1188 if(indexR != ovutils::OV_INVALID) { 1189 ovutils::eDest destR = (ovutils::eDest)indexR; 1190 ALOGD_IF(isDebug(),"%s: MDP Comp: Drawing layer: %p hnd: %p \ 1191 using pipe: %d", __FUNCTION__, layer, hnd, indexR ); 1192 if (!ov.queueBuffer(fd, offset, destR)) { 1193 ALOGE("%s: 
queueBuffer failed for right mixer", __FUNCTION__); 1194 return false; 1195 } 1196 } 1197 1198 layerProp[i].mFlags &= ~HWC_MDPCOMP; 1199 } 1200 1201 return true; 1202} 1203}; //namespace 1204 1205