intel_dp.c revision 1f70385510991992f3aef339982ca790faa52b06
/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Keith Packard <keithp@keithp.com>
 *
 */

#include <linux/i2c.h>
#include <linux/slab.h>
#include <linux/export.h>
#include "drmP.h"
#include "drm.h"
#include "drm_crtc.h"
#include "drm_crtc_helper.h"
#include "drm_edid.h"
#include "intel_drv.h"
#include "i915_drm.h"
#include "i915_drv.h"

/* Size in bytes of the DPCD link status block read during link training. */
#define DP_LINK_STATUS_SIZE	6
/* Interval (ms) between periodic link-status checks. */
#define DP_LINK_CHECK_TIMEOUT	(10 * 1000)

/**
 * is_edp - is the given port attached to an eDP panel (either CPU or PCH)
 * @intel_dp: DP struct
 *
 * If a CPU or PCH DP output is attached to an eDP panel, this function
 * will return true, and false otherwise.
 */
static bool is_edp(struct intel_dp *intel_dp)
{
	return intel_dp->base.type == INTEL_OUTPUT_EDP;
}

/**
 * is_pch_edp - is the port on the PCH and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a PCH DP port attached
 * to an eDP panel, false otherwise.  Helpful for determining whether we
 * may need FDI resources for a given DP output or not.
 */
static bool is_pch_edp(struct intel_dp *intel_dp)
{
	return intel_dp->is_pch_edp;
}

/**
 * is_cpu_edp - is the port on the CPU and attached to an eDP panel?
 * @intel_dp: DP struct
 *
 * Returns true if the given DP struct corresponds to a CPU eDP port.
 */
static bool is_cpu_edp(struct intel_dp *intel_dp)
{
	return is_edp(intel_dp) && !is_pch_edp(intel_dp);
}

/* Map a drm_encoder back to the intel_dp that embeds it. */
static struct intel_dp *enc_to_intel_dp(struct drm_encoder *encoder)
{
	return container_of(encoder, struct intel_dp, base.base);
}

/* Map a connector to the intel_dp of its attached encoder. */
static struct intel_dp *intel_attached_dp(struct drm_connector *connector)
{
	return container_of(intel_attached_encoder(connector),
			    struct intel_dp, base);
}

/**
 * intel_encoder_is_pch_edp - is the given encoder a PCH attached eDP?
 * @encoder: DRM encoder
 *
 * Return true if @encoder corresponds to a PCH attached eDP panel.  Needed
 * by intel_display.c.
 */
bool intel_encoder_is_pch_edp(struct drm_encoder *encoder)
{
	struct intel_dp *intel_dp;

	if (!encoder)
		return false;

	intel_dp = enc_to_intel_dp(encoder);

	return is_pch_edp(intel_dp);
}

static void intel_dp_start_link_train(struct intel_dp *intel_dp);
static void intel_dp_complete_link_train(struct intel_dp *intel_dp);
static void intel_dp_link_down(struct intel_dp *intel_dp);

/*
 * Report the trained lane count and link bandwidth (in 10kHz units) of an
 * eDP encoder, for consumers in intel_display.c.
 */
void
intel_edp_link_config(struct intel_encoder *intel_encoder,
		      int *lane_num, int *link_bw)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	*lane_num = intel_dp->lane_count;
	if (intel_dp->link_bw == DP_LINK_BW_1_62)
		*link_bw = 162000;
	else if (intel_dp->link_bw == DP_LINK_BW_2_7)
		*link_bw = 270000;
}

/*
 * Target pixel clock for an eDP output: the fixed panel mode's clock when
 * one exists, otherwise the requested mode's clock.
 */
int
intel_edp_target_clock(struct intel_encoder *intel_encoder,
		       struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base);

	if (intel_dp->panel_fixed_mode)
		return intel_dp->panel_fixed_mode->clock;
	else
		return mode->clock;
}

/*
 * Maximum lane count advertised by the sink in DPCD; anything other than
 * 1, 2 or 4 is treated as 4.
 */
static int
intel_dp_max_lane_count(struct intel_dp *intel_dp)
{
	int max_lane_count = intel_dp->dpcd[DP_MAX_LANE_COUNT] & 0x1f;
	switch (max_lane_count) {
	case 1: case 2: case 4:
		break;
	default:
		max_lane_count = 4;
	}
	return max_lane_count;
}

/*
 * Maximum link rate advertised by the sink in DPCD; unknown values fall
 * back to the lowest rate (1.62 GHz).
 */
static int
intel_dp_max_link_bw(struct intel_dp *intel_dp)
{
	int max_link_bw = intel_dp->dpcd[DP_MAX_LINK_RATE];

	switch (max_link_bw) {
	case DP_LINK_BW_1_62:
	case DP_LINK_BW_2_7:
		break;
	default:
		max_link_bw = DP_LINK_BW_1_62;
		break;
	}
	return max_link_bw;
}

/* Convert a DPCD link-bw code to a link clock in 10kHz units. */
static int
intel_dp_link_clock(uint8_t link_bw)
{
	if (link_bw == DP_LINK_BW_2_7)
		return 270000;
	else
		return 162000;
}

/*
 * The units on the numbers in the next two are... bizarre.  Examples will
 * make it clearer; this one parallels an example in the eDP spec.
 *
 * intel_dp_max_data_rate for one lane of 2.7GHz evaluates as:
 *
 *     270000 * 1 * 8 / 10 == 216000
 *
 * The actual data capacity of that configuration is 2.16Gbit/s, so the
 * units are decakilobits.  ->clock in a drm_display_mode is in kilohertz -
 * or equivalently, kilopixels per second - so for 1680x1050R it'd be
 * 119000.  At 18bpp that's 2142000 kilobits per second.
 *
 * Thus the strange-looking division by 10 in intel_dp_link_required, to
 * get the result in decakilobits instead of kilobits.
 */

/* Bandwidth (decakilobits/s) needed by a mode at the given bpp; rounds up. */
static int
intel_dp_link_required(int pixel_clock, int bpp)
{
	return (pixel_clock * bpp + 9) / 10;
}

/* Link capacity (decakilobits/s); * 8 / 10 accounts for 8b/10b encoding. */
static int
intel_dp_max_data_rate(int max_link_clock, int max_lanes)
{
	return (max_link_clock * max_lanes * 8) / 10;
}

/*
 * Check whether a mode fits on the link at 24bpp; if not, try 18bpp and
 * (when @adjust_mode is set) flag the mode for forced 6bpc dithering.
 * Returns false only when even 18bpp exceeds the link capacity.
 */
static bool
intel_dp_adjust_dithering(struct intel_dp *intel_dp,
			  struct drm_display_mode *mode,
			  bool adjust_mode)
{
	int max_link_clock = intel_dp_link_clock(intel_dp_max_link_bw(intel_dp));
	int max_lanes = intel_dp_max_lane_count(intel_dp);
	int max_rate, mode_rate;

	mode_rate = intel_dp_link_required(mode->clock, 24);
	max_rate = intel_dp_max_data_rate(max_link_clock, max_lanes);

	if (mode_rate > max_rate) {
		mode_rate = intel_dp_link_required(mode->clock, 18);
		if (mode_rate > max_rate)
			return false;

		if (adjust_mode)
			mode->private_flags
				|= INTEL_MODE_DP_FORCE_6BPC;

		return true;
	}

	return true;
}

/* drm connector ->mode_valid hook: reject modes the panel/link can't carry. */
static int
intel_dp_mode_valid(struct drm_connector *connector,
		    struct drm_display_mode *mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
		if (mode->hdisplay > intel_dp->panel_fixed_mode->hdisplay)
			return MODE_PANEL;

		if (mode->vdisplay > intel_dp->panel_fixed_mode->vdisplay)
			return MODE_PANEL;
	}

	if (!intel_dp_adjust_dithering(intel_dp, mode, false))
		return MODE_CLOCK_HIGH;

	if (mode->clock < 10000)
		return MODE_CLOCK_LOW;

	if (mode->flags & DRM_MODE_FLAG_DBLCLK)
		return MODE_H_ILLEGAL;

	return MODE_OK;
}

/* Pack up to 4 bytes, MSB first, into one 32-bit AUX data register value. */
static uint32_t
pack_aux(uint8_t *src, int src_bytes)
{
	int i;
	uint32_t v = 0;

	if (src_bytes > 4)
		src_bytes = 4;
	for (i = 0; i < src_bytes; i++)
		v |= ((uint32_t) src[i]) << ((3-i) * 8);
	return v;
}

/* Unpack a 32-bit AUX data register value into up to 4 bytes, MSB first. */
static void
unpack_aux(uint32_t src, uint8_t *dst, int dst_bytes)
{
	int i;
	if (dst_bytes > 4)
		dst_bytes = 4;
	for (i = 0; i < dst_bytes; i++)
		dst[i] = src >> ((3-i) * 8);
}

/* hrawclock is 1/4 the FSB frequency; returns the value in MHz. */
static int
intel_hrawclk(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t clkcfg;

	clkcfg = I915_READ(CLKCFG);
	switch (clkcfg & CLKCFG_FSB_MASK) {
	case CLKCFG_FSB_400:
		return 100;
	case CLKCFG_FSB_533:
		return 133;
	case CLKCFG_FSB_667:
		return 166;
	case CLKCFG_FSB_800:
		return 200;
	case CLKCFG_FSB_1067:
		return 266;
	case CLKCFG_FSB_1333:
		return 333;
	/* these two are just a guess; one of them might be right */
	case CLKCFG_FSB_1600:
	case CLKCFG_FSB_1600_ALT:
		return 400;
	default:
		return 133;
	}
}

/* True when the panel power sequencer reports the panel as powered on. */
static bool ironlake_edp_have_panel_power(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_STATUS) & PP_ON) != 0;
}

/* True when panel VDD is currently being forced on. */
static bool ironlake_edp_have_panel_vdd(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	return (I915_READ(PCH_PP_CONTROL) & EDP_FORCE_VDD) != 0;
}

/*
 * Sanity check before AUX traffic on eDP: warn if neither panel power nor
 * forced VDD is up, since AUX transactions would then fail.
 */
static void
intel_dp_check_edp(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (!is_edp(intel_dp))
		return;
	if (!ironlake_edp_have_panel_power(intel_dp) && !ironlake_edp_have_panel_vdd(intel_dp)) {
		WARN(1, "eDP powered off while attempting aux channel communication.\n");
		DRM_DEBUG_KMS("Status 0x%08x Control 0x%08x\n",
			      I915_READ(PCH_PP_STATUS),
			      I915_READ(PCH_PP_CONTROL));
	}
}

/*
 * Perform one raw AUX channel transaction: write @send_bytes from @send,
 * then read back up to @recv_size bytes into @recv.  Returns the number of
 * bytes received, or a negative errno (-EBUSY, -EIO, -ETIMEDOUT).
 */
static int
intel_dp_aux_ch(struct intel_dp *intel_dp,
		uint8_t *send, int send_bytes,
		uint8_t *recv, int recv_size)
{
	uint32_t output_reg = intel_dp->output_reg;
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t ch_ctl = output_reg + 0x10;	/* AUX control reg follows the port reg */
	uint32_t ch_data = ch_ctl + 4;		/* then 5 dword data registers */
	int i;
	int recv_bytes;
	uint32_t status;
	uint32_t aux_clock_divider;
	int try, precharge;

	intel_dp_check_edp(intel_dp);
	/* The clock divider is based off the hrawclk,
	 * and would like to run at 2MHz.  So, take the
	 * hrawclk value and divide by 2 and use that
	 *
	 * Note that PCH attached eDP panels should use a 125MHz input
	 * clock divider.
	 */
	if (is_cpu_edp(intel_dp)) {
		if (IS_GEN6(dev) || IS_GEN7(dev))
			aux_clock_divider = 200; /* SNB & IVB eDP input clock at 400Mhz */
		else
			aux_clock_divider = 225; /* eDP input clock at 450Mhz */
	} else if (HAS_PCH_SPLIT(dev))
		aux_clock_divider = 63; /* IRL input clock fixed at 125Mhz */
	else
		aux_clock_divider = intel_hrawclk(dev) / 2;

	if (IS_GEN6(dev))
		precharge = 3;
	else
		precharge = 5;

	/* Try to wait for any previous AUX channel activity */
	for (try = 0; try < 3; try++) {
		status = I915_READ(ch_ctl);
		if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
			break;
		msleep(1);
	}

	if (try == 3) {
		WARN(1, "dp_aux_ch not started status 0x%08x\n",
		     I915_READ(ch_ctl));
		return -EBUSY;
	}

	/* Must try at least 3 times according to DP spec */
	for (try = 0; try < 5; try++) {
		/* Load the send data into the aux channel data registers */
		for (i = 0; i < send_bytes; i += 4)
			I915_WRITE(ch_data + i,
				   pack_aux(send + i, send_bytes - i));

		/* Send the command and wait for it to complete */
		I915_WRITE(ch_ctl,
			   DP_AUX_CH_CTL_SEND_BUSY |
			   DP_AUX_CH_CTL_TIME_OUT_400us |
			   (send_bytes << DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT) |
			   (precharge << DP_AUX_CH_CTL_PRECHARGE_2US_SHIFT) |
			   (aux_clock_divider << DP_AUX_CH_CTL_BIT_CLOCK_2X_SHIFT) |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);
		for (;;) {
			status = I915_READ(ch_ctl);
			if ((status & DP_AUX_CH_CTL_SEND_BUSY) == 0)
				break;
			udelay(100);
		}

		/* Clear done status and any errors */
		I915_WRITE(ch_ctl,
			   status |
			   DP_AUX_CH_CTL_DONE |
			   DP_AUX_CH_CTL_TIME_OUT_ERROR |
			   DP_AUX_CH_CTL_RECEIVE_ERROR);

		if (status & (DP_AUX_CH_CTL_TIME_OUT_ERROR |
			      DP_AUX_CH_CTL_RECEIVE_ERROR))
			continue;
		if (status & DP_AUX_CH_CTL_DONE)
			break;
	}

	if ((status & DP_AUX_CH_CTL_DONE) == 0) {
		DRM_ERROR("dp_aux_ch not done status 0x%08x\n", status);
		return -EBUSY;
	}

	/* Check for timeout or receive error.
	 * Timeouts occur when the sink is not connected
	 */
	if (status & DP_AUX_CH_CTL_RECEIVE_ERROR) {
		DRM_ERROR("dp_aux_ch receive error status 0x%08x\n", status);
		return -EIO;
	}

	/* Timeouts occur when the device isn't connected, so they're
	 * "normal" -- don't fill the kernel log with these */
	if (status & DP_AUX_CH_CTL_TIME_OUT_ERROR) {
		DRM_DEBUG_KMS("dp_aux_ch timeout status 0x%08x\n", status);
		return -ETIMEDOUT;
	}

	/* Unload any bytes sent back from the other side */
	recv_bytes = ((status & DP_AUX_CH_CTL_MESSAGE_SIZE_MASK) >>
		      DP_AUX_CH_CTL_MESSAGE_SIZE_SHIFT);
	if (recv_bytes > recv_size)
		recv_bytes = recv_size;

	for (i = 0; i < recv_bytes; i += 4)
		unpack_aux(I915_READ(ch_data + i),
			   recv + i, recv_bytes - i);

	return recv_bytes;
}

/* Write data to the aux channel in native mode.  Retries on DEFER replies;
 * returns @send_bytes on success, -1 for oversized writes, or a negative
 * errno from the AUX transaction. */
static int
intel_dp_aux_native_write(struct intel_dp *intel_dp,
			  uint16_t address, uint8_t *send, int send_bytes)
{
	int ret;
	uint8_t	msg[20];	/* 4-byte header + up to 16 payload bytes */
	int msg_bytes;
	uint8_t	ack;

	intel_dp_check_edp(intel_dp);
	if (send_bytes > 16)
		return -1;
	msg[0] = AUX_NATIVE_WRITE << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = send_bytes - 1;
	memcpy(&msg[4], send, send_bytes);
	msg_bytes = send_bytes + 4;
	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes, &ack, 1);
		if (ret < 0)
			return ret;
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK)
			break;
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
	return send_bytes;
}

/* Write a single byte to the aux channel in native mode */
static int
intel_dp_aux_native_write_1(struct intel_dp *intel_dp,
			    uint16_t address, uint8_t byte)
{
	return intel_dp_aux_native_write(intel_dp, address, &byte, 1);
}

/* read bytes from a native aux channel.  Retries on DEFER; returns the
 * number of payload bytes read or a negative errno. */
static int
intel_dp_aux_native_read(struct intel_dp *intel_dp,
			 uint16_t address, uint8_t *recv, int recv_bytes)
{
	uint8_t msg[4];
	int msg_bytes;
	uint8_t reply[20];	/* 1 reply byte + up to 16 data bytes */
	int reply_bytes;
	uint8_t ack;
	int ret;

	intel_dp_check_edp(intel_dp);
	msg[0] = AUX_NATIVE_READ << 4;
	msg[1] = address >> 8;
	msg[2] = address & 0xff;
	msg[3] = recv_bytes - 1;

	msg_bytes = 4;
	reply_bytes = recv_bytes + 1;

	for (;;) {
		ret = intel_dp_aux_ch(intel_dp, msg, msg_bytes,
				      reply, reply_bytes);
		if (ret == 0)
			return -EPROTO;
		if (ret < 0)
			return ret;
		ack = reply[0];
		if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_ACK) {
			memcpy(recv, reply + 1, ret - 1);
			return ret - 1;
		}
		else if ((ack & AUX_NATIVE_REPLY_MASK) == AUX_NATIVE_REPLY_DEFER)
			udelay(100);
		else
			return -EIO;
	}
}

/*
 * i2c-over-AUX bit-banging callback for the drm dp i2c algo: transfer one
 * byte (read or write) as an I2C-over-AUX transaction, honoring both the
 * native and the I2C reply fields.
 */
static int
intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
		    uint8_t write_byte, uint8_t *read_byte)
{
	struct i2c_algo_dp_aux_data *algo_data = adapter->algo_data;
	struct intel_dp *intel_dp = container_of(adapter,
						struct intel_dp,
						adapter);
	uint16_t address = algo_data->address;
	uint8_t msg[5];
	uint8_t reply[2];
	unsigned retry;
	int msg_bytes;
	int reply_bytes;
	int ret;

	intel_dp_check_edp(intel_dp);
	/* Set up the command byte */
	if (mode & MODE_I2C_READ)
		msg[0] = AUX_I2C_READ << 4;
	else
		msg[0] = AUX_I2C_WRITE << 4;

	/* MOT (middle-of-transaction) stays set until the I2C STOP */
	if (!(mode & MODE_I2C_STOP))
		msg[0] |= AUX_I2C_MOT << 4;

	msg[1] = address >> 8;
	msg[2] = address;

	switch (mode) {
	case MODE_I2C_WRITE:
		msg[3] = 0;
		msg[4] = write_byte;
		msg_bytes = 5;
		reply_bytes = 1;
		break;
	case MODE_I2C_READ:
		msg[3] = 0;
		msg_bytes = 4;
		reply_bytes = 2;
		break;
	default:
		/* address-only (start/stop) transaction */
		msg_bytes = 3;
		reply_bytes = 1;
		break;
	}

	for (retry = 0; retry < 5; retry++) {
		ret = intel_dp_aux_ch(intel_dp,
				      msg, msg_bytes,
				      reply, reply_bytes);
		if (ret < 0) {
			DRM_DEBUG_KMS("aux_ch failed %d\n", ret);
			return ret;
		}

		switch (reply[0] & AUX_NATIVE_REPLY_MASK) {
		case AUX_NATIVE_REPLY_ACK:
			/* I2C-over-AUX Reply field is only valid
			 * when paired with AUX ACK.
			 */
			break;
		case AUX_NATIVE_REPLY_NACK:
			DRM_DEBUG_KMS("aux_ch native nack\n");
			return -EREMOTEIO;
		case AUX_NATIVE_REPLY_DEFER:
			udelay(100);
			continue;
		default:
			DRM_ERROR("aux_ch invalid native reply 0x%02x\n",
				  reply[0]);
			return -EREMOTEIO;
		}

		switch (reply[0] & AUX_I2C_REPLY_MASK) {
		case AUX_I2C_REPLY_ACK:
			if (mode == MODE_I2C_READ) {
				*read_byte = reply[1];
			}
			return reply_bytes - 1;
		case AUX_I2C_REPLY_NACK:
			DRM_DEBUG_KMS("aux_i2c nack\n");
			return -EREMOTEIO;
		case AUX_I2C_REPLY_DEFER:
			DRM_DEBUG_KMS("aux_i2c defer\n");
			udelay(100);
			break;
		default:
			DRM_ERROR("aux_i2c invalid reply 0x%02x\n", reply[0]);
			return -EREMOTEIO;
		}
	}

	DRM_ERROR("too many retries, giving up\n");
	return -EREMOTEIO;
}

static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp);
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync);

/*
 * Register the i2c-over-AUX adapter for this port.  VDD is forced on
 * around bus registration since adding the bus probes the sink.
 */
static int
intel_dp_i2c_init(struct intel_dp *intel_dp,
		  struct intel_connector *intel_connector, const char *name)
{
	int	ret;

	DRM_DEBUG_KMS("i2c_init %s\n", name);
	intel_dp->algo.running = false;
	intel_dp->algo.address = 0;
	intel_dp->algo.aux_ch = intel_dp_i2c_aux_ch;

	memset(&intel_dp->adapter, '\0', sizeof(intel_dp->adapter));
	intel_dp->adapter.owner = THIS_MODULE;
	intel_dp->adapter.class = I2C_CLASS_DDC;
	strncpy(intel_dp->adapter.name, name, sizeof(intel_dp->adapter.name) - 1);
	intel_dp->adapter.name[sizeof(intel_dp->adapter.name) - 1] = '\0';
	intel_dp->adapter.algo_data = &intel_dp->algo;
	intel_dp->adapter.dev.parent = &intel_connector->base.kdev;

	ironlake_edp_panel_vdd_on(intel_dp);
	ret = i2c_dp_aux_add_bus(&intel_dp->adapter);
	ironlake_edp_panel_vdd_off(intel_dp, false);
	return ret;
}

/*
 * drm encoder ->mode_fixup hook: pick the cheapest link configuration
 * (bandwidth, then lane count) that can carry the mode, and store it in
 * the intel_dp for mode_set / link training.
 */
static bool
intel_dp_mode_fixup(struct drm_encoder *encoder,
		    const struct drm_display_mode *mode,
		    struct drm_display_mode *adjusted_mode)
{
	struct drm_device *dev = encoder->dev;
	struct intel_dp *intel_dp = enc_to_intel_dp(encoder);
	int lane_count, clock;
	int max_lane_count = intel_dp_max_lane_count(intel_dp);
	int max_clock = intel_dp_max_link_bw(intel_dp) == DP_LINK_BW_2_7 ? 1 : 0;
	int bpp, mode_rate;
	static int bws[2] = { DP_LINK_BW_1_62, DP_LINK_BW_2_7 };

	if (is_edp(intel_dp) && intel_dp->panel_fixed_mode) {
		intel_fixed_panel_mode(intel_dp->panel_fixed_mode, adjusted_mode);
		intel_pch_panel_fitting(dev, DRM_MODE_SCALE_FULLSCREEN,
					mode, adjusted_mode);
	}

	if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK)
		return false;

	DRM_DEBUG_KMS("DP link computation with max lane count %i "
		      "max bw %02x pixel clock %iKHz\n",
		      max_lane_count, bws[max_clock], adjusted_mode->clock);

	if (!intel_dp_adjust_dithering(intel_dp, adjusted_mode, true))
		return false;

	bpp = adjusted_mode->private_flags & INTEL_MODE_DP_FORCE_6BPC ?
18 : 24; 703 mode_rate = intel_dp_link_required(adjusted_mode->clock, bpp); 704 705 for (clock = 0; clock <= max_clock; clock++) { 706 for (lane_count = 1; lane_count <= max_lane_count; lane_count <<= 1) { 707 int link_avail = intel_dp_max_data_rate(intel_dp_link_clock(bws[clock]), lane_count); 708 709 if (mode_rate <= link_avail) { 710 intel_dp->link_bw = bws[clock]; 711 intel_dp->lane_count = lane_count; 712 adjusted_mode->clock = intel_dp_link_clock(intel_dp->link_bw); 713 DRM_DEBUG_KMS("DP link bw %02x lane " 714 "count %d clock %d bpp %d\n", 715 intel_dp->link_bw, intel_dp->lane_count, 716 adjusted_mode->clock, bpp); 717 DRM_DEBUG_KMS("DP link bw required %i available %i\n", 718 mode_rate, link_avail); 719 return true; 720 } 721 } 722 } 723 724 return false; 725} 726 727struct intel_dp_m_n { 728 uint32_t tu; 729 uint32_t gmch_m; 730 uint32_t gmch_n; 731 uint32_t link_m; 732 uint32_t link_n; 733}; 734 735static void 736intel_reduce_ratio(uint32_t *num, uint32_t *den) 737{ 738 while (*num > 0xffffff || *den > 0xffffff) { 739 *num >>= 1; 740 *den >>= 1; 741 } 742} 743 744static void 745intel_dp_compute_m_n(int bpp, 746 int nlanes, 747 int pixel_clock, 748 int link_clock, 749 struct intel_dp_m_n *m_n) 750{ 751 m_n->tu = 64; 752 m_n->gmch_m = (pixel_clock * bpp) >> 3; 753 m_n->gmch_n = link_clock * nlanes; 754 intel_reduce_ratio(&m_n->gmch_m, &m_n->gmch_n); 755 m_n->link_m = pixel_clock; 756 m_n->link_n = link_clock; 757 intel_reduce_ratio(&m_n->link_m, &m_n->link_n); 758} 759 760void 761intel_dp_set_m_n(struct drm_crtc *crtc, struct drm_display_mode *mode, 762 struct drm_display_mode *adjusted_mode) 763{ 764 struct drm_device *dev = crtc->dev; 765 struct intel_encoder *encoder; 766 struct drm_i915_private *dev_priv = dev->dev_private; 767 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 768 int lane_count = 4; 769 struct intel_dp_m_n m_n; 770 int pipe = intel_crtc->pipe; 771 772 /* 773 * Find the lane count in the intel_encoder private 774 */ 775 
for_each_encoder_on_crtc(dev, crtc, encoder) { 776 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 777 778 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 779 intel_dp->base.type == INTEL_OUTPUT_EDP) 780 { 781 lane_count = intel_dp->lane_count; 782 break; 783 } 784 } 785 786 /* 787 * Compute the GMCH and Link ratios. The '3' here is 788 * the number of bytes_per_pixel post-LUT, which we always 789 * set up for 8-bits of R/G/B, or 3 bytes total. 790 */ 791 intel_dp_compute_m_n(intel_crtc->bpp, lane_count, 792 mode->clock, adjusted_mode->clock, &m_n); 793 794 if (HAS_PCH_SPLIT(dev)) { 795 I915_WRITE(TRANSDATA_M1(pipe), 796 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 797 m_n.gmch_m); 798 I915_WRITE(TRANSDATA_N1(pipe), m_n.gmch_n); 799 I915_WRITE(TRANSDPLINK_M1(pipe), m_n.link_m); 800 I915_WRITE(TRANSDPLINK_N1(pipe), m_n.link_n); 801 } else { 802 I915_WRITE(PIPE_GMCH_DATA_M(pipe), 803 ((m_n.tu - 1) << PIPE_GMCH_DATA_M_TU_SIZE_SHIFT) | 804 m_n.gmch_m); 805 I915_WRITE(PIPE_GMCH_DATA_N(pipe), m_n.gmch_n); 806 I915_WRITE(PIPE_DP_LINK_M(pipe), m_n.link_m); 807 I915_WRITE(PIPE_DP_LINK_N(pipe), m_n.link_n); 808 } 809} 810 811static void ironlake_edp_pll_on(struct drm_encoder *encoder); 812static void ironlake_edp_pll_off(struct drm_encoder *encoder); 813 814static void 815intel_dp_mode_set(struct drm_encoder *encoder, struct drm_display_mode *mode, 816 struct drm_display_mode *adjusted_mode) 817{ 818 struct drm_device *dev = encoder->dev; 819 struct drm_i915_private *dev_priv = dev->dev_private; 820 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 821 struct drm_crtc *crtc = intel_dp->base.base.crtc; 822 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 823 824 /* Turn on the eDP PLL if needed */ 825 if (is_edp(intel_dp)) { 826 if (!is_pch_edp(intel_dp)) 827 ironlake_edp_pll_on(encoder); 828 else 829 ironlake_edp_pll_off(encoder); 830 } 831 832 /* 833 * There are four kinds of DP registers: 834 * 835 * IBX PCH 836 * SNB CPU 837 * IVB CPU 838 
* CPT PCH 839 * 840 * IBX PCH and CPU are the same for almost everything, 841 * except that the CPU DP PLL is configured in this 842 * register 843 * 844 * CPT PCH is quite different, having many bits moved 845 * to the TRANS_DP_CTL register instead. That 846 * configuration happens (oddly) in ironlake_pch_enable 847 */ 848 849 /* Preserve the BIOS-computed detected bit. This is 850 * supposed to be read-only. 851 */ 852 intel_dp->DP = I915_READ(intel_dp->output_reg) & DP_DETECTED; 853 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 854 855 /* Handle DP bits in common between all three register formats */ 856 857 intel_dp->DP |= DP_VOLTAGE_0_4 | DP_PRE_EMPHASIS_0; 858 859 switch (intel_dp->lane_count) { 860 case 1: 861 intel_dp->DP |= DP_PORT_WIDTH_1; 862 break; 863 case 2: 864 intel_dp->DP |= DP_PORT_WIDTH_2; 865 break; 866 case 4: 867 intel_dp->DP |= DP_PORT_WIDTH_4; 868 break; 869 } 870 if (intel_dp->has_audio) { 871 DRM_DEBUG_DRIVER("Enabling DP audio on pipe %c\n", 872 pipe_name(intel_crtc->pipe)); 873 intel_dp->DP |= DP_AUDIO_OUTPUT_ENABLE; 874 intel_write_eld(encoder, adjusted_mode); 875 } 876 memset(intel_dp->link_configuration, 0, DP_LINK_CONFIGURATION_SIZE); 877 intel_dp->link_configuration[0] = intel_dp->link_bw; 878 intel_dp->link_configuration[1] = intel_dp->lane_count; 879 intel_dp->link_configuration[8] = DP_SET_ANSI_8B10B; 880 /* 881 * Check for DPCD version > 1.1 and enhanced framing support 882 */ 883 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 && 884 (intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_ENHANCED_FRAME_CAP)) { 885 intel_dp->link_configuration[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; 886 } 887 888 /* Split out the IBX/CPU vs CPT settings */ 889 890 if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) { 891 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 892 intel_dp->DP |= DP_SYNC_HS_HIGH; 893 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 894 intel_dp->DP |= DP_SYNC_VS_HIGH; 895 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 896 897 if 
(intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 898 intel_dp->DP |= DP_ENHANCED_FRAMING; 899 900 intel_dp->DP |= intel_crtc->pipe << 29; 901 902 /* don't miss out required setting for eDP */ 903 intel_dp->DP |= DP_PLL_ENABLE; 904 if (adjusted_mode->clock < 200000) 905 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 906 else 907 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 908 } else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) { 909 intel_dp->DP |= intel_dp->color_range; 910 911 if (adjusted_mode->flags & DRM_MODE_FLAG_PHSYNC) 912 intel_dp->DP |= DP_SYNC_HS_HIGH; 913 if (adjusted_mode->flags & DRM_MODE_FLAG_PVSYNC) 914 intel_dp->DP |= DP_SYNC_VS_HIGH; 915 intel_dp->DP |= DP_LINK_TRAIN_OFF; 916 917 if (intel_dp->link_configuration[1] & DP_LANE_COUNT_ENHANCED_FRAME_EN) 918 intel_dp->DP |= DP_ENHANCED_FRAMING; 919 920 if (intel_crtc->pipe == 1) 921 intel_dp->DP |= DP_PIPEB_SELECT; 922 923 if (is_cpu_edp(intel_dp)) { 924 /* don't miss out required setting for eDP */ 925 intel_dp->DP |= DP_PLL_ENABLE; 926 if (adjusted_mode->clock < 200000) 927 intel_dp->DP |= DP_PLL_FREQ_160MHZ; 928 else 929 intel_dp->DP |= DP_PLL_FREQ_270MHZ; 930 } 931 } else { 932 intel_dp->DP |= DP_LINK_TRAIN_OFF_CPT; 933 } 934} 935 936#define IDLE_ON_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 937#define IDLE_ON_VALUE (PP_ON | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_ON_IDLE) 938 939#define IDLE_OFF_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | 0 | PP_SEQUENCE_STATE_MASK) 940#define IDLE_OFF_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 941 942#define IDLE_CYCLE_MASK (PP_ON | 0 | PP_SEQUENCE_MASK | PP_CYCLE_DELAY_ACTIVE | PP_SEQUENCE_STATE_MASK) 943#define IDLE_CYCLE_VALUE (0 | 0 | PP_SEQUENCE_NONE | 0 | PP_SEQUENCE_STATE_OFF_IDLE) 944 945static void ironlake_wait_panel_status(struct intel_dp *intel_dp, 946 u32 mask, 947 u32 value) 948{ 949 struct drm_device *dev = intel_dp->base.base.dev; 950 struct drm_i915_private *dev_priv = dev->dev_private; 951 952 
	DRM_DEBUG_KMS("mask %08x value %08x status %08x control %08x\n",
		      mask, value,
		      I915_READ(PCH_PP_STATUS),
		      I915_READ(PCH_PP_CONTROL));

	if (_wait_for((I915_READ(PCH_PP_STATUS) & mask) == value, 5000, 10)) {
		DRM_ERROR("Panel status timeout: status %08x control %08x\n",
			  I915_READ(PCH_PP_STATUS),
			  I915_READ(PCH_PP_CONTROL));
	}
}

/* Wait until the power sequencer reports the panel fully on and idle. */
static void ironlake_wait_panel_on(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power on\n");
	ironlake_wait_panel_status(intel_dp, IDLE_ON_MASK, IDLE_ON_VALUE);
}

/* Wait until the power sequencer reports the panel fully off and idle. */
static void ironlake_wait_panel_off(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power off time\n");
	ironlake_wait_panel_status(intel_dp, IDLE_OFF_MASK, IDLE_OFF_VALUE);
}

/* Wait until the mandatory panel power-cycle delay has elapsed. */
static void ironlake_wait_panel_power_cycle(struct intel_dp *intel_dp)
{
	DRM_DEBUG_KMS("Wait for panel power cycle\n");
	ironlake_wait_panel_status(intel_dp, IDLE_CYCLE_MASK, IDLE_CYCLE_VALUE);
}


/* Read the current pp_control value, unlocking the register if it
 * is locked
 */

static  u32 ironlake_get_pp_control(struct drm_i915_private *dev_priv)
{
	u32	control = I915_READ(PCH_PP_CONTROL);

	control &= ~PANEL_UNLOCK_MASK;
	control |= PANEL_UNLOCK_REGS;
	return control;
}

/*
 * Force panel VDD on so AUX/i2c traffic works before full panel power up.
 * Must be balanced by ironlake_edp_panel_vdd_off().
 */
static void ironlake_edp_panel_vdd_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;
	DRM_DEBUG_KMS("Turn eDP VDD on\n");

	WARN(intel_dp->want_panel_vdd,
	     "eDP VDD already requested on\n");

	intel_dp->want_panel_vdd = true;

	if (ironlake_edp_have_panel_vdd(intel_dp)) {
		DRM_DEBUG_KMS("eDP VDD already on\n");
		return;
	}

	if (!ironlake_edp_have_panel_power(intel_dp))
		ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_FORCE_VDD;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
		      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

	/*
	 * If the panel wasn't on, delay before accessing aux channel
	 */
	if (!ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP was not running\n");
		msleep(intel_dp->panel_power_up_delay);
	}
}

/* Drop forced VDD now, if no-one wants it anymore; honors the panel
 * power-down delay afterwards. */
static void ironlake_panel_vdd_off_sync(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!intel_dp->want_panel_vdd && ironlake_edp_have_panel_vdd(intel_dp)) {
		pp = ironlake_get_pp_control(dev_priv);
		pp &= ~EDP_FORCE_VDD;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);

		/* Make sure sequencer is idle before allowing subsequent activity */
		DRM_DEBUG_KMS("PCH_PP_STATUS: 0x%08x PCH_PP_CONTROL: 0x%08x\n",
			      I915_READ(PCH_PP_STATUS), I915_READ(PCH_PP_CONTROL));

		msleep(intel_dp->panel_power_down_delay);
	}
}

/* Delayed-work callback that drops VDD some time after the last user. */
static void ironlake_panel_vdd_work(struct work_struct *__work)
{
	struct intel_dp *intel_dp = container_of(to_delayed_work(__work),
						 struct intel_dp, panel_vdd_work);
	struct drm_device *dev = intel_dp->base.base.dev;

	mutex_lock(&dev->mode_config.mutex);
	ironlake_panel_vdd_off_sync(intel_dp);
	mutex_unlock(&dev->mode_config.mutex);
}

/* Release a VDD reference; either synchronously or via delayed work so
 * back-to-back AUX users don't cycle VDD. */
static void ironlake_edp_panel_vdd_off(struct intel_dp *intel_dp, bool sync)
{
	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP VDD off %d\n", intel_dp->want_panel_vdd);
	WARN(!intel_dp->want_panel_vdd, "eDP VDD not forced on");

	intel_dp->want_panel_vdd = false;

	if (sync) {
		ironlake_panel_vdd_off_sync(intel_dp);
	} else {
		/*
		 * Queue the timer to fire a long
		 * time from now (relative to the power down delay)
		 * to keep the panel power up across a sequence of operations
		 */
		schedule_delayed_work(&intel_dp->panel_vdd_work,
				      msecs_to_jiffies(intel_dp->panel_power_cycle_delay * 5));
	}
}

/* Turn eDP panel power fully on via the power sequencer. */
static void ironlake_edp_panel_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power on\n");

	if (ironlake_edp_have_panel_power(intel_dp)) {
		DRM_DEBUG_KMS("eDP power already on\n");
		return;
	}

	ironlake_wait_panel_power_cycle(intel_dp);

	pp = ironlake_get_pp_control(dev_priv);
	if (IS_GEN5(dev)) {
		/* ILK workaround: disable reset around power sequence */
		pp &= ~PANEL_POWER_RESET;
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}

	pp |= POWER_TARGET_ON;
	if (!IS_GEN5(dev))
		pp |= PANEL_POWER_RESET;

	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	ironlake_wait_panel_on(intel_dp);

	if (IS_GEN5(dev)) {
		pp |= PANEL_POWER_RESET; /* restore panel reset bit */
		I915_WRITE(PCH_PP_CONTROL, pp);
		POSTING_READ(PCH_PP_CONTROL);
	}
}

/* Turn eDP panel power off; requires VDD to have been forced on. */
static void ironlake_edp_panel_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("Turn eDP power off\n");

	WARN(!intel_dp->want_panel_vdd, "Need VDD to turn off panel\n");

	pp = ironlake_get_pp_control(dev_priv);
	/* We need to switch off panel power _and_ force vdd, for otherwise some
	 * panels get very unhappy and cease to work. */
	pp &= ~(POWER_TARGET_ON | EDP_FORCE_VDD | PANEL_POWER_RESET | EDP_BLC_ENABLE);
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);

	intel_dp->want_panel_vdd = false;

	ironlake_wait_panel_off(intel_dp);
}

/* Enable the eDP backlight, after the panel-sync delay. */
static void ironlake_edp_backlight_on(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	/*
	 * If we enable the backlight right away following a panel power
	 * on, we may see slight flicker as the panel syncs with the eDP
	 * link.  So delay a bit to make sure the image is solid before
	 * allowing it to appear.
	 */
	msleep(intel_dp->backlight_on_delay);
	pp = ironlake_get_pp_control(dev_priv);
	pp |= EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
}

/* Disable the eDP backlight, then honor the backlight-off delay. */
static void ironlake_edp_backlight_off(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 pp;

	if (!is_edp(intel_dp))
		return;

	DRM_DEBUG_KMS("\n");
	pp = ironlake_get_pp_control(dev_priv);
	pp &= ~EDP_BLC_ENABLE;
	I915_WRITE(PCH_PP_CONTROL, pp);
	POSTING_READ(PCH_PP_CONTROL);
	msleep(intel_dp->backlight_off_delay);
}

/* Enable the CPU eDP PLL (DP_A) and wait for it to warm up. */
static void ironlake_edp_pll_on(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 dpa_ctl;

	DRM_DEBUG_KMS("\n");
	dpa_ctl = I915_READ(DP_A);
	dpa_ctl |= DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* Disable the CPU eDP PLL (DP_A). */
static void ironlake_edp_pll_off(struct drm_encoder *encoder)
{
	struct drm_device *dev = encoder->dev;
	struct drm_i915_private *dev_priv =
		dev->dev_private;
	u32 dpa_ctl;

	dpa_ctl = I915_READ(DP_A);
	dpa_ctl &= ~DP_PLL_ENABLE;
	I915_WRITE(DP_A, dpa_ctl);
	POSTING_READ(DP_A);
	udelay(200);
}

/* If the sink supports it, try to set the power state appropriately */
static void intel_dp_sink_dpms(struct intel_dp *intel_dp, int mode)
{
	int ret, i;

	/* Should have a valid DPCD by this point */
	if (intel_dp->dpcd[DP_DPCD_REV] < 0x11)
		return;

	if (mode != DRM_MODE_DPMS_ON) {
		ret = intel_dp_aux_native_write_1(intel_dp, DP_SET_POWER,
						  DP_SET_POWER_D3);
		if (ret != 1)
			DRM_DEBUG_DRIVER("failed to write sink power state\n");
	} else {
		/*
		 * When turning on, we need to retry for 1ms to give the sink
		 * time to wake up.
		 */
		for (i = 0; i < 3; i++) {
			ret = intel_dp_aux_native_write_1(intel_dp,
							  DP_SET_POWER,
							  DP_SET_POWER_D0);
			if (ret == 1)
				break;
			msleep(1);
		}
	}
}

/* Read back the hardware state of this DP port.  Returns false only when the
 * port is disabled; when enabled, *pipe is filled in from the port register
 * (or, on CPT PCH ports, by scanning the transcoder DP control registers). */
static bool intel_dp_get_hw_state(struct intel_encoder *encoder,
				  enum pipe *pipe)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 tmp = I915_READ(intel_dp->output_reg);

	if (!(tmp & DP_PORT_EN))
		return false;

	if (is_cpu_edp(intel_dp) && IS_GEN7(dev)) {
		*pipe = PORT_TO_PIPE_CPT(tmp);
	} else if (!HAS_PCH_CPT(dev) || is_cpu_edp(intel_dp)) {
		*pipe = PORT_TO_PIPE(tmp);
	} else {
		/* On CPT the pipe<->port routing lives in the transcoder DP
		 * control registers, so scan them for our port. */
		u32 trans_sel;
		u32 trans_dp;
		int i;

		switch (intel_dp->output_reg) {
		case PCH_DP_B:
			trans_sel = TRANS_DP_PORT_SEL_B;
			break;
		case PCH_DP_C:
			trans_sel = TRANS_DP_PORT_SEL_C;
			break;
		case PCH_DP_D:
			trans_sel = TRANS_DP_PORT_SEL_D;
			break;
		default:
			return true;
		}

		for_each_pipe(i) {
			trans_dp = I915_READ(TRANS_DP_CTL(i));
			if ((trans_dp & TRANS_DP_PORT_SEL_MASK) == trans_sel) {
				*pipe = i;
				return true;
			}
		}
	}

	DRM_DEBUG_KMS("No pipe for dp port 0x%x found\n", intel_dp->output_reg);

	return true;
}

/* Disable path: backlight off, panel off (holding VDD), then drop the link. */
static void intel_disable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);

	/* Make sure the panel is off before trying to change the mode. But also
	 * ensure that we have vdd while we switch off the panel. */
	ironlake_edp_panel_vdd_on(intel_dp);
	ironlake_edp_backlight_off(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	ironlake_edp_panel_off(intel_dp);
	intel_dp_link_down(intel_dp);
}

/* Enable path: wake the sink, train the link if the port was down, power the
 * panel and finally turn the backlight on. */
static void intel_enable_dp(struct intel_encoder *encoder)
{
	struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
	struct drm_device *dev = encoder->base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t dp_reg = I915_READ(intel_dp->output_reg);

	ironlake_edp_panel_vdd_on(intel_dp);
	intel_dp_sink_dpms(intel_dp, DRM_MODE_DPMS_ON);
	if (!(dp_reg & DP_PORT_EN)) {
		intel_dp_start_link_train(intel_dp);
		ironlake_edp_panel_on(intel_dp);
		ironlake_edp_panel_vdd_off(intel_dp, true);
		intel_dp_complete_link_train(intel_dp);
	} else
		ironlake_edp_panel_vdd_off(intel_dp, false);
	ironlake_edp_backlight_on(intel_dp);
}

/* Connector dpms hook: collapses all non-ON states to OFF and sequences the
 * CPU eDP PLL around the generic encoder dpms transition. */
static void
intel_dp_dpms(struct drm_connector *connector, int mode)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);

	/* DP supports only 2 dpms states. */
	if (mode != DRM_MODE_DPMS_ON)
		mode = DRM_MODE_DPMS_OFF;

	if (mode == connector->dpms)
		return;

	connector->dpms = mode;

	/* Only need to change hw state when actually enabled */
	if (!intel_dp->base.base.crtc) {
		intel_dp->base.connectors_active = false;
		return;
	}

	if (mode != DRM_MODE_DPMS_ON) {
		intel_encoder_dpms(&intel_dp->base, mode);

		if (is_cpu_edp(intel_dp))
			ironlake_edp_pll_off(&intel_dp->base.base);
	} else {
		if (is_cpu_edp(intel_dp))
			ironlake_edp_pll_on(&intel_dp->base.base);

		intel_encoder_dpms(&intel_dp->base, mode);
	}

	intel_connector_check_state(to_intel_connector(connector));
}

/*
 * Native read with retry for link status and receiver capability reads for
 * cases where the sink may still be asleep.
 */
static bool
intel_dp_aux_native_read_retry(struct intel_dp *intel_dp, uint16_t address,
			       uint8_t *recv, int recv_bytes)
{
	int ret, i;

	/*
	 * Sinks are *supposed* to come up within 1ms from an off state,
	 * but we're also supposed to retry 3 times per the spec.
	 */
	for (i = 0; i < 3; i++) {
		ret = intel_dp_aux_native_read(intel_dp, address, recv,
					       recv_bytes);
		if (ret == recv_bytes)
			return true;
		msleep(1);
	}

	return false;
}

/*
 * Fetch AUX CH registers 0x202 - 0x207 which contain
 * link status information
 */
static bool
intel_dp_get_link_status(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	return intel_dp_aux_native_read_retry(intel_dp,
					      DP_LANE0_1_STATUS,
					      link_status,
					      DP_LINK_STATUS_SIZE);
}

/* Index into a cached link-status block by DPCD register address. */
static uint8_t
intel_dp_link_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
		     int r)
{
	return link_status[r - DP_LANE0_1_STATUS];
}

/* Extract the requested voltage-swing level for one lane from the two-byte
 * ADJUST_REQUEST field (two lanes packed per byte), shifted into
 * TRAINING_LANEx_SET position. */
static uint8_t
intel_get_adjust_request_voltage(uint8_t adjust_request[2],
				 int lane)
{
	int s = ((lane & 1) ?
		 DP_ADJUST_VOLTAGE_SWING_LANE1_SHIFT :
		 DP_ADJUST_VOLTAGE_SWING_LANE0_SHIFT);
	uint8_t l = adjust_request[lane>>1];

	return ((l >> s) & 3) << DP_TRAIN_VOLTAGE_SWING_SHIFT;
}

/* Same as above for the requested pre-emphasis level of one lane. */
static uint8_t
intel_get_adjust_request_pre_emphasis(uint8_t adjust_request[2],
				      int lane)
{
	int s = ((lane & 1) ?
		 DP_ADJUST_PRE_EMPHASIS_LANE1_SHIFT :
		 DP_ADJUST_PRE_EMPHASIS_LANE0_SHIFT);
	uint8_t l = adjust_request[lane>>1];

	return ((l >> s) & 3) << DP_TRAIN_PRE_EMPHASIS_SHIFT;
}


#if 0
static char *voltage_names[] = {
	"0.4V", "0.6V", "0.8V", "1.2V"
};
static char *pre_emph_names[] = {
	"0dB", "3.5dB", "6dB", "9.5dB"
};
static char *link_train_names[] = {
	"pattern 1", "pattern 2", "idle", "off"
};
#endif

/*
 * These are source-specific values; current Intel hardware supports
 * a maximum voltage of 800mV and a maximum pre-emphasis of 6dB
 */

static uint8_t
intel_dp_voltage_max(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_800;
	else if (HAS_PCH_CPT(dev) && !is_cpu_edp(intel_dp))
		return DP_TRAIN_VOLTAGE_SWING_1200;
	else
		return DP_TRAIN_VOLTAGE_SWING_800;
}

/* Maximum pre-emphasis the source supports at a given voltage swing;
 * gen7 CPU eDP has a more restrictive table than the other platforms. */
static uint8_t
intel_dp_pre_emphasis_max(struct intel_dp *intel_dp, uint8_t voltage_swing)
{
	struct drm_device *dev = intel_dp->base.base.dev;

	if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	} else {
		switch (voltage_swing & DP_TRAIN_VOLTAGE_SWING_MASK) {
		case DP_TRAIN_VOLTAGE_SWING_400:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_600:
			return DP_TRAIN_PRE_EMPHASIS_6;
		case DP_TRAIN_VOLTAGE_SWING_800:
			return DP_TRAIN_PRE_EMPHASIS_3_5;
		case DP_TRAIN_VOLTAGE_SWING_1200:
		default:
			return DP_TRAIN_PRE_EMPHASIS_0;
		}
	}
}

/* Compute the next train_set from the sink's ADJUST_REQUEST: take the
 * maximum requested swing/pre-emphasis over all active lanes, clamp both to
 * the source limits (flagging MAX_*_REACHED), and apply to all four lanes. */
static void
intel_get_adjust_train(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t v = 0;
	uint8_t p = 0;
	int lane;
	uint8_t *adjust_request = link_status + (DP_ADJUST_REQUEST_LANE0_1 - DP_LANE0_1_STATUS);
	uint8_t voltage_max;
	uint8_t preemph_max;

	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		uint8_t this_v = intel_get_adjust_request_voltage(adjust_request, lane);
		uint8_t this_p = intel_get_adjust_request_pre_emphasis(adjust_request, lane);

		if (this_v > v)
			v = this_v;
		if (this_p > p)
			p = this_p;
	}

	voltage_max = intel_dp_voltage_max(intel_dp);
	if (v >= voltage_max)
		v = voltage_max | DP_TRAIN_MAX_SWING_REACHED;

	preemph_max = intel_dp_pre_emphasis_max(intel_dp, v);
	if (p >= preemph_max)
		p = preemph_max | DP_TRAIN_MAX_PRE_EMPHASIS_REACHED;

	for (lane = 0; lane < 4; lane++)
		intel_dp->train_set[lane] = v | p;
}

/* Translate a DPCD train_set value into the non-eDP port register's
 * voltage/pre-emphasis bits. */
static uint32_t
intel_dp_signal_levels(uint8_t train_set)
{
	uint32_t signal_levels = 0;

	switch (train_set & DP_TRAIN_VOLTAGE_SWING_MASK) {
	case DP_TRAIN_VOLTAGE_SWING_400:
	default:
		signal_levels |= DP_VOLTAGE_0_4;
		break;
	case DP_TRAIN_VOLTAGE_SWING_600:
		signal_levels |= DP_VOLTAGE_0_6;
		break;
	case DP_TRAIN_VOLTAGE_SWING_800:
		signal_levels |= DP_VOLTAGE_0_8;
		break;
	case DP_TRAIN_VOLTAGE_SWING_1200:
		signal_levels |= DP_VOLTAGE_1_2;
		break;
	}
	switch (train_set & DP_TRAIN_PRE_EMPHASIS_MASK) {
	case DP_TRAIN_PRE_EMPHASIS_0:
	default:
		signal_levels |= DP_PRE_EMPHASIS_0;
		break;
	case DP_TRAIN_PRE_EMPHASIS_3_5:
		signal_levels |= DP_PRE_EMPHASIS_3_5;
		break;
	case DP_TRAIN_PRE_EMPHASIS_6:
		signal_levels |= DP_PRE_EMPHASIS_6;
		break;
	case DP_TRAIN_PRE_EMPHASIS_9_5:
		signal_levels |= DP_PRE_EMPHASIS_9_5;
		break;
	}
	return signal_levels;
}

/* Gen6's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen6_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400_600MV_6DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600_800MV_3_5DB_SNB_B;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
	case DP_TRAIN_VOLTAGE_SWING_1200 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800_1200MV_0DB_SNB_B;
	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_400_600MV_0DB_SNB_B;
	}
}

/* Gen7's DP voltage swing and pre-emphasis control */
static uint32_t
intel_gen7_edp_signal_levels(uint8_t train_set)
{
	int signal_levels = train_set & (DP_TRAIN_VOLTAGE_SWING_MASK |
					 DP_TRAIN_PRE_EMPHASIS_MASK);
	switch (signal_levels) {
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_400MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_400MV_3_5DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_400 | DP_TRAIN_PRE_EMPHASIS_6:
		return EDP_LINK_TRAIN_400MV_6DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_600MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_600 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_600MV_3_5DB_IVB;

	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_0:
		return EDP_LINK_TRAIN_800MV_0DB_IVB;
	case DP_TRAIN_VOLTAGE_SWING_800 | DP_TRAIN_PRE_EMPHASIS_3_5:
		return EDP_LINK_TRAIN_800MV_3_5DB_IVB;

	default:
		DRM_DEBUG_KMS("Unsupported voltage swing/pre-emphasis level:"
			      "0x%x\n", signal_levels);
		return EDP_LINK_TRAIN_500MV_0DB_IVB;
	}
}

/* Extract one lane's 4-bit status nibble from the link-status block
 * (two lanes packed per byte). */
static uint8_t
intel_get_lane_status(uint8_t link_status[DP_LINK_STATUS_SIZE],
		      int lane)
{
	int s = (lane & 1) * 4;
	uint8_t l = link_status[lane>>1];

	return (l >> s) & 0xf;
}

/* Check for clock recovery is done on all channels */
static bool
intel_clock_recovery_ok(uint8_t link_status[DP_LINK_STATUS_SIZE], int lane_count)
{
	int lane;
	uint8_t lane_status;

	for (lane = 0; lane < lane_count; lane++) {
		lane_status = intel_get_lane_status(link_status, lane);
		if ((lane_status & DP_LANE_CR_DONE) == 0)
			return false;
	}
	return true;
}

/* Check to see if channel eq is done on all channels */
#define CHANNEL_EQ_BITS (DP_LANE_CR_DONE|\
			 DP_LANE_CHANNEL_EQ_DONE|\
			 DP_LANE_SYMBOL_LOCKED)
static bool
intel_channel_eq_ok(struct intel_dp *intel_dp, uint8_t link_status[DP_LINK_STATUS_SIZE])
{
	uint8_t lane_align;
	uint8_t lane_status;
	int lane;

	lane_align = intel_dp_link_status(link_status,
					  DP_LANE_ALIGN_STATUS_UPDATED);
	if ((lane_align & DP_INTERLANE_ALIGN_DONE) == 0)
		return false;
	for (lane = 0; lane < intel_dp->lane_count; lane++) {
		lane_status = intel_get_lane_status(link_status, lane);
		if ((lane_status & CHANNEL_EQ_BITS) != CHANNEL_EQ_BITS)
			return false;
	}
	return true;
}

/* Program the training pattern into both the source port register and the
 * sink's TRAINING_PATTERN_SET, pushing train_set to the sink as well unless
 * training is being disabled.  Returns false if the lane-set write fails. */
static bool
intel_dp_set_link_train(struct intel_dp *intel_dp,
			uint32_t dp_reg_value,
			uint8_t dp_train_pat)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK_CPT;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF_CPT;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1_CPT;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2_CPT;
			break;
		}

	} else {
		dp_reg_value &= ~DP_LINK_TRAIN_MASK;

		switch (dp_train_pat & DP_TRAINING_PATTERN_MASK) {
		case DP_TRAINING_PATTERN_DISABLE:
			dp_reg_value |= DP_LINK_TRAIN_OFF;
			break;
		case DP_TRAINING_PATTERN_1:
			dp_reg_value |= DP_LINK_TRAIN_PAT_1;
			break;
		case DP_TRAINING_PATTERN_2:
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		case DP_TRAINING_PATTERN_3:
			DRM_ERROR("DP training pattern 3 not supported\n");
			dp_reg_value |= DP_LINK_TRAIN_PAT_2;
			break;
		}
	}

	I915_WRITE(intel_dp->output_reg, dp_reg_value);
	POSTING_READ(intel_dp->output_reg);

	intel_dp_aux_native_write_1(intel_dp,
				    DP_TRAINING_PATTERN_SET,
				    dp_train_pat);

	if ((dp_train_pat & DP_TRAINING_PATTERN_MASK) !=
	    DP_TRAINING_PATTERN_DISABLE) {
		ret = intel_dp_aux_native_write(intel_dp,
						DP_TRAINING_LANE0_SET,
						intel_dp->train_set,
						intel_dp->lane_count);
		if (ret != intel_dp->lane_count)
			return false;
	}

	return true;
}

/* Enable corresponding port and start training pattern 1 */
static void
intel_dp_start_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct intel_crtc *intel_crtc = to_intel_crtc(intel_dp->base.base.crtc);
	int i;
	uint8_t voltage;
	bool clock_recovery = false;
	int voltage_tries, loop_tries;
	uint32_t DP = intel_dp->DP;

	/*
	 * On CPT we have to enable the port in training pattern 1, which
	 * will happen below in intel_dp_set_link_train.  Otherwise, enable
	 * the port and wait for it to become active.
	 */
	if (!HAS_PCH_CPT(dev)) {
		I915_WRITE(intel_dp->output_reg, intel_dp->DP);
		POSTING_READ(intel_dp->output_reg);
		intel_wait_for_vblank(dev, intel_crtc->pipe);
	}

	/* Write the link configuration data */
	intel_dp_aux_native_write(intel_dp, DP_LINK_BW_SET,
				  intel_dp->link_configuration,
				  DP_LINK_CONFIGURATION_SIZE);

	DP |= DP_PORT_EN;

	/* Clock-recovery loop: drive pattern 1, poll link status, and follow
	 * the sink's swing/pre-emphasis requests.  Bail after 5 tries at the
	 * same voltage or 5 full restarts. */
	memset(intel_dp->train_set, 0, 4);
	voltage = 0xff;
	voltage_tries = 0;
	loop_tries = 0;
	clock_recovery = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint8_t link_status[DP_LINK_STATUS_SIZE];
		uint32_t signal_levels;


		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DRM_DEBUG_KMS("training pattern 1 signal levels %08x\n", signal_levels);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_1 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;
		/* Set training pattern 1 */

		udelay(100);
		if (!intel_dp_get_link_status(intel_dp, link_status)) {
			DRM_ERROR("failed to get link status\n");
			break;
		}

		if (intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			DRM_DEBUG_KMS("clock recovery OK\n");
			clock_recovery = true;
			break;
		}

		/* Check to see if we've tried the max voltage */
		for (i = 0; i < intel_dp->lane_count; i++)
			if ((intel_dp->train_set[i] & DP_TRAIN_MAX_SWING_REACHED) == 0)
				break;
		if (i == intel_dp->lane_count && voltage_tries == 5) {
			++loop_tries;
			if (loop_tries == 5) {
				DRM_DEBUG_KMS("too many full retries, give up\n");
				break;
			}
			memset(intel_dp->train_set, 0, 4);
			voltage_tries = 0;
			continue;
		}

		/* Check to see if we've tried the same voltage 5 times */
		if ((intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK) == voltage) {
			++voltage_tries;
			if (voltage_tries == 5) {
				DRM_DEBUG_KMS("too many voltage retries, give up\n");
				break;
			}
		} else
			voltage_tries = 0;
		voltage = intel_dp->train_set[0] & DP_TRAIN_VOLTAGE_SWING_MASK;

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
	}

	intel_dp->DP = DP;
}

/* Second phase of link training: channel equalization with pattern 2,
 * falling back to clock recovery when CR is lost, then disable training. */
static void
intel_dp_complete_link_train(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	bool channel_eq = false;
	int tries, cr_tries;
	uint32_t DP = intel_dp->DP;

	/* channel equalization */
	tries = 0;
	cr_tries = 0;
	channel_eq = false;
	for (;;) {
		/* Use intel_dp->train_set[0] to set the voltage and pre emphasis values */
		uint32_t signal_levels;
		uint8_t link_status[DP_LINK_STATUS_SIZE];

		if (cr_tries > 5) {
			DRM_ERROR("failed to train DP, aborting\n");
			intel_dp_link_down(intel_dp);
			break;
		}

		if (IS_GEN7(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen7_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_IVB) | signal_levels;
		} else if (IS_GEN6(dev) && is_cpu_edp(intel_dp)) {
			signal_levels = intel_gen6_edp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~EDP_LINK_TRAIN_VOL_EMP_MASK_SNB) | signal_levels;
		} else {
			signal_levels = intel_dp_signal_levels(intel_dp->train_set[0]);
			DP = (DP & ~(DP_VOLTAGE_MASK|DP_PRE_EMPHASIS_MASK)) | signal_levels;
		}

		/* channel eq pattern */
		if (!intel_dp_set_link_train(intel_dp, DP,
					     DP_TRAINING_PATTERN_2 |
					     DP_LINK_SCRAMBLING_DISABLE))
			break;

		udelay(400);
		if (!intel_dp_get_link_status(intel_dp, link_status))
			break;

		/* Make sure clock is still ok */
		if (!intel_clock_recovery_ok(link_status, intel_dp->lane_count)) {
			intel_dp_start_link_train(intel_dp);
			cr_tries++;
			continue;
		}

		if (intel_channel_eq_ok(intel_dp, link_status)) {
			channel_eq = true;
			break;
		}

		/* Try 5 times, then try clock recovery if that fails */
		if (tries > 5) {
			intel_dp_link_down(intel_dp);
			intel_dp_start_link_train(intel_dp);
			tries = 0;
			cr_tries++;
			continue;
		}

		/* Compute new intel_dp->train_set as requested by target */
		intel_get_adjust_train(intel_dp, link_status);
		++tries;
	}

	intel_dp_set_link_train(intel_dp, DP, DP_TRAINING_PATTERN_DISABLE);
}

/* Take the link down: idle the training pattern, then disable the port,
 * applying the IBX transcoder-B select workaround along the way. */
static void
intel_dp_link_down(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t DP = intel_dp->DP;

	if ((I915_READ(intel_dp->output_reg) & DP_PORT_EN) == 0)
		return;

	DRM_DEBUG_KMS("\n");

	if (is_edp(intel_dp)) {
		DP &= ~DP_PLL_ENABLE;
		I915_WRITE(intel_dp->output_reg, DP);
		POSTING_READ(intel_dp->output_reg);
		udelay(100);
	}

	if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp))) {
		DP &= ~DP_LINK_TRAIN_MASK_CPT;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE_CPT);
	} else {
		DP &= ~DP_LINK_TRAIN_MASK;
		I915_WRITE(intel_dp->output_reg, DP | DP_LINK_TRAIN_PAT_IDLE);
	}
	POSTING_READ(intel_dp->output_reg);

	msleep(17);

	if (is_edp(intel_dp)) {
		if (HAS_PCH_CPT(dev) && (IS_GEN7(dev) || !is_cpu_edp(intel_dp)))
			DP |= DP_LINK_TRAIN_OFF_CPT;
		else
			DP |= DP_LINK_TRAIN_OFF;
	}

	if (HAS_PCH_IBX(dev) &&
	    I915_READ(intel_dp->output_reg) & DP_PIPEB_SELECT) {
		struct drm_crtc *crtc = intel_dp->base.base.crtc;

		/* Hardware workaround: leaving our transcoder select
		 * set to transcoder B while it's off will prevent the
		 * corresponding HDMI output on transcoder A.
		 *
		 * Combine this with another hardware workaround:
		 * transcoder select bit can only be cleared while the
		 * port is enabled.
		 */
		DP &= ~DP_PIPEB_SELECT;
		I915_WRITE(intel_dp->output_reg, DP);

		/* Changes to enable or select take place the vblank
		 * after being written.
		 */
		if (crtc == NULL) {
			/* We can arrive here never having been attached
			 * to a CRTC, for instance, due to inheriting
			 * random state from the BIOS.
			 *
			 * If the pipe is not running, play safe and
			 * wait for the clocks to stabilise before
			 * continuing.
			 */
			POSTING_READ(intel_dp->output_reg);
			msleep(50);
		} else
			intel_wait_for_vblank(dev, to_intel_crtc(crtc)->pipe);
	}

	DP &= ~DP_AUDIO_OUTPUT_ENABLE;
	I915_WRITE(intel_dp->output_reg, DP & ~DP_PORT_EN);
	POSTING_READ(intel_dp->output_reg);
	msleep(intel_dp->panel_power_down_delay);
}

/* Read the sink's DPCD receiver-capability block; true only if the read
 * succeeds and the revision field is non-zero. */
static bool
intel_dp_get_dpcd(struct intel_dp *intel_dp)
{
	if (intel_dp_aux_native_read_retry(intel_dp, 0x000, intel_dp->dpcd,
					   sizeof(intel_dp->dpcd)) &&
	    (intel_dp->dpcd[DP_DPCD_REV] != 0)) {
		return true;
	}

	return false;
}

/* If the sink advertises OUI support, read and log the sink and branch
 * OUIs (debug aid only); holds a VDD reference across the AUX reads. */
static void
intel_dp_probe_oui(struct intel_dp *intel_dp)
{
	u8 buf[3];

	if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
		return;

	ironlake_edp_panel_vdd_on(intel_dp);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
		DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
		DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
			      buf[0], buf[1], buf[2]);

	ironlake_edp_panel_vdd_off(intel_dp, false);
}

/* Fetch the sink's device-service IRQ vector; false on AUX failure. */
static bool
intel_dp_get_sink_irq(struct intel_dp *intel_dp, u8 *sink_irq_vector)
{
	int ret;

	ret = intel_dp_aux_native_read_retry(intel_dp,
					     DP_DEVICE_SERVICE_IRQ_VECTOR,
					     sink_irq_vector, 1);
	if (!ret)
		return false;

	return true;
}

static void
intel_dp_handle_test_request(struct intel_dp *intel_dp)
{
	/* NAK by default */
	intel_dp_aux_native_write_1(intel_dp, DP_TEST_RESPONSE, DP_TEST_ACK);
}

/*
 * According to DP spec
 * 5.1.2:
 *  1. Read DPCD
 *  2. Configure link according to Receiver Capabilities
 *  3. Use Link Training from 2.5.3.3 and 3.5.1.3
 *  4.
 *     Check link status on receipt of hot-plug interrupt
 */

static void
intel_dp_check_link_status(struct intel_dp *intel_dp)
{
	u8 sink_irq_vector;
	u8 link_status[DP_LINK_STATUS_SIZE];

	if (!intel_dp->base.connectors_active)
		return;

	if (WARN_ON(!intel_dp->base.base.crtc))
		return;

	/* Try to read receiver status if the link appears to be up */
	if (!intel_dp_get_link_status(intel_dp, link_status)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Now read the DPCD to see if it's actually running */
	if (!intel_dp_get_dpcd(intel_dp)) {
		intel_dp_link_down(intel_dp);
		return;
	}

	/* Try to read the source of the interrupt */
	if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11 &&
	    intel_dp_get_sink_irq(intel_dp, &sink_irq_vector)) {
		/* Clear interrupt source */
		intel_dp_aux_native_write_1(intel_dp,
					    DP_DEVICE_SERVICE_IRQ_VECTOR,
					    sink_irq_vector);

		if (sink_irq_vector & DP_AUTOMATED_TEST_REQUEST)
			intel_dp_handle_test_request(intel_dp);
		if (sink_irq_vector & (DP_CP_IRQ | DP_SINK_SPECIFIC_IRQ))
			DRM_DEBUG_DRIVER("CP or sink specific irq unhandled\n");
	}

	if (!intel_channel_eq_ok(intel_dp, link_status)) {
		DRM_DEBUG_KMS("%s: channel EQ not ok, retraining\n",
			      drm_get_encoder_name(&intel_dp->base.base));
		intel_dp_start_link_train(intel_dp);
		intel_dp_complete_link_train(intel_dp);
	}
}

/* Connected iff the DPCD can be read successfully. */
static enum drm_connector_status
intel_dp_detect_dpcd(struct intel_dp *intel_dp)
{
	if (intel_dp_get_dpcd(intel_dp))
		return connector_status_connected;
	return connector_status_disconnected;
}

static enum drm_connector_status
ironlake_dp_detect(struct intel_dp *intel_dp)
{
	enum drm_connector_status status;

	/* Can't disconnect eDP, but you can close the lid... */
	if (is_edp(intel_dp)) {
		status = intel_panel_detect(intel_dp->base.base.dev);
		if (status == connector_status_unknown)
			status = connector_status_connected;
		return status;
	}

	return intel_dp_detect_dpcd(intel_dp);
}

/* G4x detection: check the port's hotplug live-status bit before probing
 * the DPCD over AUX. */
static enum drm_connector_status
g4x_dp_detect(struct intel_dp *intel_dp)
{
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	uint32_t bit;

	switch (intel_dp->output_reg) {
	case DP_B:
		bit = DPB_HOTPLUG_LIVE_STATUS;
		break;
	case DP_C:
		bit = DPC_HOTPLUG_LIVE_STATUS;
		break;
	case DP_D:
		bit = DPD_HOTPLUG_LIVE_STATUS;
		break;
	default:
		return connector_status_unknown;
	}

	if ((I915_READ(PORT_HOTPLUG_STAT) & bit) == 0)
		return connector_status_disconnected;

	return intel_dp_detect_dpcd(intel_dp);
}

/* Return a freshly-allocated copy of the EDID: for eDP from the cached
 * intel_dp->edid, otherwise via DDC.  Caller owns (and must kfree) it. */
static struct edid *
intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	int size;

	if (is_edp(intel_dp)) {
		if (!intel_dp->edid)
			return NULL;

		size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
		edid = kmalloc(size, GFP_KERNEL);
		if (!edid)
			return NULL;

		memcpy(edid, intel_dp->edid, size);
		return edid;
	}

	edid = drm_get_edid(connector, adapter);
	return edid;
}

/* Populate the connector's mode list from the EDID (cached for eDP, DDC
 * otherwise) and return the number of modes added. */
static int
intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *adapter)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	if (is_edp(intel_dp)) {
		drm_mode_connector_update_edid_property(connector,
							intel_dp->edid);
		ret = drm_add_edid_modes(connector, intel_dp->edid);
		drm_edid_to_eld(connector,
				intel_dp->edid);
		connector->display_info.raw_edid = NULL;
		return intel_dp->edid_mode_count;
	}

	ret = intel_ddc_get_modes(connector, adapter);
	return ret;
}


/**
 * Uses CRT_HOTPLUG_EN and CRT_HOTPLUG_STAT to detect DP connection.
 *
 * \return true if DP port is connected.
 * \return false if DP port is disconnected.
 */
static enum drm_connector_status
intel_dp_detect(struct drm_connector *connector, bool force)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	enum drm_connector_status status;
	struct edid *edid = NULL;

	intel_dp->has_audio = false;

	if (HAS_PCH_SPLIT(dev))
		status = ironlake_dp_detect(intel_dp);
	else
		status = g4x_dp_detect(intel_dp);

	DRM_DEBUG_KMS("DPCD: %02hx%02hx%02hx%02hx%02hx%02hx%02hx%02hx\n",
		      intel_dp->dpcd[0], intel_dp->dpcd[1], intel_dp->dpcd[2],
		      intel_dp->dpcd[3], intel_dp->dpcd[4], intel_dp->dpcd[5],
		      intel_dp->dpcd[6], intel_dp->dpcd[7]);

	if (status != connector_status_connected)
		return status;

	intel_dp_probe_oui(intel_dp);

	/* has_audio comes either from the force_audio property or from the
	 * monitor's EDID. */
	if (intel_dp->force_audio != HDMI_AUDIO_AUTO) {
		intel_dp->has_audio = (intel_dp->force_audio == HDMI_AUDIO_ON);
	} else {
		edid = intel_dp_get_edid(connector, &intel_dp->adapter);
		if (edid) {
			intel_dp->has_audio = drm_detect_monitor_audio(edid);
			connector->display_info.raw_edid = NULL;
			kfree(edid);
		}
	}

	return connector_status_connected;
}

static int intel_dp_get_modes(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct drm_device *dev = intel_dp->base.base.dev;
	struct drm_i915_private *dev_priv = dev->dev_private;
	int ret;

	/* We should parse the EDID data and find out if it has an audio sink
	 */

	ret = intel_dp_get_edid_modes(connector, &intel_dp->adapter);
	if (ret) {
		/* Cache the preferred EDID mode as the eDP fixed panel mode
		 * the first time we see it. */
		if (is_edp(intel_dp) && !intel_dp->panel_fixed_mode) {
			struct drm_display_mode *newmode;
			list_for_each_entry(newmode, &connector->probed_modes,
					    head) {
				if ((newmode->type & DRM_MODE_TYPE_PREFERRED)) {
					intel_dp->panel_fixed_mode =
						drm_mode_duplicate(dev, newmode);
					break;
				}
			}
		}
		return ret;
	}

	/* if eDP has no EDID, try to use fixed panel mode from VBT */
	if (is_edp(intel_dp)) {
		/* initialize panel mode from VBT if available for eDP */
		if (intel_dp->panel_fixed_mode == NULL && dev_priv->lfp_lvds_vbt_mode != NULL) {
			intel_dp->panel_fixed_mode =
				drm_mode_duplicate(dev, dev_priv->lfp_lvds_vbt_mode);
			if (intel_dp->panel_fixed_mode) {
				intel_dp->panel_fixed_mode->type |=
					DRM_MODE_TYPE_PREFERRED;
			}
		}
		if (intel_dp->panel_fixed_mode) {
			struct drm_display_mode *mode;
			mode = drm_mode_duplicate(dev, intel_dp->panel_fixed_mode);
			drm_mode_probed_add(connector, mode);
			return 1;
		}
	}
	return 0;
}

/* Probe the EDID for an audio-capable monitor; frees the copy it fetched. */
static bool
intel_dp_detect_audio(struct drm_connector *connector)
{
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	struct edid *edid;
	bool has_audio = false;

	edid = intel_dp_get_edid(connector, &intel_dp->adapter);
	if (edid) {
		has_audio = drm_detect_monitor_audio(edid);

		connector->display_info.raw_edid = NULL;
		kfree(edid);
	}

	return has_audio;
}

static int
intel_dp_set_property(struct drm_connector *connector,
		      struct drm_property *property,
		      uint64_t val)
{
	struct drm_i915_private *dev_priv = connector->dev->dev_private;
	struct intel_dp *intel_dp = intel_attached_dp(connector);
	int ret;

	ret = drm_connector_property_set_value(connector, property, val);
	if (ret)
		return ret;

	if (property == dev_priv->force_audio_property) {
		int i = val;
		bool has_audio;

		if
(i == intel_dp->force_audio) 2330 return 0; 2331 2332 intel_dp->force_audio = i; 2333 2334 if (i == HDMI_AUDIO_AUTO) 2335 has_audio = intel_dp_detect_audio(connector); 2336 else 2337 has_audio = (i == HDMI_AUDIO_ON); 2338 2339 if (has_audio == intel_dp->has_audio) 2340 return 0; 2341 2342 intel_dp->has_audio = has_audio; 2343 goto done; 2344 } 2345 2346 if (property == dev_priv->broadcast_rgb_property) { 2347 if (val == !!intel_dp->color_range) 2348 return 0; 2349 2350 intel_dp->color_range = val ? DP_COLOR_RANGE_16_235 : 0; 2351 goto done; 2352 } 2353 2354 return -EINVAL; 2355 2356done: 2357 if (intel_dp->base.base.crtc) { 2358 struct drm_crtc *crtc = intel_dp->base.base.crtc; 2359 intel_set_mode(crtc, &crtc->mode, 2360 crtc->x, crtc->y, crtc->fb); 2361 } 2362 2363 return 0; 2364} 2365 2366static void 2367intel_dp_destroy(struct drm_connector *connector) 2368{ 2369 struct drm_device *dev = connector->dev; 2370 2371 if (intel_dpd_is_edp(dev)) 2372 intel_panel_destroy_backlight(dev); 2373 2374 drm_sysfs_connector_remove(connector); 2375 drm_connector_cleanup(connector); 2376 kfree(connector); 2377} 2378 2379static void intel_dp_encoder_destroy(struct drm_encoder *encoder) 2380{ 2381 struct intel_dp *intel_dp = enc_to_intel_dp(encoder); 2382 2383 i2c_del_adapter(&intel_dp->adapter); 2384 drm_encoder_cleanup(encoder); 2385 if (is_edp(intel_dp)) { 2386 kfree(intel_dp->edid); 2387 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2388 ironlake_panel_vdd_off_sync(intel_dp); 2389 } 2390 kfree(intel_dp); 2391} 2392 2393static const struct drm_encoder_helper_funcs intel_dp_helper_funcs = { 2394 .mode_fixup = intel_dp_mode_fixup, 2395 .mode_set = intel_dp_mode_set, 2396 .disable = intel_encoder_noop, 2397}; 2398 2399static const struct drm_connector_funcs intel_dp_connector_funcs = { 2400 .dpms = intel_dp_dpms, 2401 .detect = intel_dp_detect, 2402 .fill_modes = drm_helper_probe_single_connector_modes, 2403 .set_property = intel_dp_set_property, 2404 .destroy = 
intel_dp_destroy, 2405}; 2406 2407static const struct drm_connector_helper_funcs intel_dp_connector_helper_funcs = { 2408 .get_modes = intel_dp_get_modes, 2409 .mode_valid = intel_dp_mode_valid, 2410 .best_encoder = intel_best_encoder, 2411}; 2412 2413static const struct drm_encoder_funcs intel_dp_enc_funcs = { 2414 .destroy = intel_dp_encoder_destroy, 2415}; 2416 2417static void 2418intel_dp_hot_plug(struct intel_encoder *intel_encoder) 2419{ 2420 struct intel_dp *intel_dp = container_of(intel_encoder, struct intel_dp, base); 2421 2422 intel_dp_check_link_status(intel_dp); 2423} 2424 2425/* Return which DP Port should be selected for Transcoder DP control */ 2426int 2427intel_trans_dp_port_sel(struct drm_crtc *crtc) 2428{ 2429 struct drm_device *dev = crtc->dev; 2430 struct intel_encoder *encoder; 2431 2432 for_each_encoder_on_crtc(dev, crtc, encoder) { 2433 struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base); 2434 2435 if (intel_dp->base.type == INTEL_OUTPUT_DISPLAYPORT || 2436 intel_dp->base.type == INTEL_OUTPUT_EDP) 2437 return intel_dp->output_reg; 2438 } 2439 2440 return -1; 2441} 2442 2443/* check the VBT to see whether the eDP is on DP-D port */ 2444bool intel_dpd_is_edp(struct drm_device *dev) 2445{ 2446 struct drm_i915_private *dev_priv = dev->dev_private; 2447 struct child_device_config *p_child; 2448 int i; 2449 2450 if (!dev_priv->child_dev_num) 2451 return false; 2452 2453 for (i = 0; i < dev_priv->child_dev_num; i++) { 2454 p_child = dev_priv->child_dev + i; 2455 2456 if (p_child->dvo_port == PORT_IDPD && 2457 p_child->device_type == DEVICE_TYPE_eDP) 2458 return true; 2459 } 2460 return false; 2461} 2462 2463static void 2464intel_dp_add_properties(struct intel_dp *intel_dp, struct drm_connector *connector) 2465{ 2466 intel_attach_force_audio_property(connector); 2467 intel_attach_broadcast_rgb_property(connector); 2468} 2469 2470void 2471intel_dp_init(struct drm_device *dev, int output_reg, enum port port) 2472{ 2473 struct drm_i915_private 
*dev_priv = dev->dev_private; 2474 struct drm_connector *connector; 2475 struct intel_dp *intel_dp; 2476 struct intel_encoder *intel_encoder; 2477 struct intel_connector *intel_connector; 2478 const char *name = NULL; 2479 int type; 2480 2481 intel_dp = kzalloc(sizeof(struct intel_dp), GFP_KERNEL); 2482 if (!intel_dp) 2483 return; 2484 2485 intel_dp->output_reg = output_reg; 2486 intel_dp->port = port; 2487 2488 intel_connector = kzalloc(sizeof(struct intel_connector), GFP_KERNEL); 2489 if (!intel_connector) { 2490 kfree(intel_dp); 2491 return; 2492 } 2493 intel_encoder = &intel_dp->base; 2494 2495 if (HAS_PCH_SPLIT(dev) && output_reg == PCH_DP_D) 2496 if (intel_dpd_is_edp(dev)) 2497 intel_dp->is_pch_edp = true; 2498 2499 if (output_reg == DP_A || is_pch_edp(intel_dp)) { 2500 type = DRM_MODE_CONNECTOR_eDP; 2501 intel_encoder->type = INTEL_OUTPUT_EDP; 2502 } else { 2503 type = DRM_MODE_CONNECTOR_DisplayPort; 2504 intel_encoder->type = INTEL_OUTPUT_DISPLAYPORT; 2505 } 2506 2507 connector = &intel_connector->base; 2508 drm_connector_init(dev, connector, &intel_dp_connector_funcs, type); 2509 drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs); 2510 2511 connector->polled = DRM_CONNECTOR_POLL_HPD; 2512 2513 intel_encoder->cloneable = false; 2514 2515 INIT_DELAYED_WORK(&intel_dp->panel_vdd_work, 2516 ironlake_panel_vdd_work); 2517 2518 intel_encoder->crtc_mask = (1 << 0) | (1 << 1) | (1 << 2); 2519 2520 connector->interlace_allowed = true; 2521 connector->doublescan_allowed = 0; 2522 2523 drm_encoder_init(dev, &intel_encoder->base, &intel_dp_enc_funcs, 2524 DRM_MODE_ENCODER_TMDS); 2525 drm_encoder_helper_add(&intel_encoder->base, &intel_dp_helper_funcs); 2526 2527 intel_connector_attach_encoder(intel_connector, intel_encoder); 2528 drm_sysfs_connector_add(connector); 2529 2530 intel_encoder->enable = intel_enable_dp; 2531 intel_encoder->disable = intel_disable_dp; 2532 intel_encoder->get_hw_state = intel_dp_get_hw_state; 2533 
intel_connector->get_hw_state = intel_connector_get_hw_state; 2534 2535 /* Set up the DDC bus. */ 2536 switch (port) { 2537 case PORT_A: 2538 name = "DPDDC-A"; 2539 break; 2540 case PORT_B: 2541 dev_priv->hotplug_supported_mask |= DPB_HOTPLUG_INT_STATUS; 2542 name = "DPDDC-B"; 2543 break; 2544 case PORT_C: 2545 dev_priv->hotplug_supported_mask |= DPC_HOTPLUG_INT_STATUS; 2546 name = "DPDDC-C"; 2547 break; 2548 case PORT_D: 2549 dev_priv->hotplug_supported_mask |= DPD_HOTPLUG_INT_STATUS; 2550 name = "DPDDC-D"; 2551 break; 2552 default: 2553 WARN(1, "Invalid port %c\n", port_name(port)); 2554 break; 2555 } 2556 2557 intel_dp_i2c_init(intel_dp, intel_connector, name); 2558 2559 /* Cache some DPCD data in the eDP case */ 2560 if (is_edp(intel_dp)) { 2561 bool ret; 2562 struct edp_power_seq cur, vbt; 2563 u32 pp_on, pp_off, pp_div; 2564 struct edid *edid; 2565 2566 pp_on = I915_READ(PCH_PP_ON_DELAYS); 2567 pp_off = I915_READ(PCH_PP_OFF_DELAYS); 2568 pp_div = I915_READ(PCH_PP_DIVISOR); 2569 2570 if (!pp_on || !pp_off || !pp_div) { 2571 DRM_INFO("bad panel power sequencing delays, disabling panel\n"); 2572 intel_dp_encoder_destroy(&intel_dp->base.base); 2573 intel_dp_destroy(&intel_connector->base); 2574 return; 2575 } 2576 2577 /* Pull timing values out of registers */ 2578 cur.t1_t3 = (pp_on & PANEL_POWER_UP_DELAY_MASK) >> 2579 PANEL_POWER_UP_DELAY_SHIFT; 2580 2581 cur.t8 = (pp_on & PANEL_LIGHT_ON_DELAY_MASK) >> 2582 PANEL_LIGHT_ON_DELAY_SHIFT; 2583 2584 cur.t9 = (pp_off & PANEL_LIGHT_OFF_DELAY_MASK) >> 2585 PANEL_LIGHT_OFF_DELAY_SHIFT; 2586 2587 cur.t10 = (pp_off & PANEL_POWER_DOWN_DELAY_MASK) >> 2588 PANEL_POWER_DOWN_DELAY_SHIFT; 2589 2590 cur.t11_t12 = ((pp_div & PANEL_POWER_CYCLE_DELAY_MASK) >> 2591 PANEL_POWER_CYCLE_DELAY_SHIFT) * 1000; 2592 2593 DRM_DEBUG_KMS("cur t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 2594 cur.t1_t3, cur.t8, cur.t9, cur.t10, cur.t11_t12); 2595 2596 vbt = dev_priv->edp.pps; 2597 2598 DRM_DEBUG_KMS("vbt t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", 
2599 vbt.t1_t3, vbt.t8, vbt.t9, vbt.t10, vbt.t11_t12); 2600 2601#define get_delay(field) ((max(cur.field, vbt.field) + 9) / 10) 2602 2603 intel_dp->panel_power_up_delay = get_delay(t1_t3); 2604 intel_dp->backlight_on_delay = get_delay(t8); 2605 intel_dp->backlight_off_delay = get_delay(t9); 2606 intel_dp->panel_power_down_delay = get_delay(t10); 2607 intel_dp->panel_power_cycle_delay = get_delay(t11_t12); 2608 2609 DRM_DEBUG_KMS("panel power up delay %d, power down delay %d, power cycle delay %d\n", 2610 intel_dp->panel_power_up_delay, intel_dp->panel_power_down_delay, 2611 intel_dp->panel_power_cycle_delay); 2612 2613 DRM_DEBUG_KMS("backlight on delay %d, off delay %d\n", 2614 intel_dp->backlight_on_delay, intel_dp->backlight_off_delay); 2615 2616 ironlake_edp_panel_vdd_on(intel_dp); 2617 ret = intel_dp_get_dpcd(intel_dp); 2618 ironlake_edp_panel_vdd_off(intel_dp, false); 2619 2620 if (ret) { 2621 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x11) 2622 dev_priv->no_aux_handshake = 2623 intel_dp->dpcd[DP_MAX_DOWNSPREAD] & 2624 DP_NO_AUX_HANDSHAKE_LINK_TRAINING; 2625 } else { 2626 /* if this fails, presume the device is a ghost */ 2627 DRM_INFO("failed to retrieve link info, disabling eDP\n"); 2628 intel_dp_encoder_destroy(&intel_dp->base.base); 2629 intel_dp_destroy(&intel_connector->base); 2630 return; 2631 } 2632 2633 ironlake_edp_panel_vdd_on(intel_dp); 2634 edid = drm_get_edid(connector, &intel_dp->adapter); 2635 if (edid) { 2636 drm_mode_connector_update_edid_property(connector, 2637 edid); 2638 intel_dp->edid_mode_count = 2639 drm_add_edid_modes(connector, edid); 2640 drm_edid_to_eld(connector, edid); 2641 intel_dp->edid = edid; 2642 } 2643 ironlake_edp_panel_vdd_off(intel_dp, false); 2644 } 2645 2646 intel_encoder->hot_plug = intel_dp_hot_plug; 2647 2648 if (is_edp(intel_dp)) { 2649 dev_priv->int_edp_connector = connector; 2650 intel_panel_setup_backlight(dev); 2651 } 2652 2653 intel_dp_add_properties(intel_dp, connector); 2654 2655 /* For G4X desktop chip, 
PEG_BAND_GAP_DATA 3:0 must first be written 2656 * 0xd. Failure to do so will result in spurious interrupts being 2657 * generated on the port when a cable is not attached. 2658 */ 2659 if (IS_G4X(dev) && !IS_GM45(dev)) { 2660 u32 temp = I915_READ(PEG_BAND_GAP_DATA); 2661 I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd); 2662 } 2663} 2664